filename | text
---|---|
the-stack_106_20163
|
import os
import openpype.api
from openpype.hosts.photoshop import api as photoshop
class ExtractImage(openpype.api.Extractor):
"""Produce a flattened image file from instance
This plug-in takes into account only the layers in the group.
"""
label = "Extract Image"
hosts = ["photoshop"]
families = ["image", "background"]
formats = ["png", "jpg"]
def process(self, instance):
staging_dir = self.staging_dir(instance)
self.log.info("Outputting image to {}".format(staging_dir))
# Perform extraction
stub = photoshop.stub()
files = {}
with photoshop.maintained_selection():
self.log.info("Extracting %s" % str(list(instance)))
with photoshop.maintained_visibility():
layer = instance.data.get("layer")
ids = set([layer.id])
add_ids = instance.data.pop("ids", None)
if add_ids:
ids.update(set(add_ids))
extract_ids = set([ll.id for ll in stub.
get_layers_in_layers_ids(ids)])
stub.hide_all_others_layers_ids(extract_ids)
file_basename = os.path.splitext(
stub.get_active_document_name()
)[0]
for extension in self.formats:
_filename = "{}.{}".format(file_basename, extension)
files[extension] = _filename
full_filename = os.path.join(staging_dir, _filename)
stub.saveAs(full_filename, extension, True)
self.log.info(f"Extracted: {extension}")
representations = []
for extension, filename in files.items():
representations.append({
"name": extension,
"ext": extension,
"files": filename,
"stagingDir": staging_dir
})
instance.data["representations"] = representations
instance.data["stagingDir"] = staging_dir
self.log.info(f"Extracted {instance} to {staging_dir}")
|
the-stack_106_20165
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
from unittest import TestCase, main, mock
from orc8r.protos.common_pb2 import Void
from orc8r.protos.service303_pb2 import ServiceInfo
from orc8r.protos.service303_pb2_grpc import Service303Stub
from orc8r.protos.mconfig import mconfigs_pb2
from magma.common.service import MagmaService
from magma.common.service_registry import ServiceRegistry
class Service303Tests(TestCase):
"""
Tests for the MagmaService and the Service303 interface
"""
@mock.patch('time.time', mock.MagicMock(return_value=12345))
def setUp(self):
ServiceRegistry.add_service('test', '0.0.0.0', 0)
self._stub = None
self._loop = asyncio.new_event_loop()
# Use a new event loop to ensure isolated tests
self._service = MagmaService(
name='test',
empty_mconfig=mconfigs_pb2.MagmaD(),
loop=self._loop,
)
asyncio.set_event_loop(self._service.loop)
@mock.patch(
'magma.common.service_registry.ServiceRegistry.get_proxy_config')
def test_service_run(self, mock_get_proxy_config):
"""
Test if the service starts and stops gracefully.
"""
self.assertEqual(self._service.state, ServiceInfo.STARTING)
mock_get_proxy_config.return_value = {
'cloud_address': '127.0.0.1',
'proxy_cloud_connections': True
}
# Start the service and pause the loop
self._service.loop.stop()
self._service.run()
asyncio.set_event_loop(self._service.loop)
self._service.log_counter._periodic_task.cancel()
self.assertEqual(self._service.state, ServiceInfo.ALIVE)
# Create a rpc stub and query the Service303 interface
ServiceRegistry.add_service('test', '0.0.0.0', self._service.port)
channel = ServiceRegistry.get_rpc_channel('test',
ServiceRegistry.LOCAL)
self._stub = Service303Stub(channel)
info = ServiceInfo(name='test',
version='0.0.0',
state=ServiceInfo.ALIVE,
health=ServiceInfo.APP_HEALTHY,
start_time_secs=12345)
self.assertEqual(self._stub.GetServiceInfo(Void()), info)
# Stop the service
self._stub.StopService(Void())
self._service.loop.run_forever()
self.assertEqual(self._service.state, ServiceInfo.STOPPED)
if __name__ == "__main__":
main()
|
the-stack_106_20166
|
"""
Sample repo pull request events module.
Type examples:
{'CreateEvent', e.g. branch
'IssueCommentEvent',
'IssuesEvent',
'PullRequestEvent',
'PushEvent',
'WatchEvent'}
"""
import pprint
from collections import Counter
from etc import config
from lib.connection import CONN
def main():
for repo_name in config.REPO_PATHS:
repo = CONN.get_repo(repo_name)
print(repo.name)
events_c = Counter()
events = list(repo.get_events())
ev = [x.type for x in events]
events_c.update(ev)
for e in events:
data = dict(
username=e.actor.login, created_at=str(e.created_at.date()), type=e.type
)
print(data)
p = e.payload
payload_data = dict(
action=p.get("action"),
comments=p.get("comment"),
pull_request=p.get("pull_request"),
issue=p.get("issue"),
)
for k, v in payload_data.items():
if v:
if isinstance(v, dict):
new_v = list(v.keys())
payload_data[k] = new_v
pprint.pprint(payload_data)
print()
pprint.pprint(events_c.most_common())
if __name__ == "__main__":
main()
|
the-stack_106_20167
|
#!/usr/local/python/bin/python
# -*- coding: utf-8 -*-
# (C) Wu Dong, 2020
# All rights reserved
# @Author: 'Wu Dong <[email protected]>'
# @Time: '2020-03-19 10:33'
""" 演示 pre-request 框架如何使用Json校验
"""
import json
from flask import Flask
from pre_request import pre, Rule
app = Flask(__name__)
app.config["TESTING"] = True
client = app.test_client()
# With json=True, the framework automatically parses the params value as JSON
json_params = {
"params": Rule(json=True)
}
@app.route("/json", methods=["GET", "POST"])
@pre.catch(json_params)
def example_json_handler(params):
return str(params)
def example_json_filter():
""" 演示邮箱验证
"""
resp = client.post("/json", json={
"params": json.dumps(["hello", "work", "!"])
})
print(resp.data)
if __name__ == "__main__":
example_json_filter()
|
the-stack_106_20169
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend TRANSCENDENCEs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a transcendenced or transcendence-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the transcendence data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Transcendence/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Transcendence")
return os.path.expanduser("~/.transcendence")
def read_bitcoin_config(dbdir):
"""Read the transcendence.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "transcendence.conf"))))
return dict(config_parser.items("all"))
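# Illustrative note (an assumption, not part of the original script): because
# FakeSecHead prepends a fake "[all]" section and strips everything after '#',
# a minimal transcendence.conf accepted by this parser could look like:
#   rpcuser=alice
#   rpcpassword=secret
#   rpcport=5520   # optional; connect_JSON falls back to 5520 (or 38843 on testnet)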
def connect_JSON(config):
"""Connect to a transcendence JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 38843 if testnet else 5520
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the transcendenced we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(transcendenced):
info = transcendenced.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
transcendenced.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = transcendenced.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(transcendenced):
address_summary = dict()
address_to_account = dict()
for info in transcendenced.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = transcendenced.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = transcendenced.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-transcendence-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
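# Worked example (illustrative, not in the original): with potential inputs
# carrying amounts [0.4, 0.7, 1.0] and needed = 1.0, the greedy loop above
# takes the first two outputs (have = 1.1) and returns change of 0.1.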
def create_tx(transcendenced, fromaddresses, toaddress, amount, fee):
all_coins = list_available(transcendenced)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to transcendenced.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = transcendenced.createrawtransaction(inputs, outputs)
signed_rawtx = transcendenced.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(transcendenced, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = transcendenced.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(transcendenced, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = transcendenced.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(transcendenced, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get TRANSCENDENCEs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send TRANSCENDENCEs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of transcendence.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
transcendenced = connect_JSON(config)
if options.amount is None:
address_summary = list_available(transcendenced)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(transcendenced) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(transcendenced, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(transcendenced, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = transcendenced.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
the-stack_106_20170
|
import numpy as np
def kernel(M, float_n, data):
mean = np.mean(data, axis=0)
stddev = np.std(data, axis=0)
stddev[stddev <= 0.1] = 1.0
data -= mean
data /= np.sqrt(float_n) * stddev
corr = np.eye(M, dtype=data.dtype)
for i in range(M - 1):
corr[i + 1:M, i] = corr[i, i + 1:M] = data[:, i] @ data[:, i + 1:M]
return corr
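# Minimal usage sketch (an illustrative addition, not part of the original
# benchmark): the kernel expects an (N, M) float array, mutates `data` in
# place while normalizing each column, and returns a symmetric M x M
# correlation matrix with ones on the diagonal.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    N, M = 100, 8
    sample = rng.random((N, M))
    corr = kernel(M, float(N), sample)
    assert corr.shape == (M, M) and np.allclose(corr, corr.T)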
|
the-stack_106_20172
|
import logging
from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QSizePolicy, \
QTableWidget, QTableWidgetItem
from PySide2.QtCore import Qt, QSize
from ...ui.dialogs.new_state import SrcAddrAnnotation
l = logging.getLogger('ui.widgets.qconstraint_viewer')
class QConstraintViewer(QFrame):
COLUMNS = [ "Constraint", "Src Address", "Cardinality", "Depth", "# Variables" ]
def __init__(self, state, parent, workspace):
super(QConstraintViewer, self).__init__(parent)
self._state = state
self.workspace = workspace
self.table = None
self._state.am_subscribe(self._watch_state)
#
# Public methods
#
def reload(self):
self.table.setRowCount(0)
for constraint in self._state.solver.constraints:
count = self.table.rowCount()
self.table.insertRow(count)
self.table.setItem(count, 0, QTableWidgetItem(constraint.shallow_repr()))
src_addr = next(a for a in constraint.annotations if type(a) == SrcAddrAnnotation).addr
self.table.setItem(count, 1, QTableWidgetItem(hex(src_addr)))
self.table.setItem(count, 2, QTableWidgetItem(str(constraint.cardinality)))
self.table.setItem(count, 3, QTableWidgetItem(str(constraint.depth)))
self.table.setItem(count, 4, QTableWidgetItem(str(len(list(constraint.recursive_leaf_asts)))))
#
# Private methods
#
def _init_widgets(self):
if self._state.am_none():
return
layout = QVBoxLayout()
area = QScrollArea()
area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
area.setWidgetResizable(True)
table = QTableWidget(0, 0)
table.setColumnCount(len(self.COLUMNS))
table.setHorizontalHeaderLabels(self.COLUMNS)
self.table = table
layout.addWidget(table)
# common ones
layout.setSpacing(0)
layout.addStretch(0)
layout.setContentsMargins(2, 2, 2, 2)
# the container
container = QFrame()
container.setAutoFillBackground(True)
palette = container.palette()
palette.setColor(container.backgroundRole(), Qt.white)
container.setPalette(palette)
container.setLayout(layout)
area.setWidget(container)
base_layout = QVBoxLayout()
base_layout.addWidget(area)
self.setLayout(base_layout)
def _watch_state(self, **kwargs):
if self.table is None:
self._init_widgets()
self.reload()
|
the-stack_106_20175
|
from .taadd_com import TaaddCom
class TenMangaCom(TaaddCom):
_name_selector = '.read-page a[href*="/book/"]'
_pages_selector = '.sl-page'
_chapters_selector = '.chapter-box .choose-page a:last-child'
img_selector = '.pic_box .manga_pic'
main = TenMangaCom
|
the-stack_106_20176
|
#############################################################################
# Import #
#############################################################################
import os
import random
import PIL.Image as Image
from tqdm import tqdm
import numpy as np
import scipy.io
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn.functional as F
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
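# Quick illustration (an added sketch, not in the original script): attribute
# and key access are interchangeable, and missing attributes resolve to None
# because __getattr__ is bound to dict.get, e.g.
#   d = DotDict(); d.foo = 1   ->   d["foo"] == 1 and d.bar is None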
#############################################################################
# Hyperparameters #
#############################################################################
opt = DotDict()
opt.dataset = '3Dchairs' # [ celebA | 102flowers | 3Dchairs ]
opt.dataPath = './data'
# Input space
opt.nc = 3 # number of input channels
opt.sizeX = 64 # size of the image
opt.sizeZ = 128 # size of random noise vectors
# Convolution settings
opt.nf = 64 # base number of filter in G and D
opt.nLayers = 4 # number of conv layers in G and D
# Hardware settings
opt.workers = 4 # number of data-loading workers
opt.cuda = True # use CUDA
opt.gpu = 0 # GPU id
# Optimisation scheme
opt.batchSize = 128 # minibatch size
opt.nIteration = 75001 # number of training iterations
opt.lrG = 2e-4 # learning rate for G
opt.lrD = 5e-5 # learning rate for D
# Save/Load networks
opt.checkpointDir = '.' # checkpoints directory
opt.load = 0 # if > 0, load given checkpoint
opt.checkpointFreq = 500 # frequency of checkpoints (in number of epochs)
#############################################################################
# Loading Weights #
#############################################################################
opt.netG = ''
opt.netD = ''
if opt.load > 0:
opt.netG = '%s/netG_%d.pth' % (opt.checkpointDir, opt.load)
opt.netD = '%s/netD_%d.pth' % (opt.checkpointDir, opt.load)
#############################################################################
# RandomSeed #
#############################################################################
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
#############################################################################
# CUDA #
#############################################################################
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.cuda:
torch.cuda.set_device(opt.gpu)
#############################################################################
# Dataloader #
#############################################################################
class PairCelebADataset(torch.utils.data.Dataset):
def __init__(self, dataPath, labelFile, transform=transforms.ToTensor()):
super(PairCelebADataset, self).__init__()
self.dataPath = dataPath
with open(labelFile, 'r') as f:
lines = np.array([p.split() for p in f.readlines()])
self.files = lines[:,0]
self.labels = lines[:,1].astype(int)
self.transform = transform
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
label = self.labels[idx]
file1 = self.files[idx]
file2 = np.random.choice(self.files[self.labels == label])
img1 = self.transform(Image.open(os.path.join(self.dataPath, file1)))
img2 = self.transform(Image.open(os.path.join(self.dataPath, file2)))
return img1, img2, torch.LongTensor(1).fill_(int(label))
class Pair3DchairsDataset(torch.utils.data.Dataset):
def __init__(self, dataPath, transform=transforms.ToTensor()):
super(Pair3DchairsDataset, self).__init__()
self.dataPath = dataPath
self.folders = np.array(os.listdir(dataPath))
self.transform = transform
def __len__(self):
return len(self.folders)
def __getitem__(self, idx):
idA, idB = np.random.choice(os.listdir(os.path.join(self.dataPath, self.folders[idx])),2)
label = idx
imgA = Image.open(os.path.join(self.dataPath, self.folders[idx], idA))
imgB = Image.open(os.path.join(self.dataPath, self.folders[idx], idB))
imgA = self.transform(imgA)
imgB = self.transform(imgB)
return imgA, imgB, torch.LongTensor(1).fill_(int(label))
class Pair102flowersDataset(torch.utils.data.Dataset):
def __init__(self, dataPath, labelFile, nc, transform=transforms.ToTensor()):
super(Pair102flowersDataset, self).__init__()
self.dataPath = dataPath
self.files = np.sort(os.listdir(dataPath))
self.labels = scipy.io.loadmat(labelFile)['labels'][0]
self.transform = transform
self.nc = nc
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
label = self.labels[idx]
fileA = self.files[idx]
fileB = np.random.choice(self.files[self.labels == label])
imgA = Image.open(os.path.join(self.dataPath, fileA))
imgB = Image.open(os.path.join(self.dataPath, fileB))
imgA = self.transform(imgA)
imgB = self.transform(imgB)
if imgA.size(0) == 1:
imgA = imgA.repeat(self.nc,1,1)
if imgB.size(0) == 1:
imgB = imgB.repeat(self.nc,1,1)
return imgA[:self.nc], imgB[:self.nc], torch.LongTensor(1).fill_(int(label))
#############################################################################
# Datasets #
#############################################################################
if opt.dataset == 'celebA':
dataset = PairCelebADataset(os.path.join(opt.dataPath, "celebA/aligned"),
os.path.join(opt.dataPath, "celebA/identity_celebA_train.txt"),
transforms.Compose([transforms.CenterCrop(128),
transforms.Resize(opt.sizeX),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == '3Dchairs':
dataset = Pair3DchairsDataset(os.path.join(opt.dataPath, "rendered_chairs/train"),
transforms.Compose([transforms.CenterCrop(300),
transforms.Resize(opt.sizeX),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
elif opt.dataset == '102flowers':
dataset = Pair102flowersDataset(os.path.join(opt.dataPath, "102flowers/jpg"),
os.path.join(opt.dataPath, "102flowers/imagelabels.mat"),
opt.nc,
transforms.Compose([transforms.Resize(opt.sizeX),
transforms.CenterCrop(opt.sizeX),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
#############################################################################
# weights init #
#############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
if m.weight is not None:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
#############################################################################
# Modules #
#############################################################################
class _dcEncoder(nn.Module):
def __init__(self, nIn=3, nOut=1024, nf=64, nLayer=4, sizeX=64):
super(_dcEncoder, self).__init__()
self.mods = nn.Sequential()
sizeX = sizeX //2
self.mods.add_module("Conv0_%dx%dx%d" % (nf, sizeX, sizeX), nn.Conv2d(nIn, nf, 4, 2, 1, bias=False))
self.mods.add_module("BN0", nn.BatchNorm2d(nf))
self.mods.add_module("ReLU0", nn.ReLU(True))
for i in range(1,nLayer):
sizeX = sizeX //2
self.mods.add_module("Conv%d_%dx%dx%d" % (i, nf*2, sizeX, sizeX), nn.Conv2d(nf, nf*2, 4, 2, 1, bias=False))
self.mods.add_module("BN%d"% i, nn.BatchNorm2d(nf*2))
self.mods.add_module("ReLU%d" % i, nn.ReLU(True))
nf = nf * 2
self.mods.add_module("FC_%dx1x1" % nOut, nn.Conv2d(nf, nOut, sizeX, bias=False))
self.mods.apply(weights_init)
def forward(self, x):
return self.mods(x)
class _dcDecoder(nn.Module):
def __init__(self, nIn=1024, nOut=3, nf=512, nLayer=4, sizeX=64):
super(_dcDecoder, self).__init__()
sizeX = sizeX // (2**nLayer)
nf = nf * (2 ** (nLayer - 1))
self.mods = nn.Sequential()
self.mods.add_module("FC_%dx%dx%d" % (nf,sizeX,sizeX), nn.ConvTranspose2d(nIn, nf, sizeX, bias=False))
self.mods.add_module("BN0", nn.BatchNorm2d(nf))
self.mods.add_module("ReLU0", nn.ReLU(True))
for i in range(1,nLayer):
sizeX = sizeX * 2
self.mods.add_module("ConvTr%d_%dx%dx%d" % (i, nf//2, sizeX, sizeX), nn.ConvTranspose2d(nf, nf//2, 4, 2, 1, bias=False))
self.mods.add_module("BN%d"% i, nn.BatchNorm2d(nf//2))
self.mods.add_module("ReLU%d" % i, nn.ReLU(True))
nf = nf // 2
self.mods.add_module("ConvTrO_%dx%dx%d" % (nf, sizeX, sizeX), nn.ConvTranspose2d(nf, nOut, 4, 2, 1, bias=False))
self.mods.apply(weights_init)
def forward(self, x):
return self.mods(x)
class _dcDiscriminator(nn.Module):
def __init__(self, nIn=3, nOut=1024, nf=64, nLayer=4, sizeX=64):
super(_dcDiscriminator, self).__init__()
self.mods = nn.Sequential()
sizeX = sizeX //2
self.mods.add_module("Conv0_%dx%dx%d" % (nf, sizeX, sizeX), nn.Conv2d(nIn, nf, 4, 2, 1, bias=False))
self.mods.add_module("LReLU0", nn.LeakyReLU(0.2))
for i in range(1,nLayer):
sizeX = sizeX //2
self.mods.add_module("Conv%d_%dx%dx%d" % (i, nf*2, sizeX, sizeX), nn.Conv2d(nf, nf*2, 4, 2, 1, bias=False))
self.mods.add_module("BN%d"% i, nn.BatchNorm2d(nf*2))
self.mods.add_module("LReLU%d" % i, nn.LeakyReLU(0.2))
nf = nf * 2
self.mods.add_module("FC_%dx1x1" % nOut, nn.Conv2d(nf, nOut, sizeX, bias=False))
self.mods.apply(weights_init)
def forward(self, x):
return self.mods(x)
netG = _dcDecoder(nIn=opt.sizeZ, nOut=opt.nc*2, nf=opt.nf, nLayer=opt.nLayers, sizeX=opt.sizeX)
netD = _dcDiscriminator(nIn=opt.nc*2, nOut=1, nf=opt.nf, nLayer=opt.nLayers, sizeX=opt.sizeX)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netG)
print(netD)
discriminationLoss = nn.BCEWithLogitsLoss()
#############################################################################
# Placeholders #
#############################################################################
x1_real = torch.FloatTensor(opt.batchSize, opt.nc, opt.sizeX, opt.sizeX)
x2_real = torch.FloatTensor(opt.batchSize, opt.nc, opt.sizeX, opt.sizeX)
z = torch.FloatTensor(opt.batchSize, opt.sizeZ, 1, 1).normal_()
labelPos = torch.FloatTensor(opt.batchSize)
labelNeg = torch.FloatTensor(opt.batchSize)
#############################################################################
# Test data #
#############################################################################
z_test = torch.FloatTensor(opt.batchSize, opt.sizeZ, 1, 1).normal_()
#############################################################################
# To Cuda #
#############################################################################
if opt.cuda:
print("Convert to Cuda")
torch.cuda.set_device(opt.gpu)
netG.cuda()
netD.cuda()
discriminationLoss.cuda()
x1_real = x1_real.cuda()
x2_real = x2_real.cuda()
z = z.cuda()
labelPos = labelPos.cuda()
labelNeg = labelNeg.cuda()
z_test = z_test.cuda()
#############################################################################
# Optimizer #
#############################################################################
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(0.5, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(0.5, 0.999))
#############################################################################
# Train #
#############################################################################
print("Start Training")
iteration = opt.load * len(dataloader)
epoch = opt.load
while iteration <= opt.nIteration:
log_dNeg = []
log_dPos = []
for x1_cpu, x2_cpu, _ in tqdm(dataloader):
#######################
# Init iteration #
#######################
netG.train()
netD.train()
x1_real.resize_(x1_cpu.size(0), x1_cpu.size(1), x1_cpu.size(2), x1_cpu.size(3)).copy_(x1_cpu)
x2_real.resize_(x2_cpu.size(0), x2_cpu.size(1), x2_cpu.size(2), x2_cpu.size(3)).copy_(x2_cpu)
z.resize_(x1_cpu.size(0), opt.sizeZ, 1, 1).normal_()
labelPos.resize_(x1_cpu.size(0), 1, 1, 1).fill_(.9)
labelNeg.resize_(x1_cpu.size(0), 1, 1, 1).fill_(.1)
#######################
# Train #
#######################
# Generation Objective
netG.zero_grad()
x_generated = F.tanh(netG(Variable(z)))
generationObjective = discriminationLoss(netD(x_generated), Variable(labelPos))
generationObjective.backward()
# Discrimination gradients
netD.zero_grad()
dPos = netD(Variable(torch.cat((x1_real,x2_real),1)))
dNeg = netD(x_generated.detach())
discriminationObjective = discriminationLoss(dNeg, Variable(labelNeg)) + discriminationLoss(dPos, Variable(labelPos))
discriminationObjective.backward()
# Update weights
optimizerG.step()
optimizerD.step()
# Logs
dPos = dPos.detach()
dNeg = dNeg.detach()
dPos.volatile = True
dNeg.volatile = True
log_dPos.append(F.sigmoid(dPos).data.mean())
log_dNeg.append(F.sigmoid(dNeg).data.mean())
iteration += 1
epoch = epoch+1
print(epoch,
np.array(log_dPos).mean(),
np.array(log_dNeg).mean(),
)
with open('logs.dat', 'ab') as f:
np.savetxt(f, np.vstack((np.array(log_dPos),
np.array(log_dNeg),
)).T)
if epoch % opt.checkpointFreq == 0:
netG.eval()
x_test = F.tanh(netG(Variable(z_test, volatile=True)))
vutils.save_image(x_test.data.view(-1, opt.nc, opt.sizeX, opt.sizeX), "%d.png" % epoch, nrow=8, normalize=True, range=(-1,1))
# torch.save(netG.state_dict(), '%s/netG_%d.pth' % (opt.checkpointDir, epoch))
# torch.save(netD.state_dict(), '%s/netD_%d.pth' % (opt.checkpointDir, epoch))
|
the-stack_106_20179
|
# -*- coding: utf-8 -*-
"""Definition of the QueueOnce task and AlreadyQueued exception."""
from celery import Task, states
from celery.result import EagerResult
from .helpers import queue_once_key, import_backend
class AlreadyQueued(Exception):
def __init__(self, countdown):
self.message = "Expires in {} seconds".format(countdown)
self.countdown = countdown
try:
from inspect import signature
except:
from funcsigs import signature
class QueueOnce(Task):
abstract = True
once = {
'graceful': False,
'unlock_before_run': False
}
"""
'There can be only one'. - Highlander (1986)
An abstract task with the ability to detect if it has already been queued.
When running the task (through .delay/.apply_async) it checks if the task
is not already queued. By default it will raise an
AlreadyQueued exception if it is, but you can silence this by including
`once={'graceful': True}` in apply_async or in the task's settings.
Example:
>>> from celery_queue.tasks import QueueOnce
>>> from celery import task
>>> @task(base=QueueOnce, once={'graceful': True})
>>> def example(time):
>>> from time import sleep
>>> sleep(time)
"""
@property
def config(self):
app = self._get_app()
return app.conf
@property
def once_config(self):
return self.config.ONCE
@property
def once_backend(self):
return import_backend(self.once_config)
@property
def default_timeout(self):
return self.once_config['settings'].get('default_timeout', 60 * 60)
def unlock_before_run(self):
return self.once.get('unlock_before_run', False)
def __init__(self, *args, **kwargs):
self._signature = signature(self.run)
return super(QueueOnce, self).__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
# Only clear the lock before the task's execution if the
# "unlock_before_run" option is True
if self.unlock_before_run():
key = self.get_key(args, kwargs)
self.once_backend.clear_lock(key)
return super(QueueOnce, self).__call__(*args, **kwargs)
def apply_async(self, args=None, kwargs=None, **options):
"""
Attempts to queue a task.
Raises an AlreadyQueued exception if it is already queued.
:param \*args: positional arguments passed on to the task.
:param \*\*kwargs: keyword arguments passed on to the task.
:keyword \*\*once: (optional)
:param: graceful: (optional)
If True, doesn't raise an exception if already queued.
Instead returns None.
:param: timeout: (optional)
An `int` number of seconds after which the lock will expire.
If not set, defaults to 1 hour.
:param: keys: (optional)
"""
once_options = options.get('once', {})
once_graceful = once_options.get(
'graceful', self.once.get('graceful', False))
once_timeout = once_options.get(
'timeout', self.once.get('timeout', self.default_timeout))
if not options.get('retries'):
key = self.get_key(args, kwargs)
try:
self.once_backend.raise_or_lock(key, timeout=once_timeout)
except AlreadyQueued as e:
if once_graceful:
return EagerResult(None, None, states.REJECTED)
raise e
return super(QueueOnce, self).apply_async(args, kwargs, **options)
def _get_call_args(self, args, kwargs):
call_args = self._signature.bind(*args, **kwargs).arguments
# Remove the task instance from the kwargs. This only happens when the
# task has the 'bind' attribute set to True. We remove it, as the task
# has a memory pointer in its repr, that will change between the task
# caller and the celery worker
if isinstance(call_args.get('self'), Task):
del call_args['self']
return call_args
def get_key(self, args=None, kwargs=None):
"""
Generate the key from the name of the task (e.g. 'tasks.example') and
args/kwargs.
"""
restrict_to = self.once.get('keys', None)
args = args or {}
kwargs = kwargs or {}
call_args = self._get_call_args(args, kwargs)
key = queue_once_key(self.name, call_args, restrict_to)
return key
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
After a task has run (whether successfully or with a failure) clear the
lock if "unlock_before_run" is False.
"""
# Only clear the lock after the task's execution if the
# "unlock_before_run" option is False
if not self.unlock_before_run():
key = self.get_key(args, kwargs)
self.once_backend.clear_lock(key)
|
the-stack_106_20180
|
""" Google Text to Speech
Available Commands:
.tts LanguageCode as reply to a message
.tts LanguageCode | text to speak"""
import asyncio
import os
import subprocess
from datetime import datetime
from gtts import gTTS
from FIREX.utils import admin_cmd, edit_or_reply, sudo_cmd
from userbot.cmdhelp import CmdHelp
@bot.on(admin_cmd(pattern=r"tts (.*)"))
@bot.on(sudo_cmd(pattern=r"tts (.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
start = datetime.now()
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
text = previous_message.message
lan = input_str
elif "|" in input_str:
lan, text = input_str.split("|")
else:
await edit_or_reply(event, "Invalid Syntax. Module stopping.")
return
text = text.strip()
lan = lan.strip()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "voice.ogg"
try:
# https://github.com/SpEcHiDe/UniBorg/commit/17f8682d5d2df7f3921f50271b5b6722c80f4106
tts = gTTS(text, lang=lan)
tts.save(required_file_name)
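# Re-encode the saved speech with ffmpeg into an Opus file (flags below) so
# Telegram will accept it as a voice note.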
command_to_execute = [
"ffmpeg",
"-i",
required_file_name,
"-map",
"0:a",
"-codec:a",
"libopus",
"-b:a",
"100k",
"-vbr",
"on",
required_file_name + ".opus",
]
try:
t_response = subprocess.check_output(
command_to_execute, stderr=subprocess.STDOUT
)
except (subprocess.CalledProcessError, NameError, FileNotFoundError) as exc:
await edit_or_reply(event, str(exc))
# continue sending required_file_name
else:
os.remove(required_file_name)
required_file_name = required_file_name + ".opus"
end = datetime.now()
ms = (end - start).seconds
await borg.send_file(
event.chat_id,
required_file_name,
# caption="Processed {} ({}) in {} seconds!".format(text[0:97], lan, ms),
reply_to=event.message.reply_to_msg_id,
allow_cache=False,
voice_note=True,
)
os.remove(required_file_name)
await edit_or_reply(
event, "Processed {} ({}) in {} seconds!".format(text[0:97], lan, ms)
)
await asyncio.sleep(5)
await event.delete()
except Exception as e:
await edit_or_reply(event, str(e))
CmdHelp("tts").add_command(
"tts",
"<reply to text>/<text>",
"Google Text To Speech Module. Alternetive for Voice module. Use .voice if this doesn't work",
).add()
|
the-stack_106_20181
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import annotations
from enum import Enum
from typing import (
Dict,
ItemsView,
Iterable,
KeysView,
List,
Mapping,
NamedTuple,
OrderedDict,
ValuesView,
)
from braket.circuits.instruction import Instruction
from braket.circuits.noise import Noise
from braket.circuits.qubit import Qubit
from braket.circuits.qubit_set import QubitSet
class MomentType(str, Enum):
"""
The type of moments.
GATE: a gate
NOISE: a noise channel added directly to the circuit
GATE_NOISE: a gate-based noise channel
INITIALIZATION_NOISE: an initialization noise channel
READOUT_NOISE: a readout noise channel
"""
GATE = "gate"
NOISE = "noise"
GATE_NOISE = "gate_noise"
INITIALIZATION_NOISE = "initialization_noise"
READOUT_NOISE = "readout_noise"
class MomentsKey(NamedTuple):
"""Key of the Moments mapping.
Args:
time: moment
qubits: qubit set
moment_type: can be GATE, NOISE, or GATE_NOISE which is associated with gates;
and READOUT_NOISE or INITIALIZATION_NOISE.
noise_index: the number of noise channels at the same moment. For gates, this is the
number of gate_noise channels associated with that gate. For all other noise
types, noise_index starts from 0; but for gate noise, it starts from 1.
"""
time: int
qubits: QubitSet
moment_type: MomentType
noise_index: int
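# Illustrative sketch (not part of the original module): a key for a gate
# acting on qubit 0 in the first time slice would look like
#   MomentsKey(time=0, qubits=QubitSet([0]), moment_type=MomentType.GATE, noise_index=0)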
class Moments(Mapping[MomentsKey, Instruction]):
"""
An ordered mapping of `MomentsKey` or `NoiseMomentsKey` to `Instruction`. The
core data structure that contains instructions, the order they are inserted in, and
time slices when they occur. `Moments` implements `Mapping` and functions the same as
a read-only dictionary. It is mutable only through the `add()` method.
This data structure is useful for determining dependencies between instructions, for
example when printing or optimizing the circuit structure before sending it to a quantum
device. The original insertion order is preserved and can be retrieved via the `values()`
method.
Args:
instructions (Iterable[Instruction], optional): Instructions to initialize self.
Default = [].
Examples:
>>> moments = Moments()
>>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.CNot(), [0, 1])])
>>> moments.add([Instruction(Gate.H(), 0), Instruction(Gate.H(), 1)])
>>> for i, item in enumerate(moments.items()):
... print(f"Item {i}")
... print(f"\\tKey: {item[0]}")
... print(f"\\tValue: {item[1]}")
...
Item 0
Key: MomentsKey(time=0, qubits=QubitSet([Qubit(0)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
Item 1
Key: MomentsKey(time=1, qubits=QubitSet([Qubit(0), Qubit(1)]))
Value: Instruction('operator': CNOT, 'target': QubitSet([Qubit(0), Qubit(1)]))
Item 2
Key: MomentsKey(time=2, qubits=QubitSet([Qubit(0)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(0)]))
Item 3
Key: MomentsKey(time=2, qubits=QubitSet([Qubit(1)]))
Value: Instruction('operator': H, 'target': QubitSet([Qubit(1)]))
"""
def __init__(self, instructions: Iterable[Instruction] = None):
self._moments: OrderedDict[MomentsKey, Instruction] = OrderedDict()
self._max_times: Dict[Qubit, int] = {}
self._qubits = QubitSet()
self._depth = 0
self.add(instructions or [])
@property
def depth(self) -> int:
"""int: Get the depth (number of slices) of self."""
return self._depth
@property
def qubit_count(self) -> int:
"""int: Get the number of qubits used across all of the instructions."""
return len(self._qubits)
@property
def qubits(self) -> QubitSet:
"""
QubitSet: Get the qubits used across all of the instructions. The order of qubits is based
on the order in which the instructions were added.
Note:
Don't mutate this object, any changes may impact the behavior of this class and / or
consumers. If you need to mutate this, then copy it via `QubitSet(moments.qubits())`.
"""
return self._qubits
def time_slices(self) -> Dict[int, List[Instruction]]:
"""
Get instructions keyed by time.
Returns:
Dict[int, List[Instruction]]: Key is the time and value is a list of instructions that
occur at that moment in time. The instructions within each list are in no particular order.
Note:
This is a computed result over self and can be freely mutated. This is re-computed with
every call, with a computational runtime O(N) where N is the number
of instructions in self.
"""
time_slices = {}
self.sort_moments()
for key, instruction in self._moments.items():
instructions = time_slices.get(key.time, [])
instructions.append(instruction)
time_slices[key.time] = instructions
return time_slices
def add(self, instructions: Iterable[Instruction], noise_index: int = 0) -> None:
"""
Add instructions to self.
Args:
instructions (Iterable[Instruction]): Instructions to add to self. The instruction is
added to the max time slice in which the instruction fits.
"""
for instruction in instructions:
self._add(instruction, noise_index)
def _add(self, instruction: Instruction, noise_index: int = 0) -> None:
if isinstance(instruction.operator, Noise):
self.add_noise(instruction)
else:
qubit_range = instruction.target
time = max([self._max_time_for_qubit(qubit) for qubit in qubit_range]) + 1
# Mark all qubits in qubit_range with max_time
for qubit in qubit_range:
self._max_times[qubit] = max(time, self._max_time_for_qubit(qubit))
self._moments[
MomentsKey(time, instruction.target, MomentType.GATE, noise_index)
] = instruction
self._qubits.update(instruction.target)
self._depth = max(self._depth, time + 1)
def add_noise(
self, instruction: Instruction, input_type: str = "noise", noise_index: int = 0
) -> None:
qubit_range = instruction.target
time = max(0, *[self._max_time_for_qubit(qubit) for qubit in qubit_range])
if input_type == MomentType.INITIALIZATION_NOISE:
time = 0
while MomentsKey(time, qubit_range, input_type, noise_index) in self._moments:
noise_index = noise_index + 1
self._moments[MomentsKey(time, qubit_range, input_type, noise_index)] = instruction
self._qubits.update(qubit_range)
def sort_moments(self) -> None:
"""
Sort the moments into a consistent order:
1. Move readout noise to the end.
2. Move initialization noise to the beginning.
"""
# key for NOISE, GATE and GATE_NOISE
key_noise = []
# key for INITIALIZATION_NOISE
key_initialization_noise = []
# key for READOUT_NOISE
key_readout_noise = []
moment_copy = OrderedDict()
sorted_moment = OrderedDict()
for key, instruction in self._moments.items():
moment_copy[key] = instruction
if key.moment_type == MomentType.READOUT_NOISE:
key_readout_noise.append(key)
elif key.moment_type == MomentType.INITIALIZATION_NOISE:
key_initialization_noise.append(key)
else:
key_noise.append(key)
for key in key_initialization_noise:
sorted_moment[key] = moment_copy[key]
for key in key_noise:
sorted_moment[key] = moment_copy[key]
# find the max time in the circuit and make it the time for readout noise
max_time = max(self._depth - 1, 0)
for key in key_readout_noise:
sorted_moment[
MomentsKey(max_time, key.qubits, MomentType.READOUT_NOISE, key.noise_index)
] = moment_copy[key]
self._moments = sorted_moment
def _max_time_for_qubit(self, qubit: Qubit) -> int:
return self._max_times.get(qubit, -1)
#
# Implement abstract methods, default to calling self's underlying dictionary
#
def keys(self) -> KeysView[MomentsKey]:
"""Return a view of self's keys."""
return self._moments.keys()
def items(self) -> ItemsView[MomentsKey, Instruction]:
"""Return a view of self's (key, instruction)."""
return self._moments.items()
def values(self) -> ValuesView[Instruction]:
"""Return a view of self's instructions."""
self.sort_moments()
return self._moments.values()
def get(self, key: MomentsKey, default=None) -> Instruction:
"""
Get the instruction in self by key.
Args:
key (MomentsKey): Key of the instruction to fetch.
default (Any, optional): Value to return if `key` is not in `moments`. Default = `None`.
Returns:
Instruction: `moments[key]` if `key` in `moments`, else `default` is returned.
"""
return self._moments.get(key, default)
def __getitem__(self, key):
return self._moments.__getitem__(key)
def __iter__(self):
return self._moments.__iter__()
def __len__(self):
return self._moments.__len__()
def __contains__(self, item):
return self._moments.__contains__(item)
def __eq__(self, other):
if isinstance(other, Moments):
return (self._moments) == (other._moments)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
return not result
return NotImplemented
def __repr__(self):
return self._moments.__repr__()
def __str__(self):
return self._moments.__str__()
|
the-stack_106_20182
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import mock
from mock import patch
# External imports
# Bokeh imports
from bokeh.core.validation import check_integrity
from bokeh.plotting import figure
from bokeh.models import GlyphRenderer, Label, Plot, LinearAxis
from bokeh.models.ranges import FactorRange, DataRange1d, Range1d
from bokeh.models.scales import CategoricalScale, LinearScale, LogScale
from bokeh.models.tools import PanTool
# Module under test
import bokeh.models.plots as bmp
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
_LEGEND_EMPTY_WARNING = """
You are attemptings to set `plot.legend.location` on a plot that has zero legends added, this will have no effect.
Before legend properties can be set, you must add a Legend explicitly, or call a glyph method with the 'legend' parameter set.
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class TestPlotLegendProperty(object):
def test_basic(self):
plot = figure(tools='')
x = plot.legend
assert isinstance(x, bmp._list_attr_splat)
assert len(x) == 0
plot.circle([1,2], [3,4], legend="foo")
x = plot.legend
assert isinstance(x, bmp._list_attr_splat)
assert len(x) == 1
def test_warning(self):
plot = figure(tools='')
with pytest.warns(UserWarning) as warns:
plot.legend.location = "above"
assert len(warns) == 1
assert warns[0].message.args[0] == _LEGEND_EMPTY_WARNING
class TestPlotSelect(object):
def setup_method(self):
self._plot = figure(tools='pan')
self._plot.circle([1,2,3], [3,2,1], name='foo')
@patch('bokeh.models.plots.find')
def test_string_arg(self, mock_find):
self._plot.select('foo')
assert mock_find.called
assert mock_find.call_args[0][1] == dict(name='foo')
@patch('bokeh.models.plots.find')
def test_type_arg(self, mock_find):
self._plot.select(PanTool)
assert mock_find.called
assert mock_find.call_args[0][1] == dict(type=PanTool)
@patch('bokeh.models.plots.find')
def test_kwargs(self, mock_find):
kw = dict(name='foo', type=GlyphRenderer)
self._plot.select(**kw)
assert mock_find.called
assert mock_find.call_args[0][1] == kw
@patch('bokeh.models.plots.find')
def test_single_selector_kwarg(self, mock_find):
kw = dict(name='foo', type=GlyphRenderer)
self._plot.select(selector=kw)
assert mock_find.called
assert mock_find.call_args[0][1] == kw
def test_selector_kwarg_and_extra_kwargs(self):
with pytest.raises(TypeError) as exc:
self._plot.select(selector=dict(foo='foo'), bar='bar')
assert "when passing 'selector' keyword arg, not other keyword args may be present" == str(exc.value)
def test_bad_arg_type(self):
with pytest.raises(TypeError) as exc:
self._plot.select(10)
assert "selector must be a dictionary, string or plot object." == str(exc.value)
def test_too_many_args(self):
with pytest.raises(TypeError) as exc:
self._plot.select('foo', 'bar')
assert 'select accepts at most ONE positional argument.' == str(exc.value)
def test_no_input(self):
with pytest.raises(TypeError) as exc:
self._plot.select()
assert 'select requires EITHER a positional argument, OR keyword arguments.' == str(exc.value)
def test_arg_and_kwarg(self):
with pytest.raises(TypeError) as exc:
self._plot.select('foo', type=PanTool)
assert 'select accepts EITHER a positional argument, OR keyword arguments (not both).' == str(exc.value)
class TestPlotValidation(object):
def test_missing_renderers(self):
p = figure()
p.renderers = []
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.warning.call_count == 1
assert mock_logger.warning.call_args[0][0].startswith("W-1000 (MISSING_RENDERERS): Plot has no renderers")
def test_missing_scale(self):
p = figure()
p.x_scale = None
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1008 (REQUIRED_SCALE): A required Scale object is missing: x_scale")
p.y_scale = None
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1008 (REQUIRED_SCALE): A required Scale object is missing: x_scale, y_scale")
def test_missing_range(self):
p = figure()
p.x_range = None
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1004 (REQUIRED_RANGE): A required Range object is missing: x_range")
p.y_range = None
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1004 (REQUIRED_RANGE): A required Range object is missing: x_range, y_range")
def test_bad_extra_range_name(self):
p = figure()
p.xaxis.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configued with a name that does not correspond to any range: x_range_name='junk' [LinearAxis"
)
p = figure()
p.extra_x_ranges['foo'] = Range1d()
p.grid.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.log') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configued with a name that does not correspond to any range: x_range_name='junk' [Grid"
)
assert mock_logger.error.call_args[0][0].count("Grid") == 2
def test_plot_add_layout_raises_error_if_not_render():
plot = figure()
with pytest.raises(ValueError):
plot.add_layout(Range1d())
def test_plot_add_layout_raises_error_if_plot_already_on_annotation():
plot = figure()
with pytest.raises(ValueError):
plot.add_layout(Label(plot=plot))
def test_plot_add_layout_adds_label_to_plot_renderers():
plot = figure()
label = Label()
plot.add_layout(label)
assert label in plot.renderers
def test_plot_add_layout_adds_axis_to_renderers_and_side_renderers():
plot = figure()
axis = LinearAxis()
plot.add_layout(axis, 'left')
assert axis in plot.renderers
assert axis in plot.left
def test_sizing_mode_property_is_fixed_by_default():
plot = figure()
assert plot.sizing_mode == 'fixed'
class BaseTwinAxis(object):
"""Base class for testing extra ranges"""
def verify_axis(self, axis_name):
plot = Plot()
range_obj = getattr(plot, 'extra_{}_ranges'.format(axis_name))
range_obj['foo_range'] = self.get_range_instance()
assert range_obj['foo_range']
def test_x_range(self):
self.verify_axis('x')
def test_y_range(self):
self.verify_axis('y')
@staticmethod
def get_range_instance():
raise NotImplementedError
class TestCategoricalTwinAxis(BaseTwinAxis, object):
"""Test whether extra x and y ranges can be categorical"""
@staticmethod
def get_range_instance():
return FactorRange('foo', 'bar')
class TestLinearTwinAxis(BaseTwinAxis, object):
"""Test whether extra x and y ranges can be Range1d"""
@staticmethod
def get_range_instance():
return Range1d(0, 42)
def test_plot_with_no_title_specified_creates_an_empty_title():
plot = Plot()
assert plot.title.text == ""
def test_plot__scale_classmethod():
assert isinstance(Plot._scale("auto"), LinearScale)
assert isinstance(Plot._scale("linear"), LinearScale)
assert isinstance(Plot._scale("log"), LogScale)
assert isinstance(Plot._scale("categorical"), CategoricalScale)
with pytest.raises(ValueError):
Plot._scale("malformed_type")
def test__check_required_scale_has_scales():
plot = Plot()
check = plot._check_required_scale()
assert check == []
def test__check_required_scale_missing_scales():
plot = Plot(x_scale=None, y_scale=None)
check = plot._check_required_scale()
assert check != []
def test__check_compatible_scale_and_ranges_compat_numeric():
plot = Plot(x_scale=LinearScale(), x_range=Range1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
plot = Plot(y_scale=LogScale(), y_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_compat_factor():
plot = Plot(x_scale=CategoricalScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_incompat_numeric_scale_and_factor_range():
plot = Plot(x_scale=LinearScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check != []
def test__check_compatible_scale_and_ranges_incompat_factor_scale_and_numeric_range():
plot = Plot(x_scale=CategoricalScale(), x_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check != []
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
the-stack_106_20184
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for apitools.base.protorpclite.messages."""
import pickle
import re
import sys
import types
import unittest
import six
from apitools.base.protorpclite import descriptor
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import test_util
# This package plays lots of games with modifying global variables inside
# test cases. Hence:
# pylint:disable=function-redefined
# pylint:disable=global-variable-not-assigned
# pylint:disable=global-variable-undefined
# pylint:disable=redefined-outer-name
# pylint:disable=undefined-variable
# pylint:disable=unused-variable
# pylint:disable=too-many-lines
class ModuleInterfaceTest(test_util.ModuleInterfaceTest,
test_util.TestCase):
MODULE = messages
class ValidationErrorTest(test_util.TestCase):
def testStr_NoFieldName(self):
"""Test string version of ValidationError when no name provided."""
self.assertEquals('Validation error',
str(messages.ValidationError('Validation error')))
def testStr_FieldName(self):
"""Test string version of ValidationError when no name provided."""
validation_error = messages.ValidationError('Validation error')
validation_error.field_name = 'a_field'
self.assertEquals('Validation error', str(validation_error))
class EnumTest(test_util.TestCase):
def setUp(self):
"""Set up tests."""
        # Redefine the Color class for each test so that changes to it (an
        # error) in one test do not affect other tests.
global Color # pylint:disable=global-variable-not-assigned
# pylint:disable=unused-variable
class Color(messages.Enum):
RED = 20
ORANGE = 2
YELLOW = 40
GREEN = 4
BLUE = 50
INDIGO = 5
VIOLET = 80
def testNames(self):
"""Test that names iterates over enum names."""
self.assertEquals(
set(['BLUE', 'GREEN', 'INDIGO', 'ORANGE', 'RED',
'VIOLET', 'YELLOW']),
set(Color.names()))
def testNumbers(self):
"""Tests that numbers iterates of enum numbers."""
self.assertEquals(set([2, 4, 5, 20, 40, 50, 80]), set(Color.numbers()))
def testIterate(self):
"""Test that __iter__ iterates over all enum values."""
self.assertEquals(set(Color),
set([Color.RED,
Color.ORANGE,
Color.YELLOW,
Color.GREEN,
Color.BLUE,
Color.INDIGO,
Color.VIOLET]))
def testNaturalOrder(self):
"""Test that natural order enumeration is in numeric order."""
self.assertEquals([Color.ORANGE,
Color.GREEN,
Color.INDIGO,
Color.RED,
Color.YELLOW,
Color.BLUE,
Color.VIOLET],
sorted(Color))
def testByName(self):
"""Test look-up by name."""
self.assertEquals(Color.RED, Color.lookup_by_name('RED'))
self.assertRaises(KeyError, Color.lookup_by_name, 20)
self.assertRaises(KeyError, Color.lookup_by_name, Color.RED)
def testByNumber(self):
"""Test look-up by number."""
self.assertRaises(KeyError, Color.lookup_by_number, 'RED')
self.assertEquals(Color.RED, Color.lookup_by_number(20))
self.assertRaises(KeyError, Color.lookup_by_number, Color.RED)
def testConstructor(self):
"""Test that constructor look-up by name or number."""
self.assertEquals(Color.RED, Color('RED'))
self.assertEquals(Color.RED, Color(u'RED'))
self.assertEquals(Color.RED, Color(20))
if six.PY2:
self.assertEquals(Color.RED, Color(long(20)))
self.assertEquals(Color.RED, Color(Color.RED))
self.assertRaises(TypeError, Color, 'Not exists')
self.assertRaises(TypeError, Color, 'Red')
self.assertRaises(TypeError, Color, 100)
self.assertRaises(TypeError, Color, 10.0)
def testLen(self):
"""Test that len function works to count enums."""
self.assertEquals(7, len(Color))
def testNoSubclasses(self):
"""Test that it is not possible to sub-class enum classes."""
def declare_subclass():
class MoreColor(Color):
pass
self.assertRaises(messages.EnumDefinitionError,
declare_subclass)
def testClassNotMutable(self):
"""Test that enum classes themselves are not mutable."""
self.assertRaises(AttributeError,
setattr,
Color,
'something_new',
10)
def testInstancesMutable(self):
"""Test that enum instances are not mutable."""
self.assertRaises(TypeError,
setattr,
Color.RED,
'something_new',
10)
def testDefEnum(self):
"""Test def_enum works by building enum class from dict."""
WeekDay = messages.Enum.def_enum({'Monday': 1,
'Tuesday': 2,
'Wednesday': 3,
'Thursday': 4,
'Friday': 6,
'Saturday': 7,
'Sunday': 8},
'WeekDay')
self.assertEquals('Wednesday', WeekDay(3).name)
self.assertEquals(6, WeekDay('Friday').number)
self.assertEquals(WeekDay.Sunday, WeekDay('Sunday'))
def testNonInt(self):
"""Test that non-integer values rejection by enum def."""
self.assertRaises(messages.EnumDefinitionError,
messages.Enum.def_enum,
{'Bad': '1'},
'BadEnum')
def testNegativeInt(self):
"""Test that negative numbers rejection by enum def."""
self.assertRaises(messages.EnumDefinitionError,
messages.Enum.def_enum,
{'Bad': -1},
'BadEnum')
def testLowerBound(self):
"""Test that zero is accepted by enum def."""
class NotImportant(messages.Enum):
"""Testing for value zero"""
VALUE = 0
self.assertEquals(0, int(NotImportant.VALUE))
def testTooLargeInt(self):
"""Test that numbers too large are rejected."""
self.assertRaises(messages.EnumDefinitionError,
messages.Enum.def_enum,
{'Bad': (2 ** 29)},
'BadEnum')
def testRepeatedInt(self):
"""Test duplicated numbers are forbidden."""
self.assertRaises(messages.EnumDefinitionError,
messages.Enum.def_enum,
{'Ok': 1, 'Repeated': 1},
'BadEnum')
def testStr(self):
"""Test converting to string."""
self.assertEquals('RED', str(Color.RED))
self.assertEquals('ORANGE', str(Color.ORANGE))
def testInt(self):
"""Test converting to int."""
self.assertEquals(20, int(Color.RED))
self.assertEquals(2, int(Color.ORANGE))
def testRepr(self):
"""Test enum representation."""
self.assertEquals('Color(RED, 20)', repr(Color.RED))
self.assertEquals('Color(YELLOW, 40)', repr(Color.YELLOW))
def testDocstring(self):
"""Test that docstring is supported ok."""
class NotImportant(messages.Enum):
"""I have a docstring."""
VALUE1 = 1
self.assertEquals('I have a docstring.', NotImportant.__doc__)
def testDeleteEnumValue(self):
"""Test that enum values cannot be deleted."""
self.assertRaises(TypeError, delattr, Color, 'RED')
def testEnumName(self):
"""Test enum name."""
module_name = test_util.get_module_name(EnumTest)
self.assertEquals('%s.Color' % module_name, Color.definition_name())
self.assertEquals(module_name, Color.outer_definition_name())
self.assertEquals(module_name, Color.definition_package())
def testDefinitionName_OverrideModule(self):
"""Test enum module is overriden by module package name."""
global package
try:
package = 'my.package'
self.assertEquals('my.package.Color', Color.definition_name())
self.assertEquals('my.package', Color.outer_definition_name())
self.assertEquals('my.package', Color.definition_package())
finally:
del package
def testDefinitionName_NoModule(self):
"""Test what happens when there is no module for enum."""
class Enum1(messages.Enum):
pass
original_modules = sys.modules
sys.modules = dict(sys.modules)
try:
del sys.modules[__name__]
self.assertEquals('Enum1', Enum1.definition_name())
self.assertEquals(None, Enum1.outer_definition_name())
self.assertEquals(None, Enum1.definition_package())
self.assertEquals(six.text_type, type(Enum1.definition_name()))
finally:
sys.modules = original_modules
def testDefinitionName_Nested(self):
"""Test nested Enum names."""
class MyMessage(messages.Message):
class NestedEnum(messages.Enum):
pass
class NestedMessage(messages.Message):
class NestedEnum(messages.Enum):
pass
module_name = test_util.get_module_name(EnumTest)
self.assertEquals('%s.MyMessage.NestedEnum' % module_name,
MyMessage.NestedEnum.definition_name())
self.assertEquals('%s.MyMessage' % module_name,
MyMessage.NestedEnum.outer_definition_name())
self.assertEquals(module_name,
MyMessage.NestedEnum.definition_package())
self.assertEquals(
'%s.MyMessage.NestedMessage.NestedEnum' % module_name,
MyMessage.NestedMessage.NestedEnum.definition_name())
self.assertEquals(
'%s.MyMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.NestedEnum.outer_definition_name())
self.assertEquals(
module_name,
MyMessage.NestedMessage.NestedEnum.definition_package())
def testMessageDefinition(self):
"""Test that enumeration knows its enclosing message definition."""
class OuterEnum(messages.Enum):
pass
self.assertEquals(None, OuterEnum.message_definition())
class OuterMessage(messages.Message):
class InnerEnum(messages.Enum):
pass
self.assertEquals(
OuterMessage, OuterMessage.InnerEnum.message_definition())
def testComparison(self):
"""Test comparing various enums to different types."""
class Enum1(messages.Enum):
VAL1 = 1
VAL2 = 2
class Enum2(messages.Enum):
VAL1 = 1
self.assertEquals(Enum1.VAL1, Enum1.VAL1)
self.assertNotEquals(Enum1.VAL1, Enum1.VAL2)
self.assertNotEquals(Enum1.VAL1, Enum2.VAL1)
self.assertNotEquals(Enum1.VAL1, 'VAL1')
self.assertNotEquals(Enum1.VAL1, 1)
self.assertNotEquals(Enum1.VAL1, 2)
self.assertNotEquals(Enum1.VAL1, None)
self.assertNotEquals(Enum1.VAL1, Enum2.VAL1)
self.assertTrue(Enum1.VAL1 < Enum1.VAL2)
self.assertTrue(Enum1.VAL2 > Enum1.VAL1)
self.assertNotEquals(1, Enum2.VAL1)
def testPickle(self):
"""Testing pickling and unpickling of Enum instances."""
colors = list(Color)
unpickled = pickle.loads(pickle.dumps(colors))
self.assertEquals(colors, unpickled)
# Unpickling shouldn't create new enum instances.
for i, color in enumerate(colors):
self.assertTrue(color is unpickled[i])
class FieldListTest(test_util.TestCase):
def setUp(self):
self.integer_field = messages.IntegerField(1, repeated=True)
def testConstructor(self):
self.assertEquals([1, 2, 3],
messages.FieldList(self.integer_field, [1, 2, 3]))
self.assertEquals([1, 2, 3],
messages.FieldList(self.integer_field, (1, 2, 3)))
self.assertEquals([], messages.FieldList(self.integer_field, []))
def testNone(self):
self.assertRaises(TypeError, messages.FieldList,
self.integer_field, None)
def testDoNotAutoConvertString(self):
string_field = messages.StringField(1, repeated=True)
self.assertRaises(messages.ValidationError,
messages.FieldList, string_field, 'abc')
def testConstructorCopies(self):
a_list = [1, 3, 6]
field_list = messages.FieldList(self.integer_field, a_list)
self.assertFalse(a_list is field_list)
self.assertFalse(field_list is
messages.FieldList(self.integer_field, field_list))
def testNonRepeatedField(self):
self.assertRaisesWithRegexpMatch(
messages.FieldDefinitionError,
'FieldList may only accept repeated fields',
messages.FieldList,
messages.IntegerField(1),
[])
def testConstructor_InvalidValues(self):
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
re.escape("Expected type %r "
"for IntegerField, found 1 (type %r)"
% (six.integer_types, str)),
messages.FieldList, self.integer_field, ["1", "2", "3"])
def testConstructor_Scalars(self):
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
"IntegerField is repeated. Found: 3",
messages.FieldList, self.integer_field, 3)
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
("IntegerField is repeated. Found: "
"<(list[_]?|sequence)iterator object"),
messages.FieldList, self.integer_field, iter([1, 2, 3]))
def testSetSlice(self):
field_list = messages.FieldList(self.integer_field, [1, 2, 3, 4, 5])
field_list[1:3] = [10, 20]
self.assertEquals([1, 10, 20, 4, 5], field_list)
def testSetSlice_InvalidValues(self):
field_list = messages.FieldList(self.integer_field, [1, 2, 3, 4, 5])
def setslice():
field_list[1:3] = ['10', '20']
msg_re = re.escape("Expected type %r "
"for IntegerField, found 10 (type %r)"
% (six.integer_types, str))
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
msg_re,
setslice)
def testSetItem(self):
field_list = messages.FieldList(self.integer_field, [2])
field_list[0] = 10
self.assertEquals([10], field_list)
def testSetItem_InvalidValues(self):
field_list = messages.FieldList(self.integer_field, [2])
def setitem():
field_list[0] = '10'
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
re.escape("Expected type %r "
"for IntegerField, found 10 (type %r)"
% (six.integer_types, str)),
setitem)
def testAppend(self):
field_list = messages.FieldList(self.integer_field, [2])
field_list.append(10)
self.assertEquals([2, 10], field_list)
def testAppend_InvalidValues(self):
field_list = messages.FieldList(self.integer_field, [2])
field_list.name = 'a_field'
def append():
field_list.append('10')
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
re.escape("Expected type %r "
"for IntegerField, found 10 (type %r)"
% (six.integer_types, str)),
append)
def testExtend(self):
field_list = messages.FieldList(self.integer_field, [2])
field_list.extend([10])
self.assertEquals([2, 10], field_list)
def testExtend_InvalidValues(self):
field_list = messages.FieldList(self.integer_field, [2])
def extend():
field_list.extend(['10'])
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
re.escape("Expected type %r "
"for IntegerField, found 10 (type %r)"
% (six.integer_types, str)),
extend)
def testInsert(self):
field_list = messages.FieldList(self.integer_field, [2, 3])
field_list.insert(1, 10)
self.assertEquals([2, 10, 3], field_list)
def testInsert_InvalidValues(self):
field_list = messages.FieldList(self.integer_field, [2, 3])
def insert():
field_list.insert(1, '10')
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
re.escape("Expected type %r "
"for IntegerField, found 10 (type %r)"
% (six.integer_types, str)),
insert)
def testPickle(self):
"""Testing pickling and unpickling of FieldList instances."""
field_list = messages.FieldList(self.integer_field, [1, 2, 3, 4, 5])
unpickled = pickle.loads(pickle.dumps(field_list))
self.assertEquals(field_list, unpickled)
self.assertIsInstance(unpickled.field, messages.IntegerField)
self.assertEquals(1, unpickled.field.number)
self.assertTrue(unpickled.field.repeated)
class FieldTest(test_util.TestCase):
def ActionOnAllFieldClasses(self, action):
"""Test all field classes except Message and Enum.
Message and Enum require separate tests.
Args:
action: Callable that takes the field class as a parameter.
"""
classes = (messages.IntegerField,
messages.FloatField,
messages.BooleanField,
messages.BytesField,
messages.StringField)
for field_class in classes:
action(field_class)
def testNumberAttribute(self):
"""Test setting the number attribute."""
def action(field_class):
# Check range.
self.assertRaises(messages.InvalidNumberError,
field_class,
0)
self.assertRaises(messages.InvalidNumberError,
field_class,
-1)
self.assertRaises(messages.InvalidNumberError,
field_class,
messages.MAX_FIELD_NUMBER + 1)
# Check reserved.
self.assertRaises(messages.InvalidNumberError,
field_class,
messages.FIRST_RESERVED_FIELD_NUMBER)
self.assertRaises(messages.InvalidNumberError,
field_class,
messages.LAST_RESERVED_FIELD_NUMBER)
self.assertRaises(messages.InvalidNumberError,
field_class,
'1')
# This one should work.
field_class(number=1)
self.ActionOnAllFieldClasses(action)
def testRequiredAndRepeated(self):
"""Test setting the required and repeated fields."""
def action(field_class):
field_class(1, required=True)
field_class(1, repeated=True)
self.assertRaises(messages.FieldDefinitionError,
field_class,
1,
required=True,
repeated=True)
self.ActionOnAllFieldClasses(action)
def testInvalidVariant(self):
"""Test field with invalid variants."""
def action(field_class):
if field_class is not message_types.DateTimeField:
self.assertRaises(messages.InvalidVariantError,
field_class,
1,
variant=messages.Variant.ENUM)
self.ActionOnAllFieldClasses(action)
def testDefaultVariant(self):
"""Test that default variant is used when not set."""
def action(field_class):
field = field_class(1)
self.assertEquals(field_class.DEFAULT_VARIANT, field.variant)
self.ActionOnAllFieldClasses(action)
def testAlternateVariant(self):
"""Test that default variant is used when not set."""
field = messages.IntegerField(1, variant=messages.Variant.UINT32)
self.assertEquals(messages.Variant.UINT32, field.variant)
def testDefaultFields_Single(self):
"""Test default field is correct type (single)."""
defaults = {
messages.IntegerField: 10,
messages.FloatField: 1.5,
messages.BooleanField: False,
messages.BytesField: b'abc',
messages.StringField: u'abc',
}
def action(field_class):
field_class(1, default=defaults[field_class])
self.ActionOnAllFieldClasses(action)
        # Run defaults test again checking for str/unicode compatibility.
defaults[messages.StringField] = 'abc'
self.ActionOnAllFieldClasses(action)
def testStringField_BadUnicodeInDefault(self):
"""Test binary values in string field."""
self.assertRaisesWithRegexpMatch(
messages.InvalidDefaultError,
r"Invalid default value for StringField:.*: "
r"Field encountered non-ASCII string .*: "
r"'ascii' codec can't decode byte 0x89 in position 0: "
r"ordinal not in range",
messages.StringField, 1, default=b'\x89')
def testDefaultFields_InvalidSingle(self):
"""Test default field is correct type (invalid single)."""
def action(field_class):
self.assertRaises(messages.InvalidDefaultError,
field_class,
1,
default=object())
self.ActionOnAllFieldClasses(action)
def testDefaultFields_InvalidRepeated(self):
"""Test default field does not accept defaults."""
self.assertRaisesWithRegexpMatch(
messages.FieldDefinitionError,
'Repeated fields may not have defaults',
messages.StringField, 1, repeated=True, default=[1, 2, 3])
def testDefaultFields_None(self):
"""Test none is always acceptable."""
def action(field_class):
field_class(1, default=None)
field_class(1, required=True, default=None)
field_class(1, repeated=True, default=None)
self.ActionOnAllFieldClasses(action)
def testDefaultFields_Enum(self):
"""Test the default for enum fields."""
class Symbol(messages.Enum):
ALPHA = 1
BETA = 2
GAMMA = 3
field = messages.EnumField(Symbol, 1, default=Symbol.ALPHA)
self.assertEquals(Symbol.ALPHA, field.default)
def testDefaultFields_EnumStringDelayedResolution(self):
"""Test that enum fields resolve default strings."""
field = messages.EnumField(
'apitools.base.protorpclite.descriptor.FieldDescriptor.Label',
1,
default='OPTIONAL')
self.assertEquals(
descriptor.FieldDescriptor.Label.OPTIONAL, field.default)
def testDefaultFields_EnumIntDelayedResolution(self):
"""Test that enum fields resolve default integers."""
field = messages.EnumField(
'apitools.base.protorpclite.descriptor.FieldDescriptor.Label',
1,
default=2)
self.assertEquals(
descriptor.FieldDescriptor.Label.REQUIRED, field.default)
def testDefaultFields_EnumOkIfTypeKnown(self):
"""Test enum fields accept valid default values when type is known."""
field = messages.EnumField(descriptor.FieldDescriptor.Label,
1,
default='REPEATED')
self.assertEquals(
descriptor.FieldDescriptor.Label.REPEATED, field.default)
def testDefaultFields_EnumForceCheckIfTypeKnown(self):
"""Test that enum fields validate default values if type is known."""
self.assertRaisesWithRegexpMatch(TypeError,
'No such value for NOT_A_LABEL in '
'Enum Label',
messages.EnumField,
descriptor.FieldDescriptor.Label,
1,
default='NOT_A_LABEL')
def testDefaultFields_EnumInvalidDelayedResolution(self):
"""Test that enum fields raise errors upon delayed resolution error."""
field = messages.EnumField(
'apitools.base.protorpclite.descriptor.FieldDescriptor.Label',
1,
default=200)
self.assertRaisesWithRegexpMatch(TypeError,
'No such value for 200 in Enum Label',
getattr,
field,
'default')
def testValidate_Valid(self):
"""Test validation of valid values."""
values = {
messages.IntegerField: 10,
messages.FloatField: 1.5,
messages.BooleanField: False,
messages.BytesField: b'abc',
messages.StringField: u'abc',
}
def action(field_class):
# Optional.
field = field_class(1)
field.validate(values[field_class])
# Required.
field = field_class(1, required=True)
field.validate(values[field_class])
# Repeated.
field = field_class(1, repeated=True)
field.validate([])
field.validate(())
field.validate([values[field_class]])
field.validate((values[field_class],))
# Right value, but not repeated.
self.assertRaises(messages.ValidationError,
field.validate,
values[field_class])
self.assertRaises(messages.ValidationError,
field.validate,
values[field_class])
self.ActionOnAllFieldClasses(action)
def testValidate_Invalid(self):
"""Test validation of valid values."""
values = {
messages.IntegerField: "10",
messages.FloatField: "blah",
messages.BooleanField: 0,
messages.BytesField: 10.20,
messages.StringField: 42,
}
def action(field_class):
# Optional.
field = field_class(1)
self.assertRaises(messages.ValidationError,
field.validate,
values[field_class])
# Required.
field = field_class(1, required=True)
self.assertRaises(messages.ValidationError,
field.validate,
values[field_class])
# Repeated.
field = field_class(1, repeated=True)
self.assertRaises(messages.ValidationError,
field.validate,
[values[field_class]])
self.assertRaises(messages.ValidationError,
field.validate,
(values[field_class],))
self.ActionOnAllFieldClasses(action)
def testValidate_None(self):
"""Test that None is valid for non-required fields."""
def action(field_class):
# Optional.
field = field_class(1)
field.validate(None)
# Required.
field = field_class(1, required=True)
self.assertRaisesWithRegexpMatch(messages.ValidationError,
'Required field is missing',
field.validate,
None)
# Repeated.
field = field_class(1, repeated=True)
field.validate(None)
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
'Repeated values for %s may '
'not be None' % field_class.__name__,
field.validate,
[None])
self.assertRaises(messages.ValidationError,
field.validate,
(None,))
self.ActionOnAllFieldClasses(action)
def testValidateElement(self):
"""Test validation of valid values."""
values = {
messages.IntegerField: (10, -1, 0),
messages.FloatField: (1.5, -1.5, 3), # for json it is all a number
messages.BooleanField: (True, False),
messages.BytesField: (b'abc',),
messages.StringField: (u'abc',),
}
def action(field_class):
# Optional.
field = field_class(1)
for value in values[field_class]:
field.validate_element(value)
# Required.
field = field_class(1, required=True)
for value in values[field_class]:
field.validate_element(value)
# Repeated.
field = field_class(1, repeated=True)
self.assertRaises(messages.ValidationError,
field.validate_element,
[])
self.assertRaises(messages.ValidationError,
field.validate_element,
())
for value in values[field_class]:
field.validate_element(value)
# Right value, but repeated.
self.assertRaises(messages.ValidationError,
field.validate_element,
list(values[field_class])) # testing list
self.assertRaises(messages.ValidationError,
field.validate_element,
values[field_class]) # testing tuple
self.ActionOnAllFieldClasses(action)
def testValidateCastingElement(self):
field = messages.FloatField(1)
self.assertEquals(type(field.validate_element(12)), float)
self.assertEquals(type(field.validate_element(12.0)), float)
# pylint: disable=redefined-variable-type
field = messages.IntegerField(1)
self.assertEquals(type(field.validate_element(12)), int)
self.assertRaises(messages.ValidationError,
field.validate_element,
                          12.0) # should fail when casting from float to int
def testReadOnly(self):
"""Test that objects are all read-only."""
def action(field_class):
field = field_class(10)
self.assertRaises(AttributeError,
setattr,
field,
'number',
20)
self.assertRaises(AttributeError,
setattr,
field,
'anything_else',
'whatever')
self.ActionOnAllFieldClasses(action)
def testMessageField(self):
"""Test the construction of message fields."""
self.assertRaises(messages.FieldDefinitionError,
messages.MessageField,
str,
10)
self.assertRaises(messages.FieldDefinitionError,
messages.MessageField,
messages.Message,
10)
class MyMessage(messages.Message):
pass
field = messages.MessageField(MyMessage, 10)
self.assertEquals(MyMessage, field.type)
def testMessageField_ForwardReference(self):
"""Test the construction of forward reference message fields."""
global MyMessage
global ForwardMessage
try:
class MyMessage(messages.Message):
self_reference = messages.MessageField('MyMessage', 1)
forward = messages.MessageField('ForwardMessage', 2)
nested = messages.MessageField(
'ForwardMessage.NestedMessage', 3)
inner = messages.MessageField('Inner', 4)
class Inner(messages.Message):
sibling = messages.MessageField('Sibling', 1)
class Sibling(messages.Message):
pass
class ForwardMessage(messages.Message):
class NestedMessage(messages.Message):
pass
self.assertEquals(MyMessage,
MyMessage.field_by_name('self_reference').type)
self.assertEquals(ForwardMessage,
MyMessage.field_by_name('forward').type)
self.assertEquals(ForwardMessage.NestedMessage,
MyMessage.field_by_name('nested').type)
self.assertEquals(MyMessage.Inner,
MyMessage.field_by_name('inner').type)
self.assertEquals(MyMessage.Sibling,
MyMessage.Inner.field_by_name('sibling').type)
finally:
try:
del MyMessage
del ForwardMessage
except: # pylint:disable=bare-except
pass
def testMessageField_WrongType(self):
"""Test that forward referencing the wrong type raises an error."""
global AnEnum
try:
class AnEnum(messages.Enum):
pass
class AnotherMessage(messages.Message):
a_field = messages.MessageField('AnEnum', 1)
self.assertRaises(messages.FieldDefinitionError,
getattr,
AnotherMessage.field_by_name('a_field'),
'type')
finally:
del AnEnum
def testMessageFieldValidate(self):
"""Test validation on message field."""
class MyMessage(messages.Message):
pass
class AnotherMessage(messages.Message):
pass
field = messages.MessageField(MyMessage, 10)
field.validate(MyMessage())
self.assertRaises(messages.ValidationError,
field.validate,
AnotherMessage())
def testMessageFieldMessageType(self):
"""Test message_type property."""
class MyMessage(messages.Message):
pass
class HasMessage(messages.Message):
field = messages.MessageField(MyMessage, 1)
self.assertEqual(HasMessage.field.type, HasMessage.field.message_type)
def testMessageFieldValueFromMessage(self):
class MyMessage(messages.Message):
pass
class HasMessage(messages.Message):
field = messages.MessageField(MyMessage, 1)
instance = MyMessage()
self.assertTrue(
instance is HasMessage.field.value_from_message(instance))
def testMessageFieldValueFromMessageWrongType(self):
class MyMessage(messages.Message):
pass
class HasMessage(messages.Message):
field = messages.MessageField(MyMessage, 1)
self.assertRaisesWithRegexpMatch(
messages.DecodeError,
'Expected type MyMessage, got int: 10',
HasMessage.field.value_from_message, 10)
def testMessageFieldValueToMessage(self):
class MyMessage(messages.Message):
pass
class HasMessage(messages.Message):
field = messages.MessageField(MyMessage, 1)
instance = MyMessage()
self.assertTrue(
instance is HasMessage.field.value_to_message(instance))
def testMessageFieldValueToMessageWrongType(self):
class MyMessage(messages.Message):
pass
class MyOtherMessage(messages.Message):
pass
class HasMessage(messages.Message):
field = messages.MessageField(MyMessage, 1)
instance = MyOtherMessage()
self.assertRaisesWithRegexpMatch(
messages.EncodeError,
'Expected type MyMessage, got MyOtherMessage: <MyOtherMessage>',
HasMessage.field.value_to_message, instance)
def testIntegerField_AllowLong(self):
"""Test that the integer field allows for longs."""
if six.PY2:
messages.IntegerField(10, default=long(10))
def testMessageFieldValidate_Initialized(self):
"""Test validation on message field."""
class MyMessage(messages.Message):
field1 = messages.IntegerField(1, required=True)
field = messages.MessageField(MyMessage, 10)
# Will validate messages where is_initialized() is False.
message = MyMessage()
field.validate(message)
message.field1 = 20
field.validate(message)
def testEnumField(self):
"""Test the construction of enum fields."""
self.assertRaises(messages.FieldDefinitionError,
messages.EnumField,
str,
10)
self.assertRaises(messages.FieldDefinitionError,
messages.EnumField,
messages.Enum,
10)
class Color(messages.Enum):
RED = 1
GREEN = 2
BLUE = 3
field = messages.EnumField(Color, 10)
self.assertEquals(Color, field.type)
class Another(messages.Enum):
VALUE = 1
self.assertRaises(messages.InvalidDefaultError,
messages.EnumField,
Color,
10,
default=Another.VALUE)
def testEnumField_ForwardReference(self):
"""Test the construction of forward reference enum fields."""
global MyMessage
global ForwardEnum
global ForwardMessage
try:
class MyMessage(messages.Message):
forward = messages.EnumField('ForwardEnum', 1)
nested = messages.EnumField('ForwardMessage.NestedEnum', 2)
inner = messages.EnumField('Inner', 3)
class Inner(messages.Enum):
pass
class ForwardEnum(messages.Enum):
pass
class ForwardMessage(messages.Message):
class NestedEnum(messages.Enum):
pass
self.assertEquals(ForwardEnum,
MyMessage.field_by_name('forward').type)
self.assertEquals(ForwardMessage.NestedEnum,
MyMessage.field_by_name('nested').type)
self.assertEquals(MyMessage.Inner,
MyMessage.field_by_name('inner').type)
finally:
try:
del MyMessage
del ForwardEnum
del ForwardMessage
except: # pylint:disable=bare-except
pass
def testEnumField_WrongType(self):
"""Test that forward referencing the wrong type raises an error."""
global AMessage
try:
class AMessage(messages.Message):
pass
class AnotherMessage(messages.Message):
a_field = messages.EnumField('AMessage', 1)
self.assertRaises(messages.FieldDefinitionError,
getattr,
AnotherMessage.field_by_name('a_field'),
'type')
finally:
del AMessage
def testMessageDefinition(self):
"""Test that message definition is set on fields."""
class MyMessage(messages.Message):
my_field = messages.StringField(1)
self.assertEquals(
MyMessage,
MyMessage.field_by_name('my_field').message_definition())
def testNoneAssignment(self):
"""Test that assigning None does not change comparison."""
class MyMessage(messages.Message):
my_field = messages.StringField(1)
m1 = MyMessage()
m2 = MyMessage()
m2.my_field = None
self.assertEquals(m1, m2)
def testNonAsciiStr(self):
"""Test validation fails for non-ascii StringField values."""
class Thing(messages.Message):
string_field = messages.StringField(2)
thing = Thing()
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
'Field string_field encountered non-ASCII string',
setattr, thing, 'string_field', test_util.BINARY)
class MessageTest(test_util.TestCase):
"""Tests for message class."""
def CreateMessageClass(self):
"""Creates a simple message class with 3 fields.
Fields are defined in alphabetical order but with conflicting numeric
order.
"""
class ComplexMessage(messages.Message):
a3 = messages.IntegerField(3)
b1 = messages.StringField(1)
c2 = messages.StringField(2)
return ComplexMessage
def testSameNumbers(self):
"""Test that cannot assign two fields with same numbers."""
def action():
class BadMessage(messages.Message):
f1 = messages.IntegerField(1)
f2 = messages.IntegerField(1)
self.assertRaises(messages.DuplicateNumberError,
action)
def testStrictAssignment(self):
"""Tests that cannot assign to unknown or non-reserved attributes."""
class SimpleMessage(messages.Message):
field = messages.IntegerField(1)
simple_message = SimpleMessage()
self.assertRaises(AttributeError,
setattr,
simple_message,
'does_not_exist',
10)
def testListAssignmentDoesNotCopy(self):
class SimpleMessage(messages.Message):
repeated = messages.IntegerField(1, repeated=True)
message = SimpleMessage()
original = message.repeated
message.repeated = []
self.assertFalse(original is message.repeated)
def testValidate_Optional(self):
"""Tests validation of optional fields."""
class SimpleMessage(messages.Message):
non_required = messages.IntegerField(1)
simple_message = SimpleMessage()
simple_message.check_initialized()
simple_message.non_required = 10
simple_message.check_initialized()
def testValidate_Required(self):
"""Tests validation of required fields."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(1, required=True)
simple_message = SimpleMessage()
self.assertRaises(messages.ValidationError,
simple_message.check_initialized)
simple_message.required = 10
simple_message.check_initialized()
def testValidate_Repeated(self):
"""Tests validation of repeated fields."""
class SimpleMessage(messages.Message):
repeated = messages.IntegerField(1, repeated=True)
simple_message = SimpleMessage()
# Check valid values.
for valid_value in [], [10], [10, 20], (), (10,), (10, 20):
simple_message.repeated = valid_value
simple_message.check_initialized()
# Check cleared.
simple_message.repeated = []
simple_message.check_initialized()
# Check invalid values.
for invalid_value in 10, ['10', '20'], [None], (None,):
self.assertRaises(
messages.ValidationError,
setattr, simple_message, 'repeated', invalid_value)
def testIsInitialized(self):
"""Tests is_initialized."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(1, required=True)
simple_message = SimpleMessage()
self.assertFalse(simple_message.is_initialized())
simple_message.required = 10
self.assertTrue(simple_message.is_initialized())
def testIsInitializedNestedField(self):
"""Tests is_initialized for nested fields."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(1, required=True)
class NestedMessage(messages.Message):
simple = messages.MessageField(SimpleMessage, 1)
simple_message = SimpleMessage()
self.assertFalse(simple_message.is_initialized())
nested_message = NestedMessage(simple=simple_message)
self.assertFalse(nested_message.is_initialized())
simple_message.required = 10
self.assertTrue(simple_message.is_initialized())
self.assertTrue(nested_message.is_initialized())
def testInitializeNestedFieldFromDict(self):
"""Tests initializing nested fields from dict."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(1, required=True)
class NestedMessage(messages.Message):
simple = messages.MessageField(SimpleMessage, 1)
class RepeatedMessage(messages.Message):
simple = messages.MessageField(SimpleMessage, 1, repeated=True)
nested_message1 = NestedMessage(simple={'required': 10})
self.assertTrue(nested_message1.is_initialized())
self.assertTrue(nested_message1.simple.is_initialized())
nested_message2 = NestedMessage()
nested_message2.simple = {'required': 10}
self.assertTrue(nested_message2.is_initialized())
self.assertTrue(nested_message2.simple.is_initialized())
repeated_values = [{}, {'required': 10}, SimpleMessage(required=20)]
repeated_message1 = RepeatedMessage(simple=repeated_values)
self.assertEquals(3, len(repeated_message1.simple))
self.assertFalse(repeated_message1.is_initialized())
repeated_message1.simple[0].required = 0
self.assertTrue(repeated_message1.is_initialized())
repeated_message2 = RepeatedMessage()
repeated_message2.simple = repeated_values
self.assertEquals(3, len(repeated_message2.simple))
self.assertFalse(repeated_message2.is_initialized())
repeated_message2.simple[0].required = 0
self.assertTrue(repeated_message2.is_initialized())
def testNestedMethodsNotAllowed(self):
"""Test that method definitions on Message classes are not allowed."""
def action():
class WithMethods(messages.Message):
def not_allowed(self):
pass
self.assertRaises(messages.MessageDefinitionError,
action)
def testNestedAttributesNotAllowed(self):
"""Test attribute assignment on Message classes is not allowed."""
def int_attribute():
class WithMethods(messages.Message):
not_allowed = 1
def string_attribute():
class WithMethods(messages.Message):
not_allowed = 'not allowed'
def enum_attribute():
class WithMethods(messages.Message):
not_allowed = Color.RED
for action in (int_attribute, string_attribute, enum_attribute):
self.assertRaises(messages.MessageDefinitionError,
action)
def testNameIsSetOnFields(self):
"""Make sure name is set on fields after Message class init."""
class HasNamedFields(messages.Message):
field = messages.StringField(1)
self.assertEquals('field', HasNamedFields.field_by_number(1).name)
def testSubclassingMessageDisallowed(self):
"""Not permitted to create sub-classes of message classes."""
class SuperClass(messages.Message):
pass
def action():
class SubClass(SuperClass):
pass
self.assertRaises(messages.MessageDefinitionError,
action)
def testAllFields(self):
"""Test all_fields method."""
ComplexMessage = self.CreateMessageClass()
fields = list(ComplexMessage.all_fields())
# Order does not matter, so sort now.
fields = sorted(fields, key=lambda f: f.name)
self.assertEquals(3, len(fields))
self.assertEquals('a3', fields[0].name)
self.assertEquals('b1', fields[1].name)
self.assertEquals('c2', fields[2].name)
def testFieldByName(self):
"""Test getting field by name."""
ComplexMessage = self.CreateMessageClass()
self.assertEquals(3, ComplexMessage.field_by_name('a3').number)
self.assertEquals(1, ComplexMessage.field_by_name('b1').number)
self.assertEquals(2, ComplexMessage.field_by_name('c2').number)
self.assertRaises(KeyError,
ComplexMessage.field_by_name,
'unknown')
def testFieldByNumber(self):
"""Test getting field by number."""
ComplexMessage = self.CreateMessageClass()
self.assertEquals('a3', ComplexMessage.field_by_number(3).name)
self.assertEquals('b1', ComplexMessage.field_by_number(1).name)
self.assertEquals('c2', ComplexMessage.field_by_number(2).name)
self.assertRaises(KeyError,
ComplexMessage.field_by_number,
4)
def testGetAssignedValue(self):
"""Test getting the assigned value of a field."""
class SomeMessage(messages.Message):
a_value = messages.StringField(1, default=u'a default')
message = SomeMessage()
self.assertEquals(None, message.get_assigned_value('a_value'))
message.a_value = u'a string'
self.assertEquals(u'a string', message.get_assigned_value('a_value'))
message.a_value = u'a default'
self.assertEquals(u'a default', message.get_assigned_value('a_value'))
self.assertRaisesWithRegexpMatch(
AttributeError,
'Message SomeMessage has no field no_such_field',
message.get_assigned_value,
'no_such_field')
def testReset(self):
"""Test resetting a field value."""
class SomeMessage(messages.Message):
a_value = messages.StringField(1, default=u'a default')
repeated = messages.IntegerField(2, repeated=True)
message = SomeMessage()
self.assertRaises(AttributeError, message.reset, 'unknown')
self.assertEquals(u'a default', message.a_value)
message.reset('a_value')
self.assertEquals(u'a default', message.a_value)
message.a_value = u'a new value'
self.assertEquals(u'a new value', message.a_value)
message.reset('a_value')
self.assertEquals(u'a default', message.a_value)
message.repeated = [1, 2, 3]
self.assertEquals([1, 2, 3], message.repeated)
saved = message.repeated
message.reset('repeated')
self.assertEquals([], message.repeated)
self.assertIsInstance(message.repeated, messages.FieldList)
self.assertEquals([1, 2, 3], saved)
def testAllowNestedEnums(self):
"""Test allowing nested enums in a message definition."""
class Trade(messages.Message):
class Duration(messages.Enum):
GTC = 1
DAY = 2
class Currency(messages.Enum):
USD = 1
GBP = 2
INR = 3
# Sorted by name order seems to be the only feasible option.
self.assertEquals(['Currency', 'Duration'], Trade.__enums__)
# Message definition will now be set on Enumerated objects.
self.assertEquals(Trade, Trade.Duration.message_definition())
def testAllowNestedMessages(self):
"""Test allowing nested messages in a message definition."""
class Trade(messages.Message):
class Lot(messages.Message):
pass
class Agent(messages.Message):
pass
# Sorted by name order seems to be the only feasible option.
self.assertEquals(['Agent', 'Lot'], Trade.__messages__)
self.assertEquals(Trade, Trade.Agent.message_definition())
self.assertEquals(Trade, Trade.Lot.message_definition())
# But not Message itself.
def action():
class Trade(messages.Message):
NiceTry = messages.Message
self.assertRaises(messages.MessageDefinitionError, action)
def testDisallowClassAssignments(self):
"""Test setting class attributes may not happen."""
class MyMessage(messages.Message):
pass
self.assertRaises(AttributeError,
setattr,
MyMessage,
'x',
'do not assign')
def testEquality(self):
"""Test message class equality."""
# Comparison against enums must work.
class MyEnum(messages.Enum):
val1 = 1
val2 = 2
# Comparisons against nested messages must work.
class AnotherMessage(messages.Message):
string = messages.StringField(1)
class MyMessage(messages.Message):
field1 = messages.IntegerField(1)
field2 = messages.EnumField(MyEnum, 2)
field3 = messages.MessageField(AnotherMessage, 3)
message1 = MyMessage()
self.assertNotEquals('hi', message1)
self.assertNotEquals(AnotherMessage(), message1)
self.assertEquals(message1, message1)
message2 = MyMessage()
self.assertEquals(message1, message2)
message1.field1 = 10
self.assertNotEquals(message1, message2)
message2.field1 = 20
self.assertNotEquals(message1, message2)
message2.field1 = 10
self.assertEquals(message1, message2)
message1.field2 = MyEnum.val1
self.assertNotEquals(message1, message2)
message2.field2 = MyEnum.val2
self.assertNotEquals(message1, message2)
message2.field2 = MyEnum.val1
self.assertEquals(message1, message2)
message1.field3 = AnotherMessage()
message1.field3.string = 'value1'
self.assertNotEquals(message1, message2)
message2.field3 = AnotherMessage()
message2.field3.string = 'value2'
self.assertNotEquals(message1, message2)
message2.field3.string = 'value1'
self.assertEquals(message1, message2)
def testEqualityWithUnknowns(self):
"""Test message class equality with unknown fields."""
class MyMessage(messages.Message):
field1 = messages.IntegerField(1)
message1 = MyMessage()
message2 = MyMessage()
self.assertEquals(message1, message2)
message1.set_unrecognized_field('unknown1', 'value1',
messages.Variant.STRING)
self.assertEquals(message1, message2)
message1.set_unrecognized_field('unknown2', ['asdf', 3],
messages.Variant.STRING)
message1.set_unrecognized_field('unknown3', 4.7,
messages.Variant.DOUBLE)
self.assertEquals(message1, message2)
def testUnrecognizedFieldInvalidVariant(self):
class MyMessage(messages.Message):
field1 = messages.IntegerField(1)
message1 = MyMessage()
self.assertRaises(
TypeError, message1.set_unrecognized_field, 'unknown4',
{'unhandled': 'type'}, None)
self.assertRaises(
TypeError, message1.set_unrecognized_field, 'unknown4',
{'unhandled': 'type'}, 123)
def testRepr(self):
"""Test represtation of Message object."""
class MyMessage(messages.Message):
integer_value = messages.IntegerField(1)
string_value = messages.StringField(2)
unassigned = messages.StringField(3)
unassigned_with_default = messages.StringField(
4, default=u'a default')
my_message = MyMessage()
my_message.integer_value = 42
my_message.string_value = u'A string'
pat = re.compile(r"<MyMessage\n integer_value: 42\n"
" string_value: [u]?'A string'>")
self.assertTrue(pat.match(repr(my_message)) is not None)
def testValidation(self):
"""Test validation of message values."""
# Test optional.
class SubMessage(messages.Message):
pass
class Message(messages.Message):
val = messages.MessageField(SubMessage, 1)
message = Message()
message_field = messages.MessageField(Message, 1)
message_field.validate(message)
message.val = SubMessage()
message_field.validate(message)
self.assertRaises(messages.ValidationError,
setattr, message, 'val', [SubMessage()])
# Test required.
class Message(messages.Message):
val = messages.MessageField(SubMessage, 1, required=True)
message = Message()
message_field = messages.MessageField(Message, 1)
message_field.validate(message)
message.val = SubMessage()
message_field.validate(message)
self.assertRaises(messages.ValidationError,
setattr, message, 'val', [SubMessage()])
# Test repeated.
class Message(messages.Message):
val = messages.MessageField(SubMessage, 1, repeated=True)
message = Message()
message_field = messages.MessageField(Message, 1)
message_field.validate(message)
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
"Field val is repeated. Found: <SubMessage>",
setattr, message, 'val', SubMessage())
# pylint: disable=redefined-variable-type
message.val = [SubMessage()]
message_field.validate(message)
def testDefinitionName(self):
"""Test message name."""
class MyMessage(messages.Message):
pass
module_name = test_util.get_module_name(FieldTest)
self.assertEquals('%s.MyMessage' % module_name,
MyMessage.definition_name())
self.assertEquals(module_name, MyMessage.outer_definition_name())
self.assertEquals(module_name, MyMessage.definition_package())
self.assertEquals(six.text_type, type(MyMessage.definition_name()))
self.assertEquals(six.text_type, type(
MyMessage.outer_definition_name()))
self.assertEquals(six.text_type, type(MyMessage.definition_package()))
def testDefinitionName_OverrideModule(self):
"""Test message module is overriden by module package name."""
class MyMessage(messages.Message):
pass
global package
package = 'my.package'
try:
self.assertEquals('my.package.MyMessage',
MyMessage.definition_name())
self.assertEquals('my.package', MyMessage.outer_definition_name())
self.assertEquals('my.package', MyMessage.definition_package())
self.assertEquals(six.text_type, type(MyMessage.definition_name()))
self.assertEquals(six.text_type, type(
MyMessage.outer_definition_name()))
self.assertEquals(six.text_type, type(
MyMessage.definition_package()))
finally:
del package
def testDefinitionName_NoModule(self):
"""Test what happens when there is no module for message."""
class MyMessage(messages.Message):
pass
original_modules = sys.modules
sys.modules = dict(sys.modules)
try:
del sys.modules[__name__]
self.assertEquals('MyMessage', MyMessage.definition_name())
self.assertEquals(None, MyMessage.outer_definition_name())
self.assertEquals(None, MyMessage.definition_package())
self.assertEquals(six.text_type, type(MyMessage.definition_name()))
finally:
sys.modules = original_modules
def testDefinitionName_Nested(self):
"""Test nested message names."""
class MyMessage(messages.Message):
class NestedMessage(messages.Message):
class NestedMessage(messages.Message):
pass
module_name = test_util.get_module_name(MessageTest)
self.assertEquals('%s.MyMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.definition_name())
self.assertEquals('%s.MyMessage' % module_name,
MyMessage.NestedMessage.outer_definition_name())
self.assertEquals(module_name,
MyMessage.NestedMessage.definition_package())
self.assertEquals(
'%s.MyMessage.NestedMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.NestedMessage.definition_name())
self.assertEquals(
'%s.MyMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.NestedMessage.outer_definition_name())
self.assertEquals(
module_name,
MyMessage.NestedMessage.NestedMessage.definition_package())
def testMessageDefinition(self):
"""Test that enumeration knows its enclosing message definition."""
class OuterMessage(messages.Message):
class InnerMessage(messages.Message):
pass
self.assertEquals(None, OuterMessage.message_definition())
self.assertEquals(OuterMessage,
OuterMessage.InnerMessage.message_definition())
def testConstructorKwargs(self):
"""Test kwargs via constructor."""
class SomeMessage(messages.Message):
name = messages.StringField(1)
number = messages.IntegerField(2)
expected = SomeMessage()
expected.name = 'my name'
expected.number = 200
self.assertEquals(expected, SomeMessage(name='my name', number=200))
def testConstructorNotAField(self):
"""Test kwargs via constructor with wrong names."""
class SomeMessage(messages.Message):
pass
self.assertRaisesWithRegexpMatch(
AttributeError,
('May not assign arbitrary value does_not_exist to message '
'SomeMessage'),
SomeMessage,
does_not_exist=10)
def testGetUnsetRepeatedValue(self):
class SomeMessage(messages.Message):
repeated = messages.IntegerField(1, repeated=True)
instance = SomeMessage()
self.assertEquals([], instance.repeated)
self.assertTrue(isinstance(instance.repeated, messages.FieldList))
def testCompareAutoInitializedRepeatedFields(self):
class SomeMessage(messages.Message):
repeated = messages.IntegerField(1, repeated=True)
message1 = SomeMessage(repeated=[])
message2 = SomeMessage()
self.assertEquals(message1, message2)
def testUnknownValues(self):
"""Test message class equality with unknown fields."""
class MyMessage(messages.Message):
field1 = messages.IntegerField(1)
message = MyMessage()
self.assertEquals([], message.all_unrecognized_fields())
self.assertEquals((None, None),
message.get_unrecognized_field_info('doesntexist'))
self.assertEquals((None, None),
message.get_unrecognized_field_info(
'doesntexist', None, None))
self.assertEquals(('defaultvalue', 'defaultwire'),
message.get_unrecognized_field_info(
'doesntexist', 'defaultvalue', 'defaultwire'))
self.assertEquals((3, None),
message.get_unrecognized_field_info(
'doesntexist', value_default=3))
message.set_unrecognized_field('exists', 9.5, messages.Variant.DOUBLE)
self.assertEquals(1, len(message.all_unrecognized_fields()))
self.assertTrue('exists' in message.all_unrecognized_fields())
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists'))
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists', 'type',
1234))
self.assertEquals(
(1234, None),
message.get_unrecognized_field_info('doesntexist', 1234))
message.set_unrecognized_field(
'another', 'value', messages.Variant.STRING)
self.assertEquals(2, len(message.all_unrecognized_fields()))
self.assertTrue('exists' in message.all_unrecognized_fields())
self.assertTrue('another' in message.all_unrecognized_fields())
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists'))
self.assertEquals(('value', messages.Variant.STRING),
message.get_unrecognized_field_info('another'))
message.set_unrecognized_field('typetest1', ['list', 0, ('test',)],
messages.Variant.STRING)
self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),
message.get_unrecognized_field_info('typetest1'))
message.set_unrecognized_field(
'typetest2', '', messages.Variant.STRING)
self.assertEquals(('', messages.Variant.STRING),
message.get_unrecognized_field_info('typetest2'))
def testPickle(self):
"""Testing pickling and unpickling of Message instances."""
global MyEnum
global AnotherMessage
global MyMessage
class MyEnum(messages.Enum):
val1 = 1
val2 = 2
class AnotherMessage(messages.Message):
string = messages.StringField(1, repeated=True)
class MyMessage(messages.Message):
field1 = messages.IntegerField(1)
field2 = messages.EnumField(MyEnum, 2)
field3 = messages.MessageField(AnotherMessage, 3)
message = MyMessage(field1=1, field2=MyEnum.val2,
field3=AnotherMessage(string=['a', 'b', 'c']))
message.set_unrecognized_field(
'exists', 'value', messages.Variant.STRING)
message.set_unrecognized_field('repeated', ['list', 0, ('test',)],
messages.Variant.STRING)
unpickled = pickle.loads(pickle.dumps(message))
self.assertEquals(message, unpickled)
self.assertTrue(AnotherMessage.string is unpickled.field3.string.field)
self.assertTrue('exists' in message.all_unrecognized_fields())
self.assertEquals(('value', messages.Variant.STRING),
message.get_unrecognized_field_info('exists'))
self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),
message.get_unrecognized_field_info('repeated'))
class FindDefinitionTest(test_util.TestCase):
"""Test finding definitions relative to various definitions and modules."""
def setUp(self):
"""Set up module-space. Starts off empty."""
self.modules = {}
def DefineModule(self, name):
"""Define a module and its parents in module space.
Modules that are already defined in self.modules are not re-created.
Args:
name: Fully qualified name of modules to create.
Returns:
Deepest nested module. For example:
DefineModule('a.b.c') # Returns c.
"""
name_path = name.split('.')
full_path = []
for node in name_path:
full_path.append(node)
full_name = '.'.join(full_path)
self.modules.setdefault(full_name, types.ModuleType(full_name))
return self.modules[name]
def DefineMessage(self, module, name, children=None, add_to_module=True):
"""Define a new Message class in the context of a module.
Used for easily describing complex Message hierarchy. Message
is defined including all child definitions.
Args:
module: Fully qualified name of module to place Message class in.
name: Name of Message to define within module.
children: Define any level of nesting of children
definitions. To define a message, map the name to another
dictionary. The dictionary can itself contain additional
definitions, and so on. To map to an Enum, define the Enum
class separately and map it by name.
add_to_module: If True, new Message class is added to
module. If False, new Message is not added.
"""
children = children or {}
# Make sure module exists.
module_instance = self.DefineModule(module)
# Recursively define all child messages.
for attribute, value in children.items():
if isinstance(value, dict):
children[attribute] = self.DefineMessage(
module, attribute, value, False)
# Override default __module__ variable.
children['__module__'] = module
# Instantiate and possibly add to module.
message_class = type(name, (messages.Message,), dict(children))
if add_to_module:
setattr(module_instance, name, message_class)
return message_class
# pylint:disable=unused-argument
# pylint:disable=redefined-builtin
def Importer(self, module, globals='', locals='', fromlist=None):
"""Importer function.
Acts like __import__. Only loads modules from self.modules.
Does not try to load real modules defined elsewhere. Does not
try to handle relative imports.
Args:
module: Fully qualified name of module to load from self.modules.
"""
if fromlist is None:
module = module.split('.')[0]
try:
return self.modules[module]
except KeyError:
raise ImportError()
# pylint:disable=unused-argument
def testNoSuchModule(self):
"""Test searching for definitions that do no exist."""
self.assertRaises(messages.DefinitionNotFoundError,
messages.find_definition,
'does.not.exist',
importer=self.Importer)
def testRefersToModule(self):
"""Test that referring to a module does not return that module."""
self.DefineModule('i.am.a.module')
self.assertRaises(messages.DefinitionNotFoundError,
messages.find_definition,
'i.am.a.module',
importer=self.Importer)
def testNoDefinition(self):
"""Test not finding a definition in an existing module."""
self.DefineModule('i.am.a.module')
self.assertRaises(messages.DefinitionNotFoundError,
messages.find_definition,
'i.am.a.module.MyMessage',
importer=self.Importer)
def testNotADefinition(self):
"""Test trying to fetch something that is not a definition."""
module = self.DefineModule('i.am.a.module')
setattr(module, 'A', 'a string')
self.assertRaises(messages.DefinitionNotFoundError,
messages.find_definition,
'i.am.a.module.A',
importer=self.Importer)
def testGlobalFind(self):
"""Test finding definitions from fully qualified module names."""
A = self.DefineMessage('a.b.c', 'A', {})
self.assertEquals(A, messages.find_definition('a.b.c.A',
importer=self.Importer))
B = self.DefineMessage('a.b.c', 'B', {'C': {}})
self.assertEquals(
B.C,
messages.find_definition('a.b.c.B.C', importer=self.Importer))
def testRelativeToModule(self):
"""Test finding definitions relative to modules."""
# Define modules.
a = self.DefineModule('a')
b = self.DefineModule('a.b')
c = self.DefineModule('a.b.c')
# Define messages.
A = self.DefineMessage('a', 'A')
B = self.DefineMessage('a.b', 'B')
C = self.DefineMessage('a.b.c', 'C')
D = self.DefineMessage('a.b.d', 'D')
# Find A, B, C and D relative to a.
self.assertEquals(A, messages.find_definition(
'A', a, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'b.B', a, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'b.c.C', a, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'b.d.D', a, importer=self.Importer))
# Find A, B, C and D relative to b.
self.assertEquals(A, messages.find_definition(
'A', b, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'B', b, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'c.C', b, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'd.D', b, importer=self.Importer))
# Find A, B, C and D relative to c. Module d is the same case as c.
self.assertEquals(A, messages.find_definition(
'A', c, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'B', c, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'C', c, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'd.D', c, importer=self.Importer))
def testRelativeToMessages(self):
"""Test finding definitions relative to Message definitions."""
A = self.DefineMessage('a.b', 'A', {'B': {'C': {}, 'D': {}}})
B = A.B
C = A.B.C
D = A.B.D
# Find relative to A.
self.assertEquals(A, messages.find_definition(
'A', A, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'B', A, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'B.C', A, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'B.D', A, importer=self.Importer))
# Find relative to B.
self.assertEquals(A, messages.find_definition(
'A', B, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'B', B, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'C', B, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'D', B, importer=self.Importer))
# Find relative to C.
self.assertEquals(A, messages.find_definition(
'A', C, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'B', C, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'C', C, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'D', C, importer=self.Importer))
# Find relative to C searching from c.
self.assertEquals(A, messages.find_definition(
'b.A', C, importer=self.Importer))
self.assertEquals(B, messages.find_definition(
'b.A.B', C, importer=self.Importer))
self.assertEquals(C, messages.find_definition(
'b.A.B.C', C, importer=self.Importer))
self.assertEquals(D, messages.find_definition(
'b.A.B.D', C, importer=self.Importer))
def testAbsoluteReference(self):
"""Test finding absolute definition names."""
# Define modules.
a = self.DefineModule('a')
b = self.DefineModule('a.a')
# Define messages.
aA = self.DefineMessage('a', 'A')
aaA = self.DefineMessage('a.a', 'A')
# Always find a.A.
self.assertEquals(aA, messages.find_definition('.a.A', None,
importer=self.Importer))
self.assertEquals(aA, messages.find_definition('.a.A', a,
importer=self.Importer))
self.assertEquals(aA, messages.find_definition('.a.A', aA,
importer=self.Importer))
self.assertEquals(aA, messages.find_definition('.a.A', aaA,
importer=self.Importer))
def testFindEnum(self):
"""Test that Enums are found."""
class Color(messages.Enum):
pass
A = self.DefineMessage('a', 'A', {'Color': Color})
self.assertEquals(
Color,
messages.find_definition('Color', A, importer=self.Importer))
def testFalseScope(self):
"""Test Message definitions nested in strange objects are hidden."""
global X
class X(object):
class A(messages.Message):
pass
self.assertRaises(TypeError, messages.find_definition, 'A', X)
self.assertRaises(messages.DefinitionNotFoundError,
messages.find_definition,
'X.A', sys.modules[__name__])
def testSearchAttributeFirst(self):
"""Make sure not faked out by module, but continues searching."""
A = self.DefineMessage('a', 'A')
module_A = self.DefineModule('a.A')
self.assertEquals(A, messages.find_definition(
'a.A', None, importer=self.Importer))
def main():
unittest.main()
if __name__ == '__main__':
main()
|
the-stack_106_20186
|
'''RL agent implementing hierarchical spatial attention (HSA).'''
# python
import os
import pickle
# scipy
from numpy.random import rand, randint
from numpy import array, delete, log2, meshgrid, ravel_multi_index, reshape, unravel_index, zeros
# drawing
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
# self
# AGENT ============================================================================================
class RlAgentHsa:
def __init__(self, params):
'''Initializes the agent with an optimistic policy.
- Input params: System parameters data structure.
'''
# parameters
self.n = params["nObjects"]
self.m = params["worldSize"]
self.tMax = params["tMax"]
self.initQ = params["initQ"]
self.alpha = params["alpha"]
self.gamma = params["gamma"]
self.plotImages = params["plotImages"]
# determine parameters
self.worldSize = (self.m, self.m, self.m)
self.endEffectorIdx = self.m**3
self.observationSize = (2, 2, 2)
self.L = int(log2(self.m))
# some input checking
if self.m < 2:
raise Exception("Because the observation is 2x2x2, the smallest supported world size is 2.")
if log2(self.m) != self.L:
raise Exception("The current implementation only supports grid sizes that are powers of 2.")
# initialize q-function
    # Q = (G1_0, ..., G1_7, G2_0, ..., G2_7, h, l, t, a)
self.Q = {None:0.0}
def GetQTableSize(self):
'''Returns the number of Q-values stored in the lookup table.'''
return len(self.Q)
def GetAction(self, a, l, i):
'''Gets the next underlying action coordinate (x, y, z) given the current underlying action a,
level l and abstract action i.
- Input a: Underlying action coordinate (x, y, z).
- Input l: The current level in 0, ..., L - 1.
- Input i: The abstract action index.
- Returns aa: The next underlying action.
'''
d = self.m / 2 ** (l+1)
iCoord = unravel_index(i, self.observationSize)
return (a[0] + d * iCoord[0], a[1] + d * iCoord[1], a[2] + d * iCoord[2])
def GetActionsAndObservations(self, s, epsilon):
'''Gets a list of actions, abstract actions, and observations for each level in the sense sequence.
- Input s: The current state = (pegs, holes, time).
- Input epsilon: Takes a random action with probability epsilon.
- Returns a: The underlying action (world coordinates flattened index).
- Returns i: The abstract actions, one for each sense level, each of which is an index
indicating which cell in the observation was selected.
- Returns o: The observations, one for each sense level l, each of which is the tuple from
GetObservation.
'''
# reached terminal state
if s is None:
return None, None, None
# decompose state information
pegs = s[0:self.n]; disks = s[self.n:2*self.n]; t = s[-1]
    # ignore pegs that already coincide with a disk (and those occupied disks)
ignorePegs = []; ignoreDisks = []; i = 0; j = 0
while i < self.n and j < self.n:
if pegs[i] == disks[j]:
ignorePegs.append(i)
ignoreDisks.append(j)
i += 1; j += 1
elif pegs[i] < disks[j]: i += 1
else: j += 1
unplacedPegs = delete(pegs, ignorePegs)
unoccupiedDisks = delete(disks, ignoreDisks)
# compute coordinates for pegs and disks
pegCoords = []; diskCoords = []; h = False
for i in xrange(len(unplacedPegs)):
if unplacedPegs[i] == self.endEffectorIdx:
h = True
else:
pegCoords.append(unravel_index(unplacedPegs[i], self.worldSize))
diskCoords.append(unravel_index(unoccupiedDisks[i], self.worldSize))
# initialize outputs
a = (0, 0, 0); aPrev = None; i = []; o = []
# take best action, breaking ties randomly
for l in xrange(self.L):
observation = self.GetObservation(pegCoords, diskCoords, h, l, t, a)
if rand() < epsilon:
abstractAction = randint(8)
idx = observation + (abstractAction,)
if idx not in self.Q: self.Q[idx] = self.initQ[t]
else:
bestValue = -float('inf'); abstractActions = None
for abstractAction in xrange(8):
idx = observation + (abstractAction,)
if idx not in self.Q: self.Q[idx] = self.initQ[t]
if self.Q[idx] > bestValue:
bestValue = self.Q[idx]
abstractActions = [abstractAction]
elif self.Q[idx] == bestValue:
abstractActions.append(abstractAction)
# break ties randomly
abstractAction = abstractActions[randint(len(abstractActions))]
# compute new sensor location and append observation and abstract action to list
aPrev = a
a = self.GetAction(a, l, abstractAction)
o.append(observation); i.append(abstractAction)
# visualization
if self.plotImages:
print("Best value: {}".format(bestValue))
self.PlotImages(pegs, disks, t, l, h, a, aPrev)
return ravel_multi_index(a, self.worldSize), i, o
def GetObservation(self, pegs, disks, h, l, t, a):
'''Gets an HVS observation given the underlying state and current underlying point of focus/action.
- Input pegs: List of unplaced pegs in global coordinates (x, y, z), excluding any held peg.
- Input disks: List of unplaced disks in global coordinates (x, y, z).
- Input h: True if a peg is in the end effector and False otherwise.
- Input l: The current level.
- Input t: The current overt time step.
- Input a: The current point of focus/action (x, y, z).
    - Returns o: The current observation (g1_0, ..., g1_7, g2_0, ..., g2_7, h, l, t).
'''
# cell size for this level
d = self.m / 2 ** (l+1)
# initialize grids to empty
G1 = zeros(self.observationSize, dtype='bool')
G2 = zeros(self.observationSize, dtype='bool')
# check each partition for a peg/disk
for i in xrange(2):
for j in xrange(2):
for k in xrange(2):
x0 = a[0] + i * d; x1 = x0 + d
y0 = a[1] + j * d; y1 = y0 + d
z0 = a[2] + k * d; z1 = z0 + d
for peg in pegs:
if peg[0] >= x0 and peg[0] < x1 and \
peg[1] >= y0 and peg[1] < y1 and \
peg[2] >= z0 and peg[2] < z1:
G1[i, j, k] = True
break
for disk in disks:
if disk[0] >= x0 and disk[0] < x1 and \
disk[1] >= y0 and disk[1] < y1 and \
disk[2] >= z0 and disk[2] < z1:
G2[i, j, k] = True
break
return tuple(G1.flatten()) + tuple(G2.flatten()) + (h, l, t)
def LoadQFunction(self):
'''Loads a previously saved dictionary of Q-values.'''
path = os.getcwd() + "/" + "model-hvs.pkl"
self.Q = pickle.load(open(path, "rb"))
print("Loaded {}.".format(path))
def PlotCube(self, ax, xMinMax, yMinMax, zMinMax, color, alpha):
'''https://codereview.stackexchange.com/questions/155585/plotting-a-rectangular-prism'''
xx, yy = meshgrid(xMinMax, yMinMax)
ax.plot_wireframe(xx, yy, reshape(zMinMax[0], (1, 1)), color=color)
ax.plot_surface(xx, yy, reshape(zMinMax[0], (1, 1)), color=color, alpha=alpha)
ax.plot_wireframe(xx, yy, reshape(zMinMax[1], (1, 1)), color=color)
ax.plot_surface(xx, yy, reshape(zMinMax[1], (1, 1)), color=color, alpha=alpha)
yy, zz = meshgrid(yMinMax, zMinMax)
ax.plot_wireframe(xMinMax[0], yy, zz, color=color)
ax.plot_surface(xMinMax[0], yy, zz, color=color, alpha=alpha)
ax.plot_wireframe(xMinMax[1], yy, zz, color=color)
ax.plot_surface(xMinMax[1], yy, zz, color=color, alpha=alpha)
xx, zz = meshgrid(xMinMax, zMinMax)
ax.plot_wireframe(xx, yMinMax[0], zz, color=color)
ax.plot_surface(xx, yMinMax[0], zz, color=color, alpha=alpha)
ax.plot_wireframe(xx, yMinMax[1], zz, color=color)
ax.plot_surface(xx, yMinMax[1], zz, color=color, alpha=alpha)
def PlotImages(self, pegs, disks, t, l, h, a, aPrev):
'''Visualizes a current situation.
- Input pegs: List of all pegs as a flat coordinate.
- Input disks: List of all disks as a flat coordinate.
- Input t: The overt time step.
- Input l: The current level.
- Input h: The holding bit.
- Input a: The underlying action coordinate.
- Input aPrev: The previous action coordinate.
- Returns None. An image is shown and the thread is blocked until it is closed.
'''
# setup plot
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
    # draw grid lines
dxyz = 1.0 / self.m
for i in xrange(self.m + 1):
for j in xrange(self.m + 1):
for k in xrange(3):
x = [i * dxyz]*2
y = [j * dxyz]*2
z = [0, 1]
if k == 0:
ax.plot(x, y, z, 'k', alpha=0.25)
elif k == 1:
ax.plot(z, x, y, 'k', alpha=0.25)
else:
ax.plot(y, z, x, 'k', alpha=0.25)
# draw objects
for disk in disks:
coord = array(unravel_index(disk, self.worldSize)) / float(self.m) + dxyz / 2.0
ax.scatter(coord[0], coord[1], coord[2], c='b', s=100, marker='o')
for peg in pegs:
if peg == self.endEffectorIdx: continue
coord = array(unravel_index(peg, self.worldSize)) / float(self.m) + dxyz / 2.0
ax.scatter(coord[0], coord[1], coord[2], c='r', s=100, marker='^')
# draw observation area
corner = array(aPrev) / float(self.m)
size = 1.0 / 2.0**l
self.PlotCube(ax, [corner[0], corner[0] + size], [corner[1], corner[1] + size],
[corner[2], corner[2] + size], 'y', 0.2)
# draw area selected by robot
corner = array(a) / float(self.m)
size = 1.0 / 2.0**(l+1)
self.PlotCube(ax, [corner[0], corner[0] + size], [corner[1], corner[1] + size],
[corner[2], corner[2] + size], 'g', 0.2)
# plot properties
ax.set_xlim3d(0.12, 0.88)
ax.set_ylim3d(0.12, 0.88)
ax.set_zlim3d(0.12, 0.88)
ax.view_init(elev=15, azim=-10)
ax._axis3don = False
ax.set_aspect('equal')
fig.suptitle("t={}. l={}. h={}.".format(t, l, h))
pyplot.show(block=True)
def SaveQFunction(self):
'''Saves the current Q-value dictionary to a Python pickle file, model-hvs.pkl.'''
path = os.getcwd() + "/" + "model-hvs.pkl"
pickle.dump(self.Q, open(path, "wb"))
print("Saved {}.".format(path))
def UpdateQFunction(self, o, i, r, oo, ii):
'''Updates the current q-estimates, according to the Sarsa update rule, given an (overt) time
step of experience.
- Input o: A list of observations, [o_0, ..., o_L-1].
- Input i: A list of abstract actions, [i_0, ..., i_L-1].
- Input r: The (scalar) reward received after taking this action from this state.
- Input oo: List of observations in the next (overt) time step, [oo_0, ..., oo_L-1].
    - Input ii: List of abstract actions taken in the next (overt) time step, [ii_0, ..., ii_L-1].
- Returns None.
'''
for l in xrange(self.L):
idx = o[l] + (i[l],)
if l != self.L - 1:
ll = l + 1
jdx = o[ll] + (i[ll],)
rr = 0
else:
ll = 0
jdx = None if oo is None else oo[ll] + (ii[ll],)
rr = r
# update Q
self.Q[idx] = (1.0 - self.alpha) * self.Q[idx] + self.alpha * (rr + self.gamma * self.Q[jdx])
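# A minimal usage sketch with assumed parameter values (the state layout follows
# GetActionsAndObservations: peg flat indices, then disk flat indices, then the time step).
if __name__ == "__main__":
  demoParams = {"nObjects": 1, "worldSize": 4, "tMax": 2, "initQ": [10.0, 10.0],
    "alpha": 0.1, "gamma": 0.98, "plotImages": False}
  agent = RlAgentHsa(demoParams)
  state = (0, 63, 0)  # one peg at cell 0, one disk at cell 63, overt time t = 0
  a, i, o = agent.GetActionsAndObservations(state, epsilon=0.1)
  print("Selected flat action index: {}".format(a))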
|
the-stack_106_20187
|
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.base."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import pickle
# Dependency imports
from absl.testing import parameterized
import numpy as np
import six
from sonnet.python.modules import base
from sonnet.python.modules.base_errors import NotSupportedError
import tensorflow as tf
tfe = tf.contrib.eager
logging = tf.logging
class ModuleWithClassKeys(base.AbstractModule):
"""Dummy module that defines some keys as class attributes."""
POSSIBLE_INITIALIZER_KEYS = {"foo", "bar"}
class ModuleWithNoInitializerKeys(base.AbstractModule):
"""Dummy module without any intiailizer keys."""
pass
class ModuleWithCustomInitializerKeys(base.AbstractModule):
"""Dummy module that overrides get_possible_initializer_keys."""
@classmethod
def get_possible_initializer_keys(cls, custom_key):
return {"foo"} if custom_key else {"bar"}
class IdentityModule(base.AbstractModule):
"""Sonnet module that builds a single `tf.identity` op."""
def _build(self, inputs):
return tf.identity(inputs)
class NoInitIdentityModule(base.AbstractModule):
"""Sonnet module that inherits `base.AbstractModule.__init__`."""
def _build(self, inputs):
return tf.identity(inputs)
class NoSuperInitIdentityModule(base.AbstractModule):
"""Sonnet module that doesn't call `base.AbstractModule.__init__`."""
def __init__(self):
pass # Don't call superclass initializer.
def _build(self, inputs):
return tf.identity(inputs)
class SimpleModule(base.AbstractModule):
"""Simple module with variables created in constructor and build."""
def __init__(self, custom_getter=None, name="simple_module"):
super(SimpleModule, self).__init__(custom_getter=custom_getter,
name=name)
with self._enter_variable_scope():
self._b = tf.get_variable("b", dtype=tf.float32, shape=[10, 10])
def _build(self, inputs):
"""Connect a simple module to the graph."""
self._w = tf.get_variable("w", dtype=tf.float32, shape=[10, 10])
return self._w * inputs + self._b
class ComplexModule(base.AbstractModule):
"""Complex module consisting of two sub modules."""
def __init__(self, custom_getter=None, name="complex_module"):
super(ComplexModule, self).__init__(custom_getter=custom_getter,
name=name)
with self._enter_variable_scope():
self._a = SimpleModule(name="linear_1")
def _build(self, inputs):
self._b = SimpleModule(name="linear_2")
return self._b(self._a(inputs)) # pylint: disable=not-callable
class ModuleWithSubmodules(base.AbstractModule):
def __init__(self,
submodule_a,
submodule_b,
custom_getter=None,
name="module_with_submodules"):
super(ModuleWithSubmodules, self).__init__(
custom_getter=custom_getter, name=name)
self._submodule_a = submodule_a
self._submodule_b = submodule_b
def _build(self, inputs):
c = SimpleModule(name="simple_build")
d = ComplexModule(name="complex_build")
return d(self._submodule_a(inputs)) + self._submodule_b(c(inputs)) # pylint: disable=not-callable
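# Illustrative connection of the fixtures above (mirrors the tests below; names are
# only for demonstration):
#   module = ModuleWithSubmodules(submodule_a=SimpleModule(name="simple_submodule"),
#                                 submodule_b=ComplexModule(name="complex_submodule"))
#   outputs = module(tf.ones(dtype=tf.float32, shape=[10, 10]))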
# @tf.contrib.eager.run_all_tests_in_graph_and_eager_modes
class AbstractModuleTest(parameterized.TestCase, tf.test.TestCase):
def testInitializerKeys(self):
keys = ModuleWithClassKeys.get_possible_initializer_keys()
self.assertEqual(keys, {"foo", "bar"})
keys = ModuleWithNoInitializerKeys.get_possible_initializer_keys()
self.assertEqual(keys, set())
if six.PY2:
msg = "takes exactly 2 arguments"
else:
msg = "missing 1 required positional argument"
self.assertRaisesRegexp(
TypeError, msg,
ModuleWithCustomInitializerKeys.get_possible_initializer_keys)
keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(True)
self.assertEqual(keys, {"foo"})
keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(False)
self.assertEqual(keys, {"bar"})
def testMultipleGraphs(self):
id_mod = IdentityModule(name="identity")
# gpylint incorrectly thinks IdentityModule is not callable, so disable.
# pylint: disable=not-callable
with tf.Graph().as_default() as graph:
id_mod(tf.ones(dtype=tf.float32, shape=[42]))
self.assertEqual(id_mod._graph, graph)
with tf.Graph().as_default():
with self.assertRaisesRegexp(base.DifferentGraphError,
"Cannot connect module"):
id_mod(tf.ones(dtype=tf.float32, shape=[42]))
# pylint: enable=not-callable
def testNameScopeRecording(self):
if tf.executing_eagerly():
self.skipTest("Name scopes are not recorded in eager mode.")
id_mod = IdentityModule(name="foo")
# Connect inside different name scope contexts, check that each is recorded.
# pylint: disable=not-callable
id_mod(tf.ones(dtype=tf.float32, shape=[22]))
self.assertIn(id_mod.name_scopes, (("foo",), ("foo_1",)))
with tf.name_scope("blah"):
id_mod(tf.ones(dtype=tf.float32, shape=[23]))
self.assertIn(id_mod.name_scopes,
(("foo", "blah/foo"), ("foo_1", "blah/foo")))
with tf.name_scope("baz"):
id_mod(tf.ones(dtype=tf.float32, shape=[24]))
# pylint: enable=not-callable
self.assertIn(id_mod.name_scopes,
(("foo", "blah/foo", "baz/foo"),
("foo_1", "blah/foo", "baz/foo")))
def testNameScopeRecordingNotSupportedEager(self):
if not tf.executing_eagerly():
self.skipTest("Name scopes are recorded in graph mode.")
id_mod = IdentityModule(name="foo")
id_mod(tf.ones(dtype=tf.float32, shape=[22]))
with self.assertRaisesRegexp(base.NotSupportedError,
"not supported in eager"):
id_mod.name_scopes # pylint: disable=pointless-statement
def testSubgraphsRecording(self):
if tf.executing_eagerly():
self.skipTest("Subgraphs are not recorded in eager mode.")
id_mod = IdentityModule(name="foo")
with self.assertRaisesRegexp(base.NotConnectedError,
"not instantiated yet"):
id_mod.last_connected_subgraph()
# pylint: disable=not-callable
inputs = tf.ones(dtype=tf.float32, shape=[21])
outputs = id_mod(inputs)
with tf.name_scope("blah"):
blah_inputs = tf.ones(dtype=tf.float32, shape=[22])
blah_outputs = id_mod(blah_inputs)
with tf.name_scope("baz"):
baz_inputs = tf.ones(dtype=tf.float32, shape=[23])
baz_outputs = id_mod(baz_inputs)
# pylint: enable=not-callable
subgraphs = id_mod.connected_subgraphs
self.assertEqual(id_mod.last_connected_subgraph.name_scope, "baz/foo")
self.assertIs(id_mod.last_connected_subgraph, subgraphs[2])
self.assertIs(subgraphs[0].module, id_mod)
self.assertIn(subgraphs[0].name_scope, ("foo", "foo_1"))
self.assertEqual(subgraphs[1].name_scope, "blah/foo")
self.assertEqual(subgraphs[2].name_scope, "baz/foo")
self.assertIs(subgraphs[0].inputs["inputs"], inputs)
self.assertIs(subgraphs[1].inputs["inputs"], blah_inputs)
self.assertIs(subgraphs[2].inputs["inputs"], baz_inputs)
self.assertIs(subgraphs[0].outputs, outputs)
self.assertIs(subgraphs[1].outputs, blah_outputs)
self.assertIs(subgraphs[2].outputs, baz_outputs)
def testSubgraphsNotRecordedEager(self):
if not tf.executing_eagerly():
self.skipTest("Subgraphs are recorded in graph mode")
id_mod = IdentityModule(name="foo")
with self.assertRaisesRegexp(base.NotSupportedError,
"not tracked in eager mode"):
id_mod.last_connected_subgraph()
# pylint: disable=not-callable
inputs = tf.ones(dtype=tf.float32, shape=[21])
id_mod(inputs)
with tf.name_scope("blah"):
blah_inputs = tf.ones(dtype=tf.float32, shape=[22])
id_mod(blah_inputs)
with tf.name_scope("baz"):
baz_inputs = tf.ones(dtype=tf.float32, shape=[23])
id_mod(baz_inputs)
# pylint: enable=not-callable
with self.assertRaisesRegexp(base.NotSupportedError,
"not tracked in eager mode"):
id_mod.connected_subgraphs # pylint: disable=pointless-statement
def testInitNoNamedArgs(self):
"""Tests if calling __init__ without named args raises a ValueError."""
with self.assertRaises(ValueError):
NoInitIdentityModule("foobar")
def testInitInvalidTypeArgs(self):
"""Tests if calling __init__ without a string name raises a TypeError."""
with self.assertRaises(TypeError):
NoInitIdentityModule(name=123)
def testInitNoArgs(self):
"""Tests if calling __init__ with no args uses correct defaults."""
module = NoInitIdentityModule()
self.assertEqual(module.module_name, "no_init_identity_module")
def testInitNoSuper(self):
"""Tests if a __call__ with no __init__ raises an error."""
module = NoSuperInitIdentityModule()
with self.assertRaises(base.NotInitializedError):
module(tf.constant([1])) # pylint: disable=not-callable
def testPicklingNotSupported(self):
module = IdentityModule()
with self.assertRaisesRegexp(base.NotSupportedError,
"cannot be serialized"):
# Writing the object to a string will fail.
pickle.dumps(module)
def testCustomGetter(self):
connection_count = {"x": 0}
def custom_getter(getter, name, *args, **kwargs):
connection_count["x"] += 1
return getter(name, *args, **kwargs)
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
with tf.variable_scope("scope"):
module = SimpleModule(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertEqual(0, connection_count["x"])
module = SimpleModule(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertEqual(2, connection_count["x"]) # w & b
module = SimpleModule(custom_getter={"w": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(3, connection_count["x"]) # w
module = SimpleModule(custom_getter={"w.*": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, connection_count["x"]) # w
module = SimpleModule(custom_getter={".*": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
self.assertEqual(6, connection_count["x"]) # w & b
err = r"More than one custom_getter matched scope/mod5/w \(w\):.*"
with self.assertRaisesRegexp(KeyError, err):
module = SimpleModule(
custom_getter={".*": custom_getter, "w.*": custom_getter},
name="mod5")
module(inputs) # pylint: disable=not-callable
err = "Given custom_getter is not callable."
with self.assertRaisesRegexp(TypeError, err):
module = SimpleModule(custom_getter=0, name="mod6")
with self.assertRaisesRegexp(TypeError, err):
module = SimpleModule(custom_getter={"w": 0}, name="mod7")
def testCustomGetterNested(self):
def custom_getter(getter, name, *args, **kwargs):
kwargs["trainable"] = False
return getter(name, *args, **kwargs)
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
with tf.variable_scope("scope"):
module = ComplexModule(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertLen(tf.trainable_variables(), 4)
module = ComplexModule(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertLen(tf.trainable_variables(), 4) # All variables.
module = ComplexModule(custom_getter={".*/w": custom_getter},
name="mod3")
module(inputs) # pylint: disable=not-callable
trainable_names = [v.name for v in tf.trainable_variables()]
self.assertLen(trainable_names, 6) # linear_1/w and linear_2/w.
self.assertIn("scope/mod3/linear_1/b:0", trainable_names)
self.assertIn("scope/mod3/linear_2/b:0", trainable_names)
module = ComplexModule(custom_getter={".*/b": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
trainable_names = [v.name for v in tf.trainable_variables()]
self.assertLen(trainable_names, 8) # linear_1/b and linear_2/b.
self.assertIn("scope/mod4/linear_1/w:0", trainable_names)
self.assertIn("scope/mod4/linear_2/w:0", trainable_names)
module = ComplexModule(custom_getter={".*": custom_getter}, name="mod5")
module(inputs) # pylint: disable=not-callable
self.assertLen(tf.trainable_variables(), 8) # All variables.
module = ComplexModule(custom_getter={"w": custom_getter}, name="mod6")
module(inputs) # pylint: disable=not-callable
self.assertLen(tf.trainable_variables(), 12) # No variables.
@parameterized.parameters(
[lambda m: m.get_all_variables(),
lambda m: m.variables,
lambda m: m.trainable_variables]
)
def testGetAllTrainableVariables(self, all_trainable_variables):
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
submodule_a = SimpleModule(name="simple_submodule")
submodule_b = ComplexModule(name="complex_submodule")
module = ModuleWithSubmodules(
submodule_a=submodule_a, submodule_b=submodule_b)
with self.assertRaisesRegexp(base.NotConnectedError,
"not instantiated yet"):
all_trainable_variables(module)
module(inputs) # pylint: disable=not-callable
# Check correct for SimpleModule.
submodule_a_variables = submodule_a.get_variables()
submodule_a_variable_names = sorted(
[str(v.name) for v in submodule_a_variables])
submodule_a_all_variables = all_trainable_variables(submodule_a)
submodule_a_all_variable_names = sorted(
[str(v.name) for v in submodule_a_all_variables])
self.assertEqual(submodule_a_variable_names, submodule_a_all_variable_names)
self.assertEqual([
"simple_submodule/b:0",
"simple_submodule/w:0",
], submodule_a_variable_names)
# Check correct for ComplexModule
submodule_b_variables = all_trainable_variables(submodule_b)
submodule_b_variable_names = sorted(
[str(v.name) for v in submodule_b_variables])
self.assertEqual([
"complex_submodule/linear_1/b:0",
"complex_submodule/linear_1/w:0",
"complex_submodule/linear_2/b:0",
"complex_submodule/linear_2/w:0",
], submodule_b_variable_names)
all_variables = all_trainable_variables(module)
all_variable_names = sorted([str(v.name) for v in all_variables])
self.assertEqual([
"complex_submodule/linear_1/b:0",
"complex_submodule/linear_1/w:0",
"complex_submodule/linear_2/b:0",
"complex_submodule/linear_2/w:0",
"module_with_submodules/complex_build/linear_1/b:0",
"module_with_submodules/complex_build/linear_1/w:0",
"module_with_submodules/complex_build/linear_2/b:0",
"module_with_submodules/complex_build/linear_2/w:0",
"module_with_submodules/simple_build/b:0",
"module_with_submodules/simple_build/w:0",
"simple_submodule/b:0",
"simple_submodule/w:0",
], all_variable_names)
self.assertEmpty(
module.get_all_variables(collection=tf.GraphKeys.LOCAL_VARIABLES))
# Create another ModuleWithSubmodules with the same submodules
module = ModuleWithSubmodules(
submodule_a=submodule_a, submodule_b=submodule_b)
module(inputs) # pylint: disable=not-callable
all_variables = all_trainable_variables(module)
all_variable_names = sorted([str(v.name) for v in all_variables])
self.assertEqual([
"complex_submodule/linear_1/b:0",
"complex_submodule/linear_1/w:0",
"complex_submodule/linear_2/b:0",
"complex_submodule/linear_2/w:0",
"module_with_submodules_1/complex_build/linear_1/b:0",
"module_with_submodules_1/complex_build/linear_1/w:0",
"module_with_submodules_1/complex_build/linear_2/b:0",
"module_with_submodules_1/complex_build/linear_2/w:0",
"module_with_submodules_1/simple_build/b:0",
"module_with_submodules_1/simple_build/w:0",
"simple_submodule/b:0",
"simple_submodule/w:0",
], all_variable_names)
@parameterized.parameters(
[lambda m: m.get_all_variables(tf.GraphKeys.LOCAL_VARIABLES),
lambda m: m.non_trainable_variables])
def testGetAllLocalVariables(self, get_non_trainable_variables):
def local_custom_getter(getter, *args, **kwargs):
kwargs["trainable"] = False
if "collections" in kwargs and kwargs["collections"] is not None:
kwargs["collections"] += [tf.GraphKeys.LOCAL_VARIABLES]
else:
kwargs["collections"] = [tf.GraphKeys.LOCAL_VARIABLES]
return getter(*args, **kwargs)
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
# Create a new ModuleWithSubmodules that uses all local variables
with tf.variable_scope("", custom_getter=local_custom_getter):
submodule_a = SimpleModule(name="simple_submodule")
submodule_b = ComplexModule(name="complex_submodule")
local_module = ModuleWithSubmodules(
submodule_a=submodule_a, submodule_b=submodule_b)
local_module(inputs) # pylint: disable=not-callable
self.assertEmpty(local_module.get_all_variables())
self.assertEmpty(tf.all_variables())
self.assertLen(tf.local_variables(), 12)
all_variables = get_non_trainable_variables(local_module)
all_variable_names = sorted([str(v.name) for v in all_variables])
self.assertEqual([
"complex_submodule/linear_1/b:0",
"complex_submodule/linear_1/w:0",
"complex_submodule/linear_2/b:0",
"complex_submodule/linear_2/w:0",
"module_with_submodules/complex_build/linear_1/b:0",
"module_with_submodules/complex_build/linear_1/w:0",
"module_with_submodules/complex_build/linear_2/b:0",
"module_with_submodules/complex_build/linear_2/w:0",
"module_with_submodules/simple_build/b:0",
"module_with_submodules/simple_build/w:0",
"simple_submodule/b:0",
"simple_submodule/w:0",
], all_variable_names)
def testGetAllVariablesWithConditionalConstruction(self):
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
cond = tf.constant(0.)
module_a = SimpleModule(name="module_a")
module_b = SimpleModule(name="module_b")
_ = tf.cond(cond > 0, lambda: module_a(inputs), lambda: module_b(inputs)) # pylint: disable=not-callable
if tf.executing_eagerly():
      # In eager mode only the branch that is actually taken is executed.
msg = "module_a not instantiated yet"
with self.assertRaisesRegexp(base.NotConnectedError, msg):
module_a.get_all_variables()
else:
# check module_a
all_variables = module_a.get_all_variables()
all_variable_names = sorted([str(v.name) for v in all_variables])
self.assertEqual(["module_a/b:0", "module_a/w:0"], all_variable_names)
# check module_b
all_variables = module_b.get_all_variables()
all_variable_names = sorted([str(v.name) for v in all_variables])
self.assertEqual(["module_b/b:0", "module_b/w:0"], all_variable_names)
@parameterized.parameters(None, "", "complex_module")
def testVariablesFromNestedModule(self, name):
outer = ComplexModule(name=name)
outer(tf.zeros([10, 10]))
inner1 = outer._b
outer(tf.zeros([10, 10]))
inner2 = outer._b
    # Calling the outer module triggers the inner module to be re-constructed. The
# new inner module should have literally the same variables as the old one.
self.assertIsNot(inner1, inner2)
self.assertNotEmpty(inner1.variables)
self.assertLen(inner2.variables, len(inner1.variables))
for v1, v2 in zip(inner1.variables, inner2.variables):
self.assertIs(v1, v2)
def testCallSignatureAndDocstring(self):
my_module = SimpleModule()
self.assertEqual(
inspect.getargspec(my_module.__call__),
inspect.getargspec(my_module._build))
self.assertEqual(my_module.__call__.__doc__, my_module._build.__doc__)
def _make_model_with_params(inputs, output_size):
weight_shape = [inputs.get_shape().as_list()[-1], output_size]
weight = tf.get_variable("w", shape=weight_shape, dtype=inputs.dtype)
return tf.matmul(inputs, weight)
# @tf.contrib.eager.run_all_tests_in_graph_and_eager_modes
class ModuleTest(tf.test.TestCase):
def testFunctionType(self):
with self.assertRaises(TypeError) as cm:
base.Module(build="not_a_function")
self.assertEqual(str(cm.exception), "Input 'build' must be callable.")
def testSharing(self):
batch_size = 3
in_size = 4
input_data = np.random.rand(batch_size, in_size)
inputs1 = tf.constant(input_data)
inputs2 = tf.constant(input_data)
build = functools.partial(_make_model_with_params, output_size=10)
model = base.Module(build)
self.assertEqual(model.scope_name, "make_model_with_params")
outputs1 = model(inputs1)
outputs2 = model(inputs2)
self.evaluate(tf.global_variables_initializer())
outputs1, outputs2 = self.evaluate([outputs1, outputs2])
self.assertAllClose(outputs1, outputs2)
def testCustomGetter(self):
def simple_module_build(inputs):
w = tf.get_variable("w", dtype=tf.float32, shape=[10, 10])
b = tf.get_variable("b", dtype=tf.float32, shape=[10, 10])
return w * inputs + b
connection_count = {"x": 0}
def custom_getter(getter, name, *args, **kwargs):
connection_count["x"] += 1
return getter(name, *args, **kwargs)
create_module = functools.partial(base.Module, build=simple_module_build)
inputs = tf.ones(dtype=tf.float32, shape=[10, 10])
with tf.variable_scope("scope"):
module = create_module(name="mod1")
module(inputs) # pylint: disable=not-callable
self.assertEqual(0, connection_count["x"])
module = create_module(custom_getter=custom_getter, name="mod2")
module(inputs) # pylint: disable=not-callable
self.assertEqual(2, connection_count["x"]) # w & b
module = create_module(custom_getter={"w": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(3, connection_count["x"]) # w
module = create_module(custom_getter={"w.*": custom_getter}, name="mod3")
module(inputs) # pylint: disable=not-callable
self.assertEqual(4, connection_count["x"]) # w
module = create_module(custom_getter={".*": custom_getter}, name="mod4")
module(inputs) # pylint: disable=not-callable
self.assertEqual(6, connection_count["x"]) # w & b
err = r"More than one custom_getter matched scope/mod5/w \(w\):.*"
with self.assertRaisesRegexp(KeyError, err):
module = create_module(
custom_getter={".*": custom_getter, "w.*": custom_getter},
name="mod5")
module(inputs) # pylint: disable=not-callable
err = "Given custom_getter is not callable."
with self.assertRaisesRegexp(TypeError, err):
module = create_module(custom_getter=0, name="mod6")
with self.assertRaisesRegexp(TypeError, err):
module = create_module(custom_getter={"w": 0}, name="mod7")
def testGetVariablesDifferentGraphScope(self):
with tf.Graph().as_default():
inputs = tf.constant(np.random.rand(10, 10), dtype=tf.float32)
simple_module = SimpleModule()
simple_module(inputs) # pylint: disable=not-callable
# Should have 2 variables whether queried in or out of the Graph scope.
self.assertEqual(len(simple_module.get_variables()), 2)
self.assertEqual(len(simple_module.get_variables()), 2)
def testGraphProperty(self):
with tf.Graph().as_default() as graph_1:
id_a = IdentityModule()
id_a(tf.constant(np.zeros(10))) # pylint: disable=not-callable
id_b = IdentityModule()
id_b(tf.constant(np.ones(5))) # pylint: disable=not-callable
with tf.Graph().as_default() as graph_2:
id_c = IdentityModule()
id_c(tf.constant(np.eye(3))) # pylint: disable=not-callable
self.assertEqual(id_a.graph, id_b.graph)
self.assertEqual(id_a.graph, graph_1)
self.assertNotEqual(id_a.graph, id_c.graph)
self.assertEqual(id_c.graph, graph_2)
class ConnectionObserverTest(tf.test.TestCase):
def _connection_observer(self, subgraph):
self._connected_subgraphs.append(subgraph)
def setUp(self):
self._inputs = tf.zeros(shape=(10, 10), dtype=tf.float32)
self._connected_subgraphs = []
def testObservesWrappedFunction(self):
activation_module = base.Module(tf.nn.relu)
with base.observe_connections(self._connection_observer):
outputs = activation_module(self._inputs)
self.assertEqual(1, len(self._connected_subgraphs))
self.assertIs(activation_module, self._connected_subgraphs[0].module)
self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["args"][0])
self.assertIs(self._connected_subgraphs[0].outputs, outputs)
def testObservesSimpleModule(self):
simple_module = SimpleModule()
with base.observe_connections(self._connection_observer):
outputs = simple_module(self._inputs)
self.assertEqual(1, len(self._connected_subgraphs))
self.assertIs(simple_module, self._connected_subgraphs[0].module)
self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["inputs"])
self.assertIs(self._connected_subgraphs[0].outputs, outputs)
def testObservesComplexModule(self):
complex_module = ComplexModule()
with base.observe_connections(self._connection_observer):
outputs = complex_module(self._inputs)
self.assertEqual(3, len(self._connected_subgraphs))
self.assertIsInstance(self._connected_subgraphs[0].module, SimpleModule)
self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["inputs"])
self.assertIsInstance(self._connected_subgraphs[1].module, SimpleModule)
self.assertIs(self._connected_subgraphs[0].outputs,
self._connected_subgraphs[1].inputs["inputs"])
self.assertIs(self._connected_subgraphs[1].outputs, outputs)
self.assertIs(complex_module, self._connected_subgraphs[2].module)
self.assertIs(self._connected_subgraphs[2].outputs, outputs)
class MatMulModule(base.AbstractModule):
call_count = 0
def _build(self, x):
self.call_count += 1
self.w = tf.get_variable("w", [x.shape[1], 32])
return x * self.w
# @tf.contrib.eager.run_all_tests_in_graph_and_eager_modes
class DefunTest(tf.test.TestCase):
def testDefunWrappedProperty(self):
module = MatMulModule()
self.assertFalse(module.defun_wrapped)
for _ in range(2):
module.defun()
self.assertTrue(module.defun_wrapped)
def testCallWithDefun(self):
module = MatMulModule()
module.defun()
batch_size = 10
output = module(tf.zeros([batch_size, 1]))
self.assertListEqual(output.shape.as_list(), [batch_size, 32])
def testCallWithDefunTracingTwice(self):
module = MatMulModule()
module.defun()
batch_size = 10
for _ in range(2):
output = module(tf.zeros([batch_size, 1]))
self.assertListEqual(output.shape.as_list(), [batch_size, 32])
self.assertEqual(module.call_count, 1)
# Calling with a different batch_size causes `defun` to re-trace our module.
batch_size *= 2
for _ in range(2):
output = module(tf.zeros([batch_size, 1]))
self.assertListEqual(output.shape.as_list(), [batch_size, 32])
self.assertEqual(module.call_count, 2)
def testGetVariablesDisabledWhenUsingDefun(self):
module = MatMulModule()
module.defun()
module(tf.zeros([1, 1]))
if tf.executing_eagerly():
msg = ".*get_variables.*not supported .* wrapped with defun"
with self.assertRaisesRegexp(NotSupportedError, msg):
module.get_variables()
else:
self.assertEqual(module.get_variables(), (module.w,))
if __name__ == "__main__":
tf.test.main()
|
the-stack_106_20188
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from markdown_deux import markdown
from comments.models import Comment
from .utils import get_read_time
# Create your models here.
# MVC MODEL VIEW CONTROLLER
#Post.objects.all()
#Post.objects.create(user=user, title="Some time")
class PostManager(models.Manager):
def active(self, *args, **kwargs):
# Post.objects.all() = super(PostManager, self).all()
return super(PostManager, self).filter(draft=False).filter(publish__lte=timezone.now())
def upload_location(instance, filename):
#filebase, extension = filename.split(".")
#return "%s/%s.%s" %(instance.id, instance.id, extension)
PostModel = instance.__class__
new_id = PostModel.objects.order_by("id").last().id + 1
"""
instance.__class__ gets the model Post. We must use this method because the model is defined below.
Then create a queryset ordered by the "id"s of each object,
Then we get the last object in the queryset with `.last()`
Which will give us the most recently created Model instance
    We add 1 to it, so we get what should be the same id as the post we are creating.
"""
return "%s/%s" %(new_id, filename)
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
title = models.CharField(max_length=120)
slug = models.SlugField(unique=True)
image = models.ImageField(upload_to=upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
height_field = models.IntegerField(default=0)
width_field = models.IntegerField(default=0)
content = models.TextField()
draft = models.BooleanField(default=False)
publish = models.DateField(auto_now=False, auto_now_add=False)
read_time = models.IntegerField(default=0) # models.TimeField(null=True, blank=True) #assume minutes
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
objects = PostManager()
def __unicode__(self):
return self.title
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("posts:detail", kwargs={"slug": self.slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def get_markdown(self):
content = self.content
markdown_text = markdown(content)
return mark_safe(markdown_text)
@property
def comments(self):
instance = self
qs = Comment.objects.filter_by_instance(instance)
return qs
@property
def get_content_type(self):
instance = self
content_type = ContentType.objects.get_for_model(instance.__class__)
return content_type
def create_slug(instance, new_slug=None):
slug = slugify(instance.title)
if new_slug is not None:
slug = new_slug
qs = Post.objects.filter(slug=slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" %(slug, qs.first().id)
return create_slug(instance, new_slug=new_slug)
return slug
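# For example (hypothetical data): if a post titled "Hello World" already exists with
# slug "hello-world" and id 7, the next post with that title gets "hello-world-7"; if that
# slug is taken too, the function recurses until a free slug is found.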
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = create_slug(instance)
if instance.content:
html_string = instance.get_markdown()
read_time_var = get_read_time(html_string)
instance.read_time = read_time_var
pre_save.connect(pre_save_post_receiver, sender=Post)
|
the-stack_106_20192
|
"""
Discogs API Query Tool: Connect to Discogs API and pull merchant listing data
"""
import sys
import json
import csv
import argparse
import logging
import requests
class MissingAPIKeyOrSecret(Exception):
"""Error logging for no input value"""
def __init__(self, error_field):
super(MissingAPIKeyOrSecret, self).__init__()
self.msg = f'Missing API {error_field}. Aborting script.'
class InvalidAPIKeyOrSecret(Exception):
"""Error logging for no input value"""
def __init__(self):
super(InvalidAPIKeyOrSecret, self).__init__()
        self.msg = 'Invalid API key/secret. Aborting script.'
class SellerDoesNotExistError(Exception):
"""Error logging when Discogs seller does not exist"""
def __init__(self, seller):
super(SellerDoesNotExistError, self).__init__()
self.seller = seller
self.msg = f'Could not find seller "{seller}". Aborting script.'
class APICaller:
"""Class to make API calls"""
def __init__(self, seller, key, secret):
"""Initialize API call config
Args:
seller (str): Username of the seller
key (str): API key to access API
secret (str): API secret to access API
"""
self.seller = seller
self.key = key
self.secret = secret
self.api_query = None
self.response_status_code = None
self.response = None
self.top_listing = None
def create_api_query(self):
"""Create API request URL"""
status, sort, sort_order = 'for sale', 'price', 'desc'
api_query = f'https://api.discogs.com/users/{self.seller}' \
+ f'/inventory?status={status}&sort={sort}' \
+ f'&sort_order={sort_order}&page=1&per_page=100' \
+ f'&key={self.key}&secret={self.secret}'
self.api_query = api_query
def validate_request(self):
"""Check if provided API key and secret are valid"""
if self.key is None:
raise MissingAPIKeyOrSecret('key')
if self.secret is None:
raise MissingAPIKeyOrSecret('secret')
if self.response_status_code == 401:
raise InvalidAPIKeyOrSecret()
if self.response_status_code == 404:
raise SellerDoesNotExistError(self.seller)
def get_seller_listings(self):
"""Makes an API call to Discogs, validates API key/secret and
seller, and then sets the seller's listing data and top listing record
as class attributes.
"""
try:
req = requests.get(self.api_query)
self.response_status_code = req.status_code
self.response = json.loads(req.text)
self.validate_request()
except requests.exceptions.RequestException as error:
logging.error(error)
sys.exit()
except MissingAPIKeyOrSecret as err:
logging.error(err.msg, exc_info=True)
sys.exit()
except InvalidAPIKeyOrSecret as err:
logging.error(err.msg, exc_info=True)
sys.exit()
except SellerDoesNotExistError as err:
logging.error(err.msg, exc_info=True)
sys.exit()
def get_top_listing(self):
"""Get most expensive listing from API response"""
self.top_listing = self.response['listings'][0]
def dump_top_listing(self):
"""Dump most expensive record in seller's inventory into JSON file
Args:
seller: Username of the seller
seller_data: Dictionary of seller's listings
"""
output_path = f'./output/{self.seller}_top_listing.json'
self.get_top_listing()
with open(output_path, 'w') as output:
json.dump(self.top_listing,
output,
indent=4,
ensure_ascii=False)
print(f'Top listing exported to {output_path}')
def dump_listings_to_csv(self):
"""Dump seller's inventory into CSV file
Args:
seller: Username of the seller
seller_data: Dictionary of seller's listings
"""
output_path = f'./output/{self.seller}_listings.csv'
with open(output_path, 'w') as output:
writer = csv.writer(output)
self.write_header(writer)
for item in self.response['listings']:
artist = item['release']['artist']
title = item['release']['title']
price = item['price']['value']
currency = item['price']['currency']
url = item['uri']
writer.writerow([artist, title, price, currency, url])
print(f'Listings exported to {output_path}')
@staticmethod
def write_header(csv_writer):
"""Write the header for the CSV output for the seller's listings
Args:
csv_writer: csv.writer() object
"""
csv_headers = ['listings', 'title', 'price', 'currency', 'url']
csv_writer.writerow(csv_headers)
def parse_args():
"""Parse arguments from the CLI"""
parser = argparse.ArgumentParser(description='Look up Discogs seller\'s \
most expensive records')
parser.add_argument('--seller',
help='Discogs seller username to query',
default='black_snake_moan')
parser.add_argument('--key',
help='Discogs API key (required)')
parser.add_argument('--secret',
help='Discogs API secret (required)')
return parser.parse_args()
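# Example invocation (script filename and credentials are placeholders):
#   python discogs_query.py --seller black_snake_moan --key <API_KEY> --secret <API_SECRET>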
def main():
"""Run the module"""
args = parse_args()
api = APICaller(args.seller, args.key, args.secret)
api.create_api_query()
api.get_seller_listings()
api.dump_top_listing()
api.dump_listings_to_csv()
if __name__ == '__main__':
main()
|
the-stack_106_20193
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2016 (ita)
"""
Utilities and platform-specific fixes
The portability fixes try to provide a consistent behavior of the Waf API
through Python versions 2.5 to 3.X and across different platforms (win32, linux, etc)
"""
import os, sys, errno, traceback, inspect, re, datetime, platform, base64
try:
import cPickle
except ImportError:
import pickle as cPickle
# leave this
if os.name == 'posix' and sys.version_info[0] < 3:
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
else:
import subprocess
from collections import deque, defaultdict
try:
import _winreg as winreg
except ImportError:
try:
import winreg
except ImportError:
winreg = None
from waflib import Errors
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
# never fail to enable fixes from another module
pass
try:
import threading
except ImportError:
if not 'JOBS' in os.environ:
# no threading :-(
os.environ['JOBS'] = '1'
class threading(object):
"""
A fake threading class for platforms lacking the threading module.
Use ``waf -j1`` on those platforms
"""
pass
class Lock(object):
"""Fake Lock class"""
def acquire(self):
pass
def release(self):
pass
threading.Lock = threading.Thread = Lock
SIG_NIL = 'SIG_NIL_SIG_NIL_'.encode()
"""Arbitrary null value for hashes. Modify this value according to the hash function in use"""
O644 = 420
"""Constant representing the permissions for regular files (0644 raises a syntax error on python 3)"""
O755 = 493
"""Constant representing the permissions for executable files (0755 raises a syntax error on python 3)"""
rot_chr = ['\\', '|', '/', '-']
"List of characters to use when displaying the throbber (progress bar)"
rot_idx = 0
"Index of the current throbber character (progress bar)"
class ordered_iter_dict(dict):
"""Ordered dictionary that provides iteration from the most recently inserted keys first"""
def __init__(self, *k, **kw):
self.lst = deque()
dict.__init__(self, *k, **kw)
def clear(self):
dict.clear(self)
self.lst = deque()
def __setitem__(self, key, value):
if key in dict.keys(self):
self.lst.remove(key)
dict.__setitem__(self, key, value)
self.lst.append(key)
def __delitem__(self, key):
dict.__delitem__(self, key)
try:
self.lst.remove(key)
except ValueError:
pass
def __iter__(self):
return reversed(self.lst)
def keys(self):
return reversed(self.lst)
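# Illustrative behaviour: iteration yields the most recently (re)inserted keys first, e.g.
#   d = ordered_iter_dict()
#   d['a'] = 1; d['b'] = 2; d['a'] = 3
#   list(d) == ['a', 'b']   # 'a' was re-inserted last, so it comes out first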
class lru_node(object):
"""
Used by :py:class:`waflib.Utils.lru_cache`
"""
__slots__ = ('next', 'prev', 'key', 'val')
def __init__(self):
self.next = self
self.prev = self
self.key = None
self.val = None
class lru_cache(object):
"""A simple least-recently used cache that suits our purposes"""
__slots__ = ('maxlen', 'table', 'head')
def __init__(self, maxlen=100):
self.maxlen = maxlen
"""
Maximum amount of elements in the cache
"""
self.table = {}
"""
Mapping key-value
"""
self.head = lru_node()
for x in range(maxlen - 1):
node = lru_node()
node.prev = self.head.prev
node.next = self.head
node.prev.next = node
node.next.prev = node
def __getitem__(self, key):
node = self.table[key]
# assert(key==node.key)
if node is self.head:
return node.val
# detach the node found
node.prev.next = node.next
node.next.prev = node.prev
# replace the head
node.next = self.head.next
node.prev = self.head
node.next.prev = node
node.prev.next = node
self.head = node
return node.val
def __setitem__(self, key, val):
# go past the head
node = self.head = self.head.next
try:
# remove existing keys if present
del self.table[node.key]
except KeyError:
pass
node.key = key
node.val = val
self.table[key] = node
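# Illustrative behaviour: at most `maxlen` entries are kept, and inserting a new key
# re-uses the slot of the least recently accessed one, e.g.
#   cache = lru_cache(maxlen=2)
#   cache['a'] = 1; cache['b'] = 2
#   cache['a']          # touching 'a' makes it the most recently used entry
#   cache['c'] = 3      # overwrites the slot of 'b', the least recently used entry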
is_win32 = os.sep == '\\' or sys.platform == 'win32' # msys2
"""
Whether this system is a Windows series
"""
def readf(fname, m='r', encoding='ISO8859-1'):
"""
Reads an entire file into a string. See also :py:meth:`waflib.Node.Node.readf`::
def build(ctx):
from waflib import Utils
txt = Utils.readf(self.path.find_node('wscript').abspath())
txt = ctx.path.find_node('wscript').read()
:type fname: string
:param fname: Path to file
:type m: string
:param m: Open mode
:type encoding: string
:param encoding: encoding value, only used for python 3
:rtype: string
:return: Content of the file
"""
if sys.hexversion > 0x3000000 and not 'b' in m:
m += 'b'
f = open(fname, m)
try:
txt = f.read()
finally:
f.close()
if encoding:
txt = txt.decode(encoding)
else:
txt = txt.decode()
else:
f = open(fname, m)
try:
txt = f.read()
finally:
f.close()
return txt
def writef(fname, data, m='w', encoding='ISO8859-1'):
"""
Writes an entire file from a string.
See also :py:meth:`waflib.Node.Node.writef`::
def build(ctx):
from waflib import Utils
txt = Utils.writef(self.path.make_node('i_like_kittens').abspath(), 'some data')
self.path.make_node('i_like_kittens').write('some data')
:type fname: string
:param fname: Path to file
:type data: string
:param data: The contents to write to the file
:type m: string
:param m: Open mode
:type encoding: string
:param encoding: encoding value, only used for python 3
"""
if sys.hexversion > 0x3000000 and not 'b' in m:
data = data.encode(encoding)
m += 'b'
f = open(fname, m)
try:
f.write(data)
finally:
f.close()
def h_file(fname):
"""
Computes a hash value for a file by using md5. Use the md5_tstamp
extension to get faster build hashes if necessary.
:type fname: string
:param fname: path to the file to hash
:return: hash of the file contents
:rtype: string or bytes
"""
f = open(fname, 'rb')
m = md5()
try:
while fname:
fname = f.read(200000)
m.update(fname)
finally:
f.close()
return m.digest()
def readf_win32(f, m='r', encoding='ISO8859-1'):
flags = os.O_NOINHERIT | os.O_RDONLY
if 'b' in m:
flags |= os.O_BINARY
if '+' in m:
flags |= os.O_RDWR
try:
fd = os.open(f, flags)
except OSError:
raise IOError('Cannot read from %r' % f)
if sys.hexversion > 0x3000000 and not 'b' in m:
m += 'b'
f = os.fdopen(fd, m)
try:
txt = f.read()
finally:
f.close()
if encoding:
txt = txt.decode(encoding)
else:
txt = txt.decode()
else:
f = os.fdopen(fd, m)
try:
txt = f.read()
finally:
f.close()
return txt
def writef_win32(f, data, m='w', encoding='ISO8859-1'):
if sys.hexversion > 0x3000000 and not 'b' in m:
data = data.encode(encoding)
m += 'b'
flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT
if 'b' in m:
flags |= os.O_BINARY
if '+' in m:
flags |= os.O_RDWR
try:
fd = os.open(f, flags)
except OSError:
raise OSError('Cannot write to %r' % f)
f = os.fdopen(fd, m)
try:
f.write(data)
finally:
f.close()
def h_file_win32(fname):
try:
fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
except OSError:
raise OSError('Cannot read from %r' % fname)
f = os.fdopen(fd, 'rb')
m = md5()
try:
while fname:
fname = f.read(200000)
m.update(fname)
finally:
f.close()
return m.digest()
# always save these
readf_unix = readf
writef_unix = writef
h_file_unix = h_file
if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000:
# replace the default functions
readf = readf_win32
writef = writef_win32
h_file = h_file_win32
try:
x = ''.encode('hex')
except LookupError:
import binascii
def to_hex(s):
ret = binascii.hexlify(s)
if not isinstance(ret, str):
ret = ret.decode('utf-8')
return ret
else:
def to_hex(s):
return s.encode('hex')
to_hex.__doc__ = """
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
def listdir_win32(s):
"""
Lists the contents of a folder in a portable manner.
On Win32, returns the list of drive letters: ['C:', 'X:', 'Z:'] when an empty string is given.
:type s: string
:param s: a string, which can be empty on Windows
"""
if not s:
try:
import ctypes
except ImportError:
# there is nothing much we can do
return [x + ':\\' for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
else:
dlen = 4 # length of "?:\\x00"
maxdrives = 26
buf = ctypes.create_string_buffer(maxdrives * dlen)
ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf))
return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ]
if len(s) == 2 and s[1] == ":":
s += os.sep
if not os.path.isdir(s):
e = OSError('%s is not a directory' % s)
e.errno = errno.ENOENT
raise e
return os.listdir(s)
listdir = os.listdir
if is_win32:
listdir = listdir_win32
def num2ver(ver):
"""
Converts a string, tuple or version number into an integer. The number is supposed to have at most 4 digits::
from waflib.Utils import num2ver
num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0))
:type ver: string or tuple of numbers
:param ver: a version number
"""
if isinstance(ver, str):
ver = tuple(ver.split('.'))
if isinstance(ver, tuple):
ret = 0
for i in range(4):
if i < len(ver):
ret += 256**(3 - i) * int(ver[i])
return ret
return ver
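# e.g. num2ver('1.3.2') == num2ver((1, 3, 2)) == 0x01030200 == 16974336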
def ex_stack():
"""
Extracts the stack to display exceptions. Deprecated: use traceback.format_exc()
	:return: a string representing the last exception
"""
# TODO remove in waf 2.0
return traceback.format_exc()
def to_list(val):
"""
Converts a string argument to a list by splitting it by spaces.
Returns the object if not a string::
from waflib.Utils import to_list
lst = to_list('a b c d')
:param val: list of string or space-separated string
:rtype: list
:return: Argument converted to list
"""
if isinstance(val, str):
return val.split()
else:
return val
def split_path_unix(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret = path.split('/')[2:]
ret[0] = '/' + ret[0]
return ret
return path.split('/')
re_sp = re.compile('[/\\\\]+')
def split_path_win32(path):
if path.startswith('\\\\'):
ret = re_sp.split(path)[2:]
ret[0] = '\\' + ret[0]
return ret
return re_sp.split(path)
msysroot = None
def split_path_msys(path):
if path.startswith(('/', '\\')) and not path.startswith(('\\', '\\\\')):
# msys paths can be in the form /usr/bin
global msysroot
if not msysroot:
# msys has python 2.7 or 3, so we can use this
msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'iso8859-1')
msysroot = msysroot.strip()
path = os.path.normpath(msysroot + os.sep + path)
return split_path_win32(path)
if sys.platform == 'cygwin':
split_path = split_path_cygwin
elif is_win32:
if os.environ.get('MSYSTEM'):
split_path = split_path_msys
else:
split_path = split_path_win32
else:
split_path = split_path_unix
split_path.__doc__ = """
Splits a path by / or \\; do not confuse this function with ``os.path.split``
:type path: string
:param path: path to split
:return: list of string
"""
def check_dir(path):
"""
Ensures that a directory exists (similar to ``mkdir -p``).
:type path: string
:param path: Path to directory
:raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added.
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
if not os.path.isdir(path):
raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
def check_exe(name, env=None):
"""
Ensures that a program exists
:type name: string
	:param name: name or path of the program
:param env: configuration object
:type env: :py:class:`waflib.ConfigSet.ConfigSet`
:return: path of the program or None
	:raises: ValueError if *name* is an empty string
"""
if not name:
raise ValueError('Cannot execute an empty string!')
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(name)
if fpath and is_exe(name):
return os.path.abspath(name)
else:
env = env or os.environ
for path in env['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, name)
if is_exe(exe_file):
return os.path.abspath(exe_file)
return None
def def_attrs(cls, **kw):
"""
Sets default attributes on a class instance
:type cls: class
:param cls: the class to update the given attributes in.
:type kw: dict
:param kw: dictionary of attributes names and values.
"""
for k, v in kw.items():
if not hasattr(cls, k):
setattr(cls, k, v)
def quote_define_name(s):
"""
Converts a string into an identifier suitable for C defines.
:type s: string
:param s: String to convert
:rtype: string
:return: Identifier suitable for C defines
"""
fu = re.sub('[^a-zA-Z0-9]', '_', s)
fu = re.sub('_+', '_', fu)
fu = fu.upper()
return fu
def h_list(lst):
"""
Hash lists. We would prefer to use hash(tup) for tuples because it is much more efficient,
but Python now enforces hash randomization by assuming everybody is running a web application.
:param lst: list to hash
:type lst: list of strings
:return: hash of the list
"""
return md5(repr(lst).encode()).digest()
def h_fun(fun):
"""
Hash functions
:param fun: function to hash
:type fun: function
:return: hash of the function
:rtype: string or bytes
"""
try:
return fun.code
except AttributeError:
try:
h = inspect.getsource(fun)
except EnvironmentError:
h = 'nocode'
try:
fun.code = h
except AttributeError:
pass
return h
def h_cmd(ins):
"""
Hashes objects recursively
:param ins: input object
:type ins: string or list or tuple or function
:rtype: string or bytes
"""
# this function is not meant to be particularly fast
if isinstance(ins, str):
# a command is either a string
ret = ins
elif isinstance(ins, list) or isinstance(ins, tuple):
# or a list of functions/strings
ret = str([h_cmd(x) for x in ins])
else:
# or just a python function
ret = str(h_fun(ins))
if sys.hexversion > 0x3000000:
ret = ret.encode('iso8859-1', 'xmlcharrefreplace')
return ret
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
"""
Replaces ${VAR} with the value of VAR taken from a dict or a config set::
from waflib import Utils
s = Utils.subst_vars('${PREFIX}/bin', env)
:type expr: string
:param expr: String to perform substitution on
:param params: Dictionary or config set to look up variable values.
"""
def repl_var(m):
if m.group(1):
return '\\'
if m.group(2):
return '$'
try:
# ConfigSet instances may contain lists
return params.get_flat(m.group(3))
except AttributeError:
return params[m.group(3)]
# if you get a TypeError, it means that 'expr' is not a string...
# Utils.subst_vars(None, env) will not work
return reg_subst.sub(repl_var, expr)
def destos_to_binfmt(key):
"""
Returns the binary format based on the unversioned platform name,
and defaults to ``elf`` if nothing is found.
:param key: platform name
:type key: string
:return: string representing the binary format
"""
if key == 'darwin':
return 'mac-o'
elif key in ('win32', 'cygwin', 'uwin', 'msys'):
return 'pe'
return 'elf'
def unversioned_sys_platform():
"""
Returns the unversioned platform name.
Some Python platform names contain versions, that depend on
the build environment, e.g. linux2, freebsd6, etc.
This returns the name without the version number. Exceptions are
os2 and win32, which are returned verbatim.
:rtype: string
:return: Unversioned platform name
"""
s = sys.platform
if s.startswith('java'):
# The real OS is hidden under the JVM.
from java.lang import System
s = System.getProperty('os.name')
# see http://lopica.sourceforge.net/os.html for a list of possible values
if s == 'Mac OS X':
return 'darwin'
elif s.startswith('Windows '):
return 'win32'
elif s == 'OS/2':
return 'os2'
elif s == 'HP-UX':
return 'hp-ux'
elif s in ('SunOS', 'Solaris'):
return 'sunos'
else: s = s.lower()
# powerpc == darwin for our purposes
if s == 'powerpc':
return 'darwin'
if s == 'win32' or s == 'os2':
return s
if s == 'cli' and os.name == 'nt':
# ironpython is only on windows as far as we know
return 'win32'
return re.split('\d+$', s)[0]
def nada(*k, **kw):
"""
Does nothing
:return: None
"""
pass
class Timer(object):
"""
Simple object for timing the execution of commands.
Its string representation is the current time::
from waflib.Utils import Timer
timer = Timer()
a_few_operations()
s = str(timer)
"""
def __init__(self):
self.start_time = datetime.datetime.utcnow()
def __str__(self):
delta = datetime.datetime.utcnow() - self.start_time
days = delta.days
hours, rem = divmod(delta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
seconds += delta.microseconds * 1e-6
result = ''
if days:
result += '%dd' % days
if days or hours:
result += '%dh' % hours
if days or hours or minutes:
result += '%dm' % minutes
return '%s%.3fs' % (result, seconds)
def read_la_file(path):
"""
Reads property files, used by msvc.py
:param path: file to read
:type path: string
"""
sp = re.compile(r'^([^=]+)=\'(.*)\'$')
dc = {}
for line in readf(path).splitlines():
try:
_, left, right, _ = sp.split(line.strip())
dc[left] = right
except ValueError:
pass
return dc
def run_once(fun):
"""
Decorator: let a function cache its results, use like this::
@run_once
def foo(k):
return 345*2343
.. note:: in practice this can cause memory leaks, prefer a :py:class:`waflib.Utils.lru_cache`
:param fun: function to execute
:type fun: function
:return: the return value of the function executed
"""
cache = {}
def wrap(*k):
try:
return cache[k]
except KeyError:
ret = fun(*k)
cache[k] = ret
return ret
wrap.__cache__ = cache
wrap.__name__ = fun.__name__
return wrap
def get_registry_app_path(key, filename):
"""
Returns the value of a registry key for an executable
:type key: string
:type filename: list of string
"""
if not winreg:
return None
try:
result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0])
except WindowsError:
pass
else:
if os.path.isfile(result):
return result
def lib64():
"""
Guess the default ``/usr/lib`` extension for 64-bit applications
:return: '64' or ''
:rtype: string
"""
# default settings for /usr/lib
if os.sep == '/':
if platform.architecture()[0] == '64bit':
if os.path.exists('/usr/lib64') and not os.path.exists('/usr/lib32'):
return '64'
return ''
def sane_path(p):
# private function for the time being!
return os.path.abspath(os.path.expanduser(p))
process_pool = []
"""
List of processes started to execute sub-process commands
"""
def get_process():
"""
Returns a process object that can execute commands as sub-processes
:rtype: subprocess.Popen
"""
try:
return process_pool.pop()
except IndexError:
filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'processor.py'
cmd = [sys.executable, '-c', readf(filepath)]
return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0)
def run_prefork_process(cmd, kwargs, cargs):
"""
Delegates process execution to a pre-forked process instance.
"""
if not 'env' in kwargs:
kwargs['env'] = dict(os.environ)
try:
obj = base64.b64encode(cPickle.dumps([cmd, kwargs, cargs]))
except TypeError:
return run_regular_process(cmd, kwargs, cargs)
proc = get_process()
if not proc:
return run_regular_process(cmd, kwargs, cargs)
proc.stdin.write(obj)
proc.stdin.write('\n'.encode())
proc.stdin.flush()
obj = proc.stdout.readline()
if not obj:
raise OSError('Preforked sub-process %r died' % proc.pid)
process_pool.append(proc)
ret, out, err, ex, trace = cPickle.loads(base64.b64decode(obj))
if ex:
if ex == 'OSError':
raise OSError(trace)
elif ex == 'ValueError':
raise ValueError(trace)
else:
raise Exception(trace)
return ret, out, err
def run_regular_process(cmd, kwargs, cargs={}):
"""
Executes a subprocess command by using subprocess.Popen
"""
proc = subprocess.Popen(cmd, **kwargs)
if kwargs.get('stdout') or kwargs.get('stderr'):
out, err = proc.communicate(**cargs)
status = proc.returncode
else:
out, err = (None, None)
status = proc.wait(**cargs)
return status, out, err
def run_process(cmd, kwargs, cargs={}):
"""
Executes a subprocess by using a pre-forked process when possible
or falling back to subprocess.Popen. See :py:func:`waflib.Utils.run_prefork_process`
and :py:func:`waflib.Utils.run_regular_process`
"""
if kwargs.get('stdout') and kwargs.get('stderr'):
return run_prefork_process(cmd, kwargs, cargs)
else:
return run_regular_process(cmd, kwargs, cargs)
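# Illustrative call (not taken from this file): this mirrors how build contexts
# typically invoke run_process, with both output streams captured so the
# pre-forked path can be used:
#   ret, out, err = run_process(['gcc', '--version'],
#       {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'shell': False}, {})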
def alloc_process_pool(n, force=False):
"""
	Allocates processes to the default pool so that its size is at least *n*.
It is useful to call this function early so that the pre-forked
processes use as little memory as possible.
:param n: pool size
:type n: integer
:param force: if True then *n* more processes are added to the existing pool
:type force: bool
"""
# mandatory on python2, unnecessary on python >= 3.2
global run_process, get_process, alloc_process_pool
if not force:
n = max(n - len(process_pool), 0)
try:
lst = [get_process() for x in range(n)]
except OSError:
run_process = run_regular_process
get_process = alloc_process_pool = nada
else:
for x in lst:
process_pool.append(x)
if sys.platform == 'cli' or not sys.executable:
run_process = run_regular_process
get_process = alloc_process_pool = nada
|
the-stack_106_20194
|
"""
Synth modules in Torch.
"""
import copy
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import tensor
from torch import Tensor as T
import torchsynth.util as util
from torchsynth.config import BASE_REPRODUCIBLE_BATCH_SIZE, SynthConfig
from torchsynth.parameter import ModuleParameter, ModuleParameterRange
from torchsynth.signal import Signal
class SynthModule(nn.Module):
"""
A base class for synthesis modules. A :class:`~.SynthModule`
optionally takes input from other :class:`~.SynthModule` instances.
The :class:`~.SynthModule` uses its (optional) input and its
set of :class:`~torchsynth.parameter.ModuleParameter` to generate
output. All :class:`~torchsynth.parameter.ModuleParameter` of
the :class:`~.SynthModule` are assumed to be
:attr:`~torchsynth.config.SynthConfig.batch_size`-length 1-D
tensors.
All :class:`~.SynthModule` objects should be atomic, i.e., they
should not contain other :class:`~.SynthModule` objects. This
design choice is in the spirit of modular synthesis.
Args:
synthconfig: An object containing synthesis settings that are shared
across all modules, typically specified by
:class:`~torchsynth.synth.Voice`, or some other, possibly custom
:class:`~torchsynth.synth.AbstractSynth` subclass.
device: An object representing the device on which the `torch` tensors
are to be allocated (as per PyTorch, broadly).
"""
# This outlines all the parameters available in this module
# TODO: Make this non-optional
default_parameter_ranges: Optional[List[ModuleParameterRange]] = None
def __init__(
self,
synthconfig: SynthConfig,
device: Optional[torch.device] = None,
**kwargs: Dict[str, T],
):
nn.Module.__init__(self)
self.synthconfig = synthconfig
self.device = device
self.synthconfig.to(device)
self.torchparameters: nn.ParameterDict = nn.ParameterDict()
self.parameter_ranges = []
# If this module needs a random seed, here it is
self.seed: Optional[int] = None
if self.default_parameter_ranges is not None:
# We want to create copies of the parameter ranges otherwise each
# instance of the same module type (ex. ADSR) will reference the
# same param range.
assert isinstance(self.default_parameter_ranges, list)
self.parameter_ranges = copy.deepcopy(self.default_parameter_ranges)
self._parameter_ranges_dict: Dict[str, ModuleParameterRange] = {
p.name: p for p in self.parameter_ranges
}
assert len(self._parameter_ranges_dict) == len(self.parameter_ranges)
self.add_parameters(
[
ModuleParameter(
value=None,
parameter_name=parameter_range.name,
data=torch.rand((self.synthconfig.batch_size,), device=device),
parameter_range=parameter_range,
)
for parameter_range in self.parameter_ranges
]
)
if kwargs:
# Parameter values can also be passed in as keyword args.
for name, data in kwargs.items():
if data.device != self.device:
data = data.to(self.device)
self.set_parameter(name, data)
@property
def batch_size(self) -> T:
"""Size of the batch to be generated."""
assert self.synthconfig.batch_size.ndim == 0
return self.synthconfig.batch_size
@property
def sample_rate(self) -> T:
"""Sample rate frequency in Hz."""
assert self.synthconfig.sample_rate.ndim == 0
return self.synthconfig.sample_rate
@property
def nyquist(self):
"""Convenience property for the highest frequency that can be
represented at :attr:`~.sample_rate` (as per Shannon-Nyquist)."""
return self.sample_rate / 2.0
@property
def eps(self) -> float:
"""A very small value used to avoid computational errors."""
return self.synthconfig.eps
@property
def buffer_size(self) -> T:
"""Size of the module output in samples."""
assert self.synthconfig.buffer_size.ndim == 0
return self.synthconfig.buffer_size
def to_buffer_size(self, signal: Signal) -> Signal:
"""
Fixes the length of a signal to the default buffer size of this module,
as specified by :attr:`~.SynthModule.buffer_size`. Longer signals are
truncated to length; shorter signals are zero-padded.
Args:
signal: A signal to pad or truncate.
"""
return util.fix_length(signal, self.buffer_size)
def seconds_to_samples(self, seconds: T) -> T:
"""
Convenience function to calculate the number of samples corresponding to
given a time value and :attr:`~.sample_rate`. Returns a possibly
fractional value.
Args:
seconds: Time value in seconds.
"""
return seconds * self.sample_rate
def output(self, *args: Any, **kwargs: Any) -> Signal: # pragma: no cover
"""
Performs the main action of :class:`~.SynthModule`. Each child class
should override this method.
"""
raise NotImplementedError("Derived classes must override this method")
def forward(self, *args: Any, **kwargs: Any) -> Signal: # pragma: no cover
"""
Wrapper for output that ensures a :attr:`~.SynthModule.buffer_size`
length output.
"""
signal = self.output(*args, **kwargs)
buffered = self.to_buffer_size(signal)
return buffered
def add_parameters(self, parameters: List[ModuleParameter]):
"""
Adds parameters to the :class:`~.SynthModule` parameter dictionary. Used
by the class constructor.
Args:
parameters: List of parameters to register with this module.
"""
for parameter in parameters:
assert parameter.parameter_name not in self.torchparameters
assert parameter.shape == (self.batch_size,)
self.torchparameters[parameter.parameter_name] = parameter
def get_parameter(self, parameter_id: str) -> ModuleParameter:
"""
Retrieves a single :class:`~torchsynth.parameter.ModuleParameter`, as
specified by its parameter Id.
Args:
parameter_id: Id of the parameter to retrieve.
"""
value = self.torchparameters[parameter_id]
assert value.shape == (self.batch_size,)
return value
def get_parameter_0to1(self, parameter_id: str) -> T:
"""
Retrieves a specified parameter value in the normalized range [0,1].
Args:
parameter_id: Id of the parameter to retrieve.
"""
value = self.torchparameters[parameter_id]
assert value.shape == (self.batch_size,)
return value
def set_parameter(self, parameter_id: str, value: T):
"""
Updates a parameter value in a parameter-specific non-normalized range.
Args:
parameter_id: Id of the parameter to update.
value: Value to assign to the parameter.
"""
value = value.to(self.device)
self.torchparameters[parameter_id].to_0to1(value)
value = self.torchparameters[parameter_id].data
assert torch.all(0.0 <= value) and torch.all(value <= 1.0)
assert value.shape == (self.batch_size,)
def set_parameter_0to1(self, parameter_id: str, value: T):
"""
Update a parameter value in a normalized range [0,1].
Args:
parameter_id: Id of the parameter to update.
value: Value to assign to the parameter.
"""
value = value.to(self.device)
assert torch.all(0.0 <= value) and torch.all(value <= 1.0)
assert value.shape == (self.batch_size,)
self.torchparameters[parameter_id].data = value
def p(self, parameter_id: str) -> T:
"""
Convenience method for retrieving a parameter value. Returns
the value in parameter-specific, non-normalized range.
Args:
parameter_id: Id of the parameter to retrieve.
"""
value = self.torchparameters[parameter_id].from_0to1()
assert value.shape == (self.batch_size,)
return value
def to(self, device: Optional[torch.device] = None, **kwargs):
"""
This function overrides the :func:`~torch.nn.Module.to` call in
:class:`torch.nn.Module`. It ensures that the related values
:class:`~torchsynth.parameter.ModuleParameterRange` and
:class:`~torchsynth.parameter.ModuleParameter`, as well as
:attr:`~.SynthModule.synthconfig` are also transferred to the correct
device.
Args:
device: device to send this module to
"""
self._update_device(device)
return super().to(device=device, **kwargs)
def _update_device(self, device: Optional[torch.device] = None):
"""
This method handles the device transfer tasks that are not managed by
PyTorch.
Args:
device: Device to assign to this module.
"""
self.synthconfig.to(device)
self.device = device
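# A minimal usage sketch (illustrative only; values are assumptions): modules
# are driven through batch_size-length tensors, e.g. with the ADSR defined
# further below and a small non-reproducible SynthConfig:
#
#   synthconfig = SynthConfig(batch_size=4, reproducible=False)
#   adsr = ADSR(synthconfig)
#   adsr.set_parameter("attack", torch.full((4,), 0.1))  # seconds, one per batch item
#   adsr.p("attack")  # -> tensor of 0.1 values in the attack range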
class ControlRateModule(SynthModule):
"""
An abstract base class for non-audio modules that adapts the functions of
:class:`.~SynthModule` to run at :attr:`~.ControlRateModule.control_rate`.
"""
@property
def sample_rate(self) -> T:
raise NotImplementedError("This module operates at control rate")
@property
def buffer_size(self) -> T:
raise NotImplementedError("This module uses control buffer size")
@property
def control_rate(self) -> T:
"""Control rate frequency in Hz."""
assert self.synthconfig.control_rate.ndim == 0
return self.synthconfig.control_rate
@property
def control_buffer_size(self) -> T:
"""Size of the module output in samples."""
assert self.synthconfig.control_buffer_size.ndim == 0
return self.synthconfig.control_buffer_size
def to_buffer_size(self, signal: Signal) -> Signal:
"""
Fixes the length of a signal to the control buffer size of this module,
as specified by :attr:`~.ControlRateModule.control_buffer_size`. Longer
signals are truncated to length; shorter signals are zero-padded.
Args:
signal: A signal to pad or truncate.
"""
return util.fix_length(signal, self.control_buffer_size)
def seconds_to_samples(self, seconds: T) -> T:
"""
Convenience function to calculate the number of samples corresponding to
        a given time value and :attr:`~.control_rate`. Returns a possibly
fractional value.
Args:
seconds: Time value in seconds.
"""
return seconds * self.control_rate
def output(self, *args: Any, **kwargs: Any) -> Signal: # pragma: no cover
"""
Performs the main action of :class:`~.ControlRateModule`. Each child
class should override this method.
"""
raise NotImplementedError("Derived classes must override this method")
class ADSR(ControlRateModule):
"""
Envelope class for building a control-rate ADSR signal.
Args:
synthconfig: An object containing synthesis settings that are shared
across all modules, typically specified by
:class:`~torchsynth.synth.Voice`, or some other, possibly custom
:class:`~torchsynth.synth.AbstractSynth` subclass.
device: An object representing the device on which the `torch` tensors
are allocated (as per PyTorch, broadly).
"""
#: ADSR Parameters
default_parameter_ranges: List[ModuleParameterRange] = [
ModuleParameterRange(
0.0, 2.0, curve=0.5, name="attack", description="attack time (sec)"
),
ModuleParameterRange(
0.0, 2.0, curve=0.5, name="decay", description="decay time (sec)"
),
ModuleParameterRange(
0.0,
1.0,
name="sustain",
description="sustain amplitude 0-1. The only part of ADSR that "
+ "(confusingly, by convention) is not a time value.",
),
ModuleParameterRange(
0.0, 5.0, curve=0.5, name="release", description="release time (sec)"
),
ModuleParameterRange(
0.1,
6.0,
name="alpha",
description="envelope curve. 1 is linear, >1 is exponential.",
),
]
def __init__(
self,
synthconfig: SynthConfig,
device: Optional[torch.device] = None,
**kwargs: Dict[str, T],
):
super().__init__(synthconfig, device=device, **kwargs)
# Create some values that will be automatically loaded on device
self.register_buffer("zero", tensor(0.0, device=self.device))
self.register_buffer("one", tensor(1.0, device=self.device))
self.register_buffer(
"range", torch.arange(self.control_buffer_size, device=self.device)
)
def output(self, note_on_duration: T) -> Signal:
"""Generate an ADSR envelope.
By default, this envelope reacts as if it was triggered with midi, for
example playing a keyboard. Each midi event has a beginning and end:
note-on, when you press the key down; and note-off, when you release the
key. `note_on_duration` is the amount of time that the key is depressed.
During the note-on, the envelope moves through the attack and decay
        sections of the envelope. This leads to musically intuitive, but
        programmatically counterintuitive behaviour:
Example:
Assume attack is .5 seconds, and decay is .5 seconds. If a note is
held for .75 seconds, the envelope won't pass through the entire
attack-and-decay (specifically, it will execute the entire attack,
and only .25 seconds of the decay).
        If this is confusing, don't worry about it. ADSRs do a lot of work
        behind the scenes to make the playing experience feel natural.
Args:
note_on_duration: Duration of note on event in seconds.
"""
if self.synthconfig.debug:
assert note_on_duration.ndim == 1
assert torch.all(note_on_duration > 0.0)
# Calculations to accommodate attack/decay phase cut by note duration.
attack = self.p("attack")
decay = self.p("decay")
self.alpha = self.p("alpha").unsqueeze(1)
new_attack = torch.minimum(attack, note_on_duration)
new_decay = torch.maximum(note_on_duration - attack, self.zero)
new_decay = torch.minimum(new_decay, decay)
attack_signal = self.make_attack(new_attack)
decay_signal = self.make_decay(new_attack, new_decay)
release_signal = self.make_release(note_on_duration)
return (attack_signal * decay_signal * release_signal).as_subclass(Signal)
def ramp(
self, duration: T, start: Optional[T] = None, inverse: Optional[bool] = False
) -> Signal:
"""
Makes a ramp of a given duration in seconds.
The construction of this matrix is rather cryptic. Essentially, this
method works by tilting and clipping ramps between 0 and 1, then
applying a scaling factor :attr:`~alpha`.
Args:
duration: Length of the ramp in seconds.
start: Initial delay of ramp in seconds.
inverse: Toggle to flip the ramp from ascending to descending.
"""
assert duration.ndim == 1
duration = self.seconds_to_samples(duration).unsqueeze(1)
# Convert to number of samples.
if start is not None:
start = self.seconds_to_samples(start).unsqueeze(1)
else:
start = 0.0
# Build ramps template.
ramp = self.range.expand((self.batch_size, self.range.shape[0]))
# Shape ramps.
ramp = ramp - start
ramp = torch.maximum(ramp, self.zero)
ramp = (ramp + self.eps) / duration + self.eps
ramp = torch.minimum(ramp, self.one)
# The following is a workaround. In inverse mode, a ramp with 0 duration
# (that is all 1's) becomes all 0's, which is a problem for the
# ultimate calculation of the ADSR signal (a * d * r => 0's). So this
# replaces only rows who sum to 0 (i.e., all components are zero).
if inverse:
ramp = torch.where(duration > 0.0, 1.0 - ramp, ramp)
# Apply scaling factor.
ramp = torch.pow(ramp, self.alpha)
return ramp.as_subclass(Signal)
def make_attack(self, attack_time) -> Signal:
"""
Builds the attack portion of the envelope.
Args:
attack_time: Length of the attack in seconds.
"""
return self.ramp(attack_time)
def make_decay(self, attack_time, decay_time) -> Signal:
"""
Creates the decay portion of the envelope.
Args:
attack_time: Length of the attack in seconds.
decay_time: Length of the decay time in seconds.
"""
sustain = self.p("sustain").unsqueeze(1)
a = 1.0 - sustain
b = self.ramp(decay_time, start=attack_time, inverse=True)
return torch.squeeze(a * b + sustain)
def make_release(self, note_on_duration) -> Signal:
"""
Creates the release portion of the envelope.
Args:
note_on_duration: Duration of midi note in seconds (release starts
when the midi note is released).
"""
return self.ramp(self.p("release"), start=note_on_duration, inverse=True)
def __str__(self): # pragma: no cover
return (
f"""ADSR(a={self.torchparameters['attack']}, """
f"""d={self.torchparameters['decay']}, """
f"""s={self.torchparameters['sustain']}, """
f"""r={self.torchparameters['release']}, """
f"""alpha={self.torchparameters['alpha']}"""
)
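# Envelope generation sketch (illustrative values, continuing the sketch after
# SynthModule above): calling the module runs forward(), which wraps output()
# and fixes the result length to control_buffer_size:
#
#   note_on = torch.full((4,), 0.75)  # note held for 0.75 s, batch of 4
#   envelope = adsr(note_on)          # Signal of shape (batch, control_buffer_size)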
class VCO(SynthModule):
"""
Base class for voltage controlled oscillators.
Think of this as a VCO on a modular synthesizer. It has a base pitch
(specified here as a midi value), and a pitch modulation depth. Its call
accepts a modulation signal between [-1, 1]. An array of 0's returns a
stationary audio signal at its base pitch.
Args:
synthconfig: An object containing synthesis settings that are shared
across all modules, typically specified by
:class:`~torchsynth.synth.Voice`, or some other, possibly custom
:class:`~torchsynth.synth.AbstractSynth` subclass.
phase: Initial oscillator phase.
"""
default_parameter_ranges: List[ModuleParameterRange] = [
ModuleParameterRange(
-24.0,
24.0,
name="tuning",
description="tuning adjustment for VCO in midi",
),
ModuleParameterRange(
-96.0,
96.0,
curve=0.2,
symmetric=True,
name="mod_depth",
description="depth of the pitch modulation in semitones",
),
ModuleParameterRange(
-torch.pi,
torch.pi,
name="initial_phase",
description="Initial phase for this oscillator",
),
]
def output(self, midi_f0: T, mod_signal: Optional[Signal] = None) -> Signal:
"""
Generates audio signal from modulation signal.
Args:
midi_f0: Fundamental of note in midi note value (0-127).
mod_signal: Modulation signal to apply to the pitch.
"""
assert midi_f0.shape == (self.batch_size,)
if mod_signal is not None and mod_signal.shape != (
self.batch_size,
self.buffer_size,
):
raise ValueError(
"mod_signal has incorrect shape. Expected "
f"{torch.Size([self.batch_size, self.buffer_size])}, "
f"and received {mod_signal.shape}. Make sure the mod_signal "
"being passed in is at full audio sampling rate."
)
control_as_frequency = self.make_control_as_frequency(midi_f0, mod_signal)
if self.synthconfig.debug:
assert (control_as_frequency >= 0).all() and (
control_as_frequency <= self.nyquist
).all()
cosine_argument = self.make_argument(control_as_frequency)
cosine_argument += self.p("initial_phase").unsqueeze(1)
output = self.oscillator(cosine_argument, midi_f0)
return output.as_subclass(Signal)
def make_control_as_frequency(
self, midi_f0: T, mod_signal: Optional[Signal] = None
) -> Signal:
"""
Generates a time-varying control signal in frequency (Hz) from a midi
fundamental pitch and pitch-modulation signal.
Args:
midi_f0: Fundamental pitch value in midi.
mod_signal: Pitch modulation signal in midi.
"""
midi_f0 = (midi_f0 + self.p("tuning")).unsqueeze(1)
# If there is no modulation, then convert the midi_f0 values to
# frequency and return an expanded view that contains buffer size
# number of values
if mod_signal is None:
control_hz = util.midi_to_hz(midi_f0)
return control_hz.expand(-1, self.buffer_size)
# If there is modulation, then add that to the fundamental,
# clamp to a range [0.0, 127.0], then return in frequency Hz.
modulation = self.p("mod_depth").unsqueeze(1) * mod_signal
control = torch.clamp(midi_f0 + modulation, 0.0, 127.0)
return util.midi_to_hz(control)
def make_argument(self, freq: Signal) -> Signal:
"""
Generates the phase argument to feed an oscillating function to
generate an audio signal.
Args:
freq: Time-varying instantaneous frequency in Hz.
"""
return torch.cumsum(2 * torch.pi * freq / self.sample_rate, dim=1)
def oscillator(self, argument: Signal, midi_f0: T) -> Signal:
"""
This function accepts a phase argument and generates output audio. It is
implemented by the child class.
Args:
argument: The phase of the oscillator at each time sample.
midi_f0: Fundamental frequency in midi.
"""
raise NotImplementedError("Derived classes must override this method")
class SineVCO(VCO):
"""
Simple VCO that generates a pitched sinusoid.
"""
def oscillator(self, argument: Signal, midi_f0: T) -> Signal:
"""
A cosine oscillator. ...Good ol' cosine.
Args:
argument: The phase of the oscillator at each time sample.
midi_f0: Fundamental frequency in midi (ignored in this VCO).
"""
return torch.cos(argument)
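# Rendering sketch (illustrative values; midi note 69 corresponds to 440 Hz):
#
#   vco = SineVCO(synthconfig)
#   midi_f0 = torch.full((4,), 69.0)  # batch of A440 notes
#   audio = vco(midi_f0)              # Signal of shape (batch, buffer_size)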
class FmVCO(VCO):
"""
Frequency modulation VCO. Takes a modulation signal as instantaneous
frequency (in Hz) rather than as a midi value.
Typical modulation is calculated in pitch-space (midi). For FM to work,
we have to change the order of calculations. Here the modulation depth is
re-interpreted as the "modulation index" which is tied to the fundamental of
the oscillator being modulated:
:math:`I = \\Delta f / f_m`
where :math:`I` is the modulation index, :math:`\\Delta f` is the frequency
deviation imparted by the modulation, and :math:`f_m` is the modulation
frequency, both in Hz.
"""
# We include this override to output to make mod_signal non-optional
def output(self, midi_f0: T, mod_signal: Signal) -> Signal:
"""
Args:
midi_f0: note value in midi
mod_signal: audio rate frequency modulation signal
"""
return super().output(midi_f0, mod_signal)
def make_control_as_frequency(self, midi_f0: T, mod_signal) -> Signal:
"""
Creates a time-varying control signal in instantaneous frequency (Hz).
Args:
midi_f0: Fundamental frequency in midi.
mod_signal: FM modulation signal (interpreted as modulation index).
"""
# Compute modulation in Hz space (rather than midi-space).
f0_hz = util.midi_to_hz(midi_f0 + self.p("tuning")).unsqueeze(1)
fm_depth = self.p("mod_depth").unsqueeze(1) * f0_hz
modulation_hz = fm_depth * mod_signal
return torch.clamp(f0_hz + modulation_hz, 0.0, self.nyquist)
def oscillator(self, argument: Signal, midi_f0: T) -> Signal:
"""
A cosine oscillator. ...Good ol' cosine.
Args:
argument: The phase of the oscillator at each time sample.
midi_f0: Fundamental frequency in midi (ignored in this VCO).
"""
return torch.cos(argument)
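# Worked example of the frequency computation in make_control_as_frequency
# above (illustrative numbers only): with a carrier at f0 = 440 Hz and
# mod_depth mapped to a modulation index of 2, the peak deviation is
# fm_depth = 2 * 440 = 880 Hz, so a full-scale modulator (+/-1) sweeps the
# instantaneous frequency between max(440 - 880, 0) = 0 Hz and 1320 Hz
# (further clamped to the Nyquist frequency).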
class SquareSawVCO(VCO):
"""
An oscillator that can take on either a square or a sawtooth waveshape, and
can sweep continuously between them, as determined by the
:attr:`~torchsynth.module.SquareSawVCO.shape` parameter. A shape value of 0
makes a square wave; a shape of 1 makes a saw wave.
With apologies to Lazzarini and Timoney (2010).
`"New perspectives on distortion synthesis for virtual analog oscillators."
<https://doi.org/10.1162/comj.2010.34.1.28>`_
Computer Music Journal 34, no. 1: 28-40.
"""
default_parameter_ranges: List[
ModuleParameterRange
] = VCO.default_parameter_ranges + [
ModuleParameterRange(
0.0, 1.0, name="shape", description="Waveshape - square to saw [0,1]"
)
]
def oscillator(self, argument: Signal, midi_f0: T) -> Signal:
"""
Generates output square/saw audio given a phase argument.
Args:
argument: The phase of the oscillator at each time sample.
midi_f0: Fundamental frequency in midi.
"""
partials = self.partials_constant(midi_f0).unsqueeze(1)
square = torch.tanh(torch.pi * partials * torch.sin(argument) / 2)
shape = self.p("shape").unsqueeze(1)
return (1 - shape / 2) * square * (1 + shape * torch.cos(argument))
def partials_constant(self, midi_f0):
"""
Calculates a value to determine the number of overtones in the resulting
square / saw wave, in order to keep aliasing at an acceptable level.
        Higher fundamental frequencies must use fewer partials to avoid aliasing;
        lower-frequency sounds can safely include more partials without causing
        audible aliasing.
Args:
midi_f0: Fundamental frequency in midi.
"""
max_pitch = (
midi_f0 + self.p("tuning") + torch.maximum(self.p("mod_depth"), tensor(0))
)
max_f0 = util.midi_to_hz(max_pitch)
return 12000 / (max_f0 * torch.log10(max_f0))
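# Worked example for partials_constant (illustrative, assuming tuning = 0 and
# non-positive mod_depth): for midi_f0 = 69 the maximum fundamental is 440 Hz,
# giving roughly 12000 / (440 * log10(440)) ~= 10.3 as the partials constant,
# whereas a low note at 55 Hz (midi 33) allows roughly 12000 / (55 * 1.74) ~= 125.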
class VCA(SynthModule):
"""
Voltage controlled amplifier.
The VCA shapes the amplitude of an audio input signal over time, as
determined by a control signal. To shape control-rate signals, use
:class:`torchsynth.module.ControlRateVCA`.
"""
def output(self, audio_in: Signal, control_in: Signal) -> Signal:
"""
Args:
            audio_in: Audio input to shape with the VCA.
            control_in: Time-varying amplitude modulation signal.
"""
return audio_in * control_in
class ControlRateVCA(ControlRateModule):
"""
Voltage controlled amplifier.
The VCA shapes the amplitude of a control input signal over time, as
determined by another control signal. To shape audio-rate signals, use
:class:`torchsynth.module.VCA`.
"""
def output(self, audio_in: Signal, control_in: Signal) -> Signal:
"""
Args:
            audio_in: Control signal input to shape with the VCA.
            control_in: Time-varying amplitude modulation signal.
"""
return audio_in * control_in
class Noise(SynthModule):
"""
Generates white noise that is the same length as the buffer.
    For performance, noise is pre-computed. In order to maintain
    reproducibility, noise must be computed on the CPU and then transferred
to the GPU, if a GPU is being used. We pre-compute
:attr:`~torchsynth.config.BASE_REPRODUCIBLE_BATCH_SIZE`
samples of noise and then repeat those for larger batch sizes.
To keep things fast we only support multiples of
:attr:`~torchsynth.config.BASE_REPRODUCIBLE_BATCH_SIZE`
    when reproducibility mode is enabled. For example, if your batch size
is 4 times :attr:`~torchsynth.config.BASE_REPRODUCIBLE_BATCH_SIZE`, then
you get the same noise signals repeated 4 times.
`Note`: If you have multiple `Noise` modules in the same
:class:`~torchsynth.synth.AbstractSynth`, make sure you instantiate
each `Noise` with a unique seed.
Args:
synthconfig: See :class:`~torchsynth.module.SynthModule`
seed: random number generator seed for white noise
"""
__noise_batch_size: int = BASE_REPRODUCIBLE_BATCH_SIZE
# Unfortunately, Final is not supported until Python 3.8
    # noise_batch_size: Final[int] = BASE_REPRODUCIBLE_BATCH_SIZE
def __init__(self, synthconfig: SynthConfig, seed: int, **kwargs):
super().__init__(synthconfig, **kwargs)
# Pre-compute default batch size number of noise samples
generator = torch.Generator(device="cpu").manual_seed(seed)
# In reproducible mode, we support batch sizes that are multiples
# of the BASE_REPRODUCIBLE_BATCH_SIZE
if self.synthconfig.reproducible:
if self.batch_size % self.__noise_batch_size != 0:
raise ValueError(
f"Batch size must be a multiple of {self.__noise_batch_size} "
"when using reproducible mode. Either change your batch size,"
"or set reproducible=False in the SynthConfig for this module."
)
noise = torch.empty(
(self.__noise_batch_size, self.buffer_size), device="cpu"
)
noise.data.uniform_(-1.0, 1.0, generator=generator)
if self.batch_size > self.__noise_batch_size:
noise = noise.repeat(self.batch_size // self.__noise_batch_size, 1)
else:
# Non-reproducible mode, just render noise of batch size
noise = torch.empty((self.batch_size, self.buffer_size), device="cpu")
noise.data.uniform_(-1.0, 1.0, generator=generator)
self.register_buffer("noise", noise.to(self.device))
def output(self) -> Signal:
return self.noise.as_subclass(Signal)
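# Sketch of the reproducibility behaviour described above (illustrative;
# BASE_REPRODUCIBLE_BATCH_SIZE is whatever torchsynth.config defines): with a
# batch size of 2x that constant, the same pre-computed noise rows are simply
# repeated twice, while a batch size that is not an exact multiple raises the
# ValueError in __init__ when reproducible mode is on.
#
#   noise = Noise(synthconfig, seed=0)
#   white = noise()  # Signal of shape (batch_size, buffer_size), values in [-1, 1]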
class LFO(ControlRateModule):
"""
Low Frequency Oscillator.
The LFO shape can be any mixture of sine, triangle, saw, reverse saw, and
square waves. Contributions of each base-shape are determined by the
:attr:`~torchsynth.module.LFO.lfo_types` values, which are between 0 and 1.
Args:
synthconfig: See :class:`~torchsynth.module.SynthConfig`.
exponent: A non-negative value that determines the discrimination of the
soft-max selector for LFO shapes. Higher values will tend to favour
one LFO shape over all others. Lower values will result in a more
even blend of LFO shapes.
"""
default_ranges: List[ModuleParameterRange] = [
ModuleParameterRange(
0.0,
20.0,
curve=0.25,
name="frequency",
description="Frequency in Hz of oscillation",
),
ModuleParameterRange(
-10.0,
20.0,
curve=0.5,
symmetric=True,
name="mod_depth",
description="LFO rate modulation in Hz",
),
ModuleParameterRange(
-torch.pi,
torch.pi,
name="initial_phase",
description="Initial phase of LFO",
),
]
def __init__(
self,
synthconfig: SynthConfig,
exponent: T = tensor(2.718281828), # e
**kwargs: Dict[str, T],
):
self.lfo_types = ["sin", "tri", "saw", "rsaw", "sqr"]
self.default_parameter_ranges = self.default_ranges.copy()
for lfo in self.lfo_types:
self.default_parameter_ranges.append(
ModuleParameterRange(
0.0,
1.0,
name=f"{lfo}",
description=f"Selection parameter for {lfo} LFO",
)
)
super().__init__(synthconfig, **kwargs)
self.exponent = exponent
def output(self, mod_signal: Optional[Signal] = None) -> Signal:
"""
Generates low frequency oscillator control signal.
Args:
mod_signal: LFO rate modulation signal in Hz. To modulate the
depth of the LFO, use :class:`torchsynth.module.ControlRateVCA`.
"""
# This module accepts signals at control rate!
if mod_signal is not None:
assert mod_signal.shape == (self.batch_size, self.control_buffer_size)
# Create frequency signal
frequency = self.make_control(mod_signal)
argument = torch.cumsum(2 * torch.pi * frequency / self.control_rate, dim=1)
argument = argument + self.p("initial_phase").unsqueeze(1)
# Get LFO shapes
shapes = torch.stack(self.make_lfo_shapes(argument), dim=1).as_subclass(Signal)
# Apply mode selection to the LFO shapes
mode = torch.stack([self.p(lfo) for lfo in self.lfo_types], dim=1)
mode = torch.pow(mode, self.exponent)
mode = mode / torch.sum(mode, dim=1, keepdim=True)
return torch.matmul(mode.unsqueeze(1), shapes).squeeze(1).as_subclass(Signal)
def make_control(self, mod_signal: Optional[Signal] = None) -> Signal:
"""
Applies the LFO-rate modulation signal to the LFO base frequency.
Args:
mod_signal: Modulation signal in Hz. Positive values increase the
LFO base rate; negative values decrease it.
"""
frequency = self.p("frequency").unsqueeze(1)
# If no modulation, then return a view of the frequency of this
# LFO expanded to the control buffer size
if mod_signal is None:
return frequency.expand(-1, self.control_buffer_size)
modulation = self.p("mod_depth").unsqueeze(1) * mod_signal
return torch.maximum(frequency + modulation, tensor(0.0))
def make_lfo_shapes(self, argument: Signal) -> Tuple[T, T, T, T, T]:
"""
Generates five separate signals for each LFO shape and returns them as a
tuple, to be mixed by :func:`torchsynth.module.LFO.output`.
Args:
argument: Time-varying phase to generate LFO signals.
"""
cos = torch.cos(argument + torch.pi)
square = torch.sign(cos)
cos = (cos + 1.0) / 2.0
square = (square + 1.0) / 2.0
saw = torch.remainder(argument, 2 * torch.pi) / (2 * torch.pi)
rev_saw = 1.0 - saw
triangle = 2 * saw
triangle = torch.where(triangle > 1.0, 2.0 - triangle, triangle)
return cos, triangle, saw, rev_saw, square
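# Usage sketch (illustrative parameter values): pushing the "sin" selector
# towards 1 while the other shape selectors stay near 0 yields a mostly
# sinusoidal LFO after the soft-max style mixing in output():
#
#   lfo = LFO(synthconfig)
#   lfo.set_parameter_0to1("sin", torch.ones(4))
#   control = lfo()  # control-rate Signal of shape (batch, control_buffer_size)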
class ModulationMixer(SynthModule):
"""
A modulation matrix that combines :math:`N` input modulation signals to make
:math:`M` output modulation signals. Each output is a linear combination of
    all input signals, as determined by an :math:`N \\times M` mixing matrix.
Args:
synthconfig: See :class:`~torchsynth.module.SynthConfig`.
        n_input: Number of input modulation signals to mix.
        n_output: Number of output modulation signals to generate.
        curves: An optional list with one positive curve value per input,
            used to shape that input's mix-level parameter ranges. A low
            value discourages over-mixing.
"""
def __init__(
self,
synthconfig: SynthConfig,
n_input: int,
n_output: int,
curves: Optional[List[float]] = None,
input_names: Optional[List[str]] = None,
output_names: Optional[List[str]] = None,
**kwargs: Dict[str, T],
):
# Parameter curves can be used to modify the parameter mapping
# for each input modulation source to the outputs
if curves is not None:
assert len(curves) == n_input
else:
curves = [0.5] * n_input
custom_names = False
if input_names is not None:
assert len(input_names) == n_input
assert output_names is not None
assert len(output_names) == n_output
custom_names = True
        # Need to create the parameter ranges before calling super().__init__
self.default_parameter_ranges = []
for i in range(n_input):
for j in range(n_output):
# Apply custom param name if it was passed in
if custom_names:
name = f"{input_names[i]}->{output_names[j]}"
description = f"Modulation {input_names[i]} to {output_names[j]}"
else:
name = f"{i}->{j}"
description = f"Modulation {i} to {j}"
self.default_parameter_ranges.append(
ModuleParameterRange(
0.0,
1.0,
curve=curves[i],
name=name,
description=description,
)
)
super().__init__(synthconfig, **kwargs)
self.n_input = n_input
self.n_output = n_output
def forward(self, *signals: Signal) -> Tuple[Signal]:
"""
Performs mixture of modulation signals.
"""
# Get params into batch_size x n_output x n_input matrix
params = torch.stack([self.p(p) for p in self.torchparameters], dim=1)
params = params.view(self.batch_size, self.n_input, self.n_output)
params = torch.swapaxes(params, 1, 2)
# Make sure there is the same number of input signals as mix params
assert len(signals) == params.shape[2]
signals = torch.stack(signals, dim=1)
modulation = torch.chunk(torch.matmul(params, signals), self.n_output, dim=1)
return tuple(m.squeeze(1).as_subclass(Signal) for m in modulation)
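# Usage sketch (hypothetical names): a 2-in / 2-out matrix routing two LFO
# signals to pitch and amplitude modulation destinations; each output is the
# parameterized linear combination computed in forward() above:
#
#   mixer = ModulationMixer(synthconfig, n_input=2, n_output=2,
#                           input_names=["lfo1", "lfo2"],
#                           output_names=["pitch", "amp"])
#   pitch_mod, amp_mod = mixer(lfo1_out, lfo2_out)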
class AudioMixer(SynthModule):
"""
Sums together N audio signals and applies range-normalization if the
resulting signal is outside of [-1, 1].
"""
def __init__(
self,
synthconfig: SynthConfig,
n_input: int,
curves: Optional[List[float]] = None,
names: Optional[List[str]] = None,
**kwargs: Dict[str, T],
):
# Parameter curves can be used to modify the parameter mapping
# for each input modulation source to the outputs
if curves is not None:
assert len(curves) == n_input
else:
curves = [1.0] * n_input
# If param names were passed in, make sure we got the right number
if names is not None:
assert len(names) == n_input
        # Need to create the parameter ranges before calling super().__init__
self.default_parameter_ranges = []
for i in range(n_input):
name = f"level{i}" if names is None else names[i]
self.default_parameter_ranges.append(
ModuleParameterRange(
0.0,
1.0,
curve=curves[i],
name=name,
description=f"{name} mix level",
)
)
super().__init__(synthconfig, **kwargs)
self.n_input = n_input
def output(self, *signals: Signal) -> Signal:
"""
Returns a mixed signal from an array of input signals.
"""
# Turn params into matrix
params = torch.stack([self.p(p) for p in self.torchparameters], dim=1)
# Make sure we received the correct number of input signals
signals = torch.stack(signals, dim=1)
assert signals.shape[1] == params.shape[1]
# Mix signals and normalize output if required
output = torch.matmul(params.unsqueeze(1), signals).squeeze(1)
return util.normalize_if_clipping(output)
class ControlRateUpsample(SynthModule):
"""
Upsample control signals to the global sampling rate
Uses linear interpolation to resample an input control signal to the
audio buffer size set in synthconfig.
"""
def __init__(
self,
synthconfig: SynthConfig,
device: Optional[torch.device] = None,
**kwargs: Dict[str, T],
):
super().__init__(synthconfig, device, **kwargs)
self.upsample = torch.nn.Upsample(
self.synthconfig.buffer_size, mode="linear", align_corners=True
)
def output(self, signal: Signal) -> Signal:
return self.upsample(signal.unsqueeze(1)).squeeze(1)
class CrossfadeKnob(SynthModule):
"""
Crossfade knob parameter with no signal generation
"""
default_parameter_ranges: List[ModuleParameterRange] = [
ModuleParameterRange(
0.0,
1.0,
name="ratio",
description="crossfade knob",
),
]
class MonophonicKeyboard(SynthModule):
"""
A keyboard controller module. Mimics a mono-synth keyboard and contains
parameters that output a midi_f0 and note duration.
"""
default_parameter_ranges: List[ModuleParameterRange] = [
ModuleParameterRange(
0.0,
127.0,
curve=1.0,
name="midi_f0",
description="pitch value in 'midi' (69 = 440Hz)",
),
ModuleParameterRange(
0.01,
4.0,
curve=0.5,
name="duration",
description="note-on button, in seconds",
),
]
def forward(self) -> Tuple[T, T]:
return self.p("midi_f0"), self.p("duration")
class SoftModeSelector(SynthModule):
"""
A soft mode selector.
If there are n different modes, return a probability distribution over them.
TODO: Would be nice to sample in a way that maximizes
KL-divergence from uniform: https://github.com/torchsynth/torchsynth/issues/165
"""
def __init__(
self,
synthconfig: SynthConfig,
n_modes: int,
exponent: T = tensor(2.718281828), # e
**kwargs: Dict[str, T],
):
"""
exponent determines how strongly to scale each [0,1] value prior
to normalization. We should probably tune this:
https://github.com/torchsynth/torchsynth/issues/165
"""
        # Need to create the parameter ranges before calling super().__init__
self.default_parameter_ranges = [
ModuleParameterRange(
0.0,
1.0,
name=f"mode{i}weight",
description=f"mode{i} weight, before normalization",
)
for i in range(n_modes)
]
super().__init__(synthconfig=synthconfig, **kwargs)
self.exponent = exponent
def forward(self) -> Tuple[T, T]:
"""
Normalize all mode weights so they sum to 1.0
"""
# Is this tensor creation slow?
# But usually parameter stuff is not the bottleneck
params = torch.stack([p.data for p in self.torchparameters.values()])
params = torch.pow(params, exponent=self.exponent)
return params / torch.sum(params, dim=0)
class HardModeSelector(SynthModule):
"""
A hard mode selector.
NOTE: This is non-differentiable.
"""
def __init__(
self,
synthconfig: SynthConfig,
n_modes: int,
**kwargs: Dict[str, T],
):
        # Need to create the parameter ranges before calling super().__init__
self.default_parameter_ranges = [
ModuleParameterRange(
0.0,
1.0,
name=f"mode{i}weight",
description=f"mode{i} weight, before argmax",
)
for i in range(n_modes)
]
super().__init__(synthconfig=synthconfig, **kwargs)
def forward(self) -> Tuple[T, T]:
# Is this tensor creation slow?
# But usually parameter stuff is not the bottleneck
origparams = torch.stack([p.data for p in self.torchparameters.values()])
idx = torch.argmax(origparams, dim=0)
return F.one_hot(idx, num_classes=origparams.shape[0]).T
|
the-stack_106_20195
|
import asyncio
import dataclasses
import time
import traceback
from secrets import token_bytes
from typing import Callable, Dict, List, Optional, Tuple, Set
from blspy import AugSchemeMPL, G2Element
from chiabip158 import PyBIP158
import staicoin.server.ws_connection as ws
from staicoin.consensus.block_creation import create_unfinished_block
from staicoin.consensus.block_record import BlockRecord
from staicoin.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from staicoin.full_node.bundle_tools import best_solution_generator_from_template, simple_solution_generator
from staicoin.full_node.full_node import FullNode
from staicoin.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from staicoin.full_node.signage_point import SignagePoint
from staicoin.protocols import farmer_protocol, full_node_protocol, introducer_protocol, timelord_protocol, wallet_protocol
from staicoin.protocols.full_node_protocol import RejectBlock, RejectBlocks
from staicoin.protocols.protocol_message_types import ProtocolMessageTypes
from staicoin.protocols.wallet_protocol import PuzzleSolutionResponse, RejectHeaderBlocks, RejectHeaderRequest
from staicoin.server.outbound_message import Message, make_msg
from staicoin.types.blockchain_format.coin import Coin, hash_coin_list
from staicoin.types.blockchain_format.pool_target import PoolTarget
from staicoin.types.blockchain_format.program import Program
from staicoin.types.blockchain_format.sized_bytes import bytes32
from staicoin.types.coin_record import CoinRecord
from staicoin.types.end_of_slot_bundle import EndOfSubSlotBundle
from staicoin.types.full_block import FullBlock
from staicoin.types.generator_types import BlockGenerator
from staicoin.types.mempool_inclusion_status import MempoolInclusionStatus
from staicoin.types.mempool_item import MempoolItem
from staicoin.types.peer_info import PeerInfo
from staicoin.types.unfinished_block import UnfinishedBlock
from staicoin.util.api_decorators import api_request, peer_required, bytes_required, execute_task
from staicoin.util.generator_tools import get_block_header
from staicoin.util.hash import std_hash
from staicoin.util.ints import uint8, uint32, uint64, uint128
from staicoin.util.merkle_set import MerkleSet
class FullNodeAPI:
full_node: FullNode
def __init__(self, full_node) -> None:
self.full_node = full_node
def _set_state_changed_callback(self, callback: Callable):
self.full_node.state_changed_callback = callback
@property
def server(self):
return self.full_node.server
@property
def log(self):
return self.full_node.log
@property
def api_ready(self):
return self.full_node.initialized
@peer_required
@api_request
async def request_peers(self, _request: full_node_protocol.RequestPeers, peer: ws.WSstaicoinConnection):
if peer.peer_server_port is None:
return None
peer_info = PeerInfo(peer.peer_host, peer.peer_server_port)
if self.full_node.full_node_peers is not None:
msg = await self.full_node.full_node_peers.request_peers(peer_info)
return msg
@peer_required
@api_request
async def respond_peers(
self, request: full_node_protocol.RespondPeers, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), True)
return None
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
self.log.debug(f"Received {len(request.peer_list)} peers from introducer")
if self.full_node.full_node_peers is not None:
await self.full_node.full_node_peers.respond_peers(request, peer.get_peer_info(), False)
await peer.close()
return None
@execute_task
@peer_required
@api_request
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSstaicoinConnection) -> Optional[Message]:
"""
A peer notifies us that they have added a new peak to their blockchain. If we don't have it,
we can ask for it.
"""
# this semaphore limits the number of tasks that can call new_peak() at
# the same time, since it can be expensive
async with self.full_node.new_peak_sem:
return await self.full_node.new_peak(request, peer)
@peer_required
@api_request
async def new_transaction(
self, transaction: full_node_protocol.NewTransaction, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
"""
A peer notifies us of a new transaction.
Requests a full transaction if we haven't seen it previously, and if the fees are enough.
"""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if not (await self.full_node.synced()):
return None
# Ignore if already seen
if self.full_node.mempool_manager.seen(transaction.transaction_id):
return None
if self.full_node.mempool_manager.is_fee_enough(transaction.fees, transaction.cost):
            # If there's already a pending request for this tx, just add this peer to the set of peers that have it
if transaction.transaction_id in self.full_node.full_node_store.pending_tx_request:
if transaction.transaction_id in self.full_node.full_node_store.peers_with_tx:
current_set = self.full_node.full_node_store.peers_with_tx[transaction.transaction_id]
if peer.peer_node_id in current_set:
return None
current_set.add(peer.peer_node_id)
return None
else:
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
return None
self.full_node.full_node_store.pending_tx_request[transaction.transaction_id] = peer.peer_node_id
new_set = set()
new_set.add(peer.peer_node_id)
self.full_node.full_node_store.peers_with_tx[transaction.transaction_id] = new_set
async def tx_request_and_timeout(full_node: FullNode, transaction_id, task_id):
counter = 0
try:
while True:
# Limit to asking 10 peers, it's possible that this tx got included on chain already
# Highly unlikely 10 peers that advertised a tx don't respond to a request
if counter == 10:
break
if transaction_id not in full_node.full_node_store.peers_with_tx:
break
peers_with_tx: Set = full_node.full_node_store.peers_with_tx[transaction_id]
if len(peers_with_tx) == 0:
break
peer_id = peers_with_tx.pop()
assert full_node.server is not None
if peer_id not in full_node.server.all_connections:
continue
peer = full_node.server.all_connections[peer_id]
request_tx = full_node_protocol.RequestTransaction(transaction.transaction_id)
msg = make_msg(ProtocolMessageTypes.request_transaction, request_tx)
await peer.send_message(msg)
await asyncio.sleep(5)
counter += 1
if full_node.mempool_manager.seen(transaction_id):
break
except asyncio.CancelledError:
pass
finally:
# Always Cleanup
if transaction_id in full_node.full_node_store.peers_with_tx:
full_node.full_node_store.peers_with_tx.pop(transaction_id)
if transaction_id in full_node.full_node_store.pending_tx_request:
full_node.full_node_store.pending_tx_request.pop(transaction_id)
if task_id in full_node.full_node_store.tx_fetch_tasks:
full_node.full_node_store.tx_fetch_tasks.pop(task_id)
task_id = token_bytes()
fetch_task = asyncio.create_task(
tx_request_and_timeout(self.full_node, transaction.transaction_id, task_id)
)
self.full_node.full_node_store.tx_fetch_tasks[task_id] = fetch_task
return None
return None
@api_request
async def request_transaction(self, request: full_node_protocol.RequestTransaction) -> Optional[Message]:
"""Peer has requested a full transaction from us."""
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
spend_bundle = self.full_node.mempool_manager.get_spendbundle(request.transaction_id)
if spend_bundle is None:
return None
transaction = full_node_protocol.RespondTransaction(spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
return msg
@peer_required
@api_request
@bytes_required
async def respond_transaction(
self,
tx: full_node_protocol.RespondTransaction,
peer: ws.WSstaicoinConnection,
tx_bytes: bytes = b"",
test: bool = False,
) -> Optional[Message]:
"""
        Receives a full transaction from a peer.
        If the tx is added to the mempool, send the tx_id to other peers (new_transaction).
"""
assert tx_bytes != b""
spend_name = std_hash(tx_bytes)
if spend_name in self.full_node.full_node_store.pending_tx_request:
self.full_node.full_node_store.pending_tx_request.pop(spend_name)
if spend_name in self.full_node.full_node_store.peers_with_tx:
self.full_node.full_node_store.peers_with_tx.pop(spend_name)
await self.full_node.respond_transaction(tx.transaction, spend_name, peer, test)
return None
@api_request
async def request_proof_of_weight(self, request: full_node_protocol.RequestProofOfWeight) -> Optional[Message]:
if self.full_node.weight_proof_handler is None:
return None
if not self.full_node.blockchain.contains_block(request.tip):
self.log.error(f"got weight proof request for unknown peak {request.tip}")
return None
if request.tip in self.full_node.pow_creation:
event = self.full_node.pow_creation[request.tip]
await event.wait()
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
else:
event = asyncio.Event()
self.full_node.pow_creation[request.tip] = event
wp = await self.full_node.weight_proof_handler.get_proof_of_weight(request.tip)
event.set()
tips = list(self.full_node.pow_creation.keys())
if len(tips) > 4:
# Remove old from cache
for i in range(0, 4):
self.full_node.pow_creation.pop(tips[i])
if wp is None:
self.log.error(f"failed creating weight proof for peak {request.tip}")
return None
# Serialization of wp is slow
if (
self.full_node.full_node_store.serialized_wp_message_tip is not None
and self.full_node.full_node_store.serialized_wp_message_tip == request.tip
):
return self.full_node.full_node_store.serialized_wp_message
message = make_msg(
ProtocolMessageTypes.respond_proof_of_weight, full_node_protocol.RespondProofOfWeight(wp, request.tip)
)
self.full_node.full_node_store.serialized_wp_message_tip = request.tip
self.full_node.full_node_store.serialized_wp_message = message
return message
@api_request
async def respond_proof_of_weight(self, request: full_node_protocol.RespondProofOfWeight) -> Optional[Message]:
self.log.warning("Received proof of weight too late.")
return None
@api_request
async def request_block(self, request: full_node_protocol.RequestBlock) -> Optional[Message]:
if not self.full_node.blockchain.contains_height(request.height):
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
header_hash = self.full_node.blockchain.height_to_hash(request.height)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
if not request.include_transaction_block and block.transactions_generator is not None:
block = dataclasses.replace(block, transactions_generator=None)
return make_msg(ProtocolMessageTypes.respond_block, full_node_protocol.RespondBlock(block))
reject = RejectBlock(request.height)
msg = make_msg(ProtocolMessageTypes.reject_block, reject)
return msg
@api_request
async def request_blocks(self, request: full_node_protocol.RequestBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
reject = RejectBlocks(request.start_height, request.end_height)
msg: Message = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
if not request.include_transaction_block:
blocks: List[FullBlock] = []
for i in range(request.start_height, request.end_height + 1):
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(
self.full_node.blockchain.height_to_hash(uint32(i))
)
if block is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
block = dataclasses.replace(block, transactions_generator=None)
blocks.append(block)
msg = make_msg(
ProtocolMessageTypes.respond_blocks,
full_node_protocol.RespondBlocks(request.start_height, request.end_height, blocks),
)
else:
blocks_bytes: List[bytes] = []
for i in range(request.start_height, request.end_height + 1):
block_bytes: Optional[bytes] = await self.full_node.block_store.get_full_block_bytes(
self.full_node.blockchain.height_to_hash(uint32(i))
)
if block_bytes is None:
reject = RejectBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_blocks, reject)
return msg
blocks_bytes.append(block_bytes)
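# Manually mirror the streamable encoding of RespondBlocks (start height,
# end height, then a 4-byte big-endian list length followed by the raw
# serialized blocks) so we never have to deserialize the blocks just to
# re-serialize the response.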
respond_blocks_manually_streamed: bytes = (
bytes(uint32(request.start_height))
+ bytes(uint32(request.end_height))
+ len(blocks_bytes).to_bytes(4, "big", signed=False)
)
for block_bytes in blocks_bytes:
respond_blocks_manually_streamed += block_bytes
msg = make_msg(ProtocolMessageTypes.respond_blocks, respond_blocks_manually_streamed)
return msg
@api_request
async def reject_block(self, request: full_node_protocol.RejectBlock):
self.log.debug(f"reject_block {request.height}")
@api_request
async def reject_blocks(self, request: full_node_protocol.RejectBlocks):
self.log.debug(f"reject_blocks {request.start_height} {request.end_height}")
@api_request
async def respond_blocks(self, request: full_node_protocol.RespondBlocks) -> None:
self.log.warning("Received unsolicited/late blocks")
return None
@api_request
@peer_required
async def respond_block(
self,
respond_block: full_node_protocol.RespondBlock,
peer: ws.WSstaicoinConnection,
) -> Optional[Message]:
"""
Receive a full block from a peer full node (or ourselves).
"""
self.log.warning(f"Received unsolicited/late block from peer {peer.get_peer_info()}")
return None
@api_request
async def new_unfinished_block(
self, new_unfinished_block: full_node_protocol.NewUnfinishedBlock
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
block_hash = new_unfinished_block.unfinished_reward_hash
if self.full_node.full_node_store.get_unfinished_block(block_hash) is not None:
return None
# This prevents us from downloading the same block from many peers
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
return None
msg = make_msg(
ProtocolMessageTypes.request_unfinished_block,
full_node_protocol.RequestUnfinishedBlock(block_hash),
)
self.full_node.full_node_store.requesting_unfinished_blocks.add(block_hash)
# However, we want to eventually download from other peers, if this peer does not respond
# Todo: keep track of who it was
async def eventually_clear():
await asyncio.sleep(5)
if block_hash in self.full_node.full_node_store.requesting_unfinished_blocks:
self.full_node.full_node_store.requesting_unfinished_blocks.remove(block_hash)
asyncio.create_task(eventually_clear())
return msg
@api_request
async def request_unfinished_block(
self, request_unfinished_block: full_node_protocol.RequestUnfinishedBlock
) -> Optional[Message]:
unfinished_block: Optional[UnfinishedBlock] = self.full_node.full_node_store.get_unfinished_block(
request_unfinished_block.unfinished_reward_hash
)
if unfinished_block is not None:
msg = make_msg(
ProtocolMessageTypes.respond_unfinished_block,
full_node_protocol.RespondUnfinishedBlock(unfinished_block),
)
return msg
return None
@peer_required
@api_request
async def respond_unfinished_block(
self,
respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
peer: ws.WSstaicoinConnection,
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_unfinished_block(respond_unfinished_block, peer)
return None
@api_request
@peer_required
async def new_signage_point_or_end_of_sub_slot(
self, new_sp: full_node_protocol.NewSignagePointOrEndOfSubSlot, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
# Ignore if syncing
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_signage_point_by_index(
new_sp.challenge_hash,
new_sp.index_from_challenge,
new_sp.last_rc_infusion,
)
is not None
):
return None
if self.full_node.full_node_store.have_newer_signage_point(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
):
return None
if new_sp.index_from_challenge == 0 and new_sp.prev_challenge_hash is not None:
if self.full_node.full_node_store.get_sub_slot(new_sp.prev_challenge_hash) is None:
collected_eos = []
challenge_hash_to_request = new_sp.challenge_hash
last_rc = new_sp.last_rc_infusion
num_non_empty_sub_slots_seen = 0
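# Walk backwards through at most 30 sub-slots, requesting each end of sub-slot
# until we reach one we already have (or the genesis challenge). If we see 3 or
# more non-empty sub-slots we assume we have diverged from this peer and give up.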
for _ in range(30):
if num_non_empty_sub_slots_seen >= 3:
self.log.debug("Diverged from peer. Don't have the same blocks")
return None
# If this is an end of sub slot, and we don't have the prev, request the prev instead
# We want to catch up to the latest slot so we can receive signage points
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
challenge_hash_to_request, uint8(0), last_rc
)
response = await peer.request_signage_point_or_end_of_sub_slot(full_node_request, timeout=10)
if not isinstance(response, full_node_protocol.RespondEndOfSubSlot):
self.full_node.log.debug(f"Invalid response for slot {response}")
return None
collected_eos.append(response)
if (
self.full_node.full_node_store.get_sub_slot(
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
is not None
or response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
== self.full_node.constants.GENESIS_CHALLENGE
):
for eos in reversed(collected_eos):
await self.respond_end_of_sub_slot(eos, peer)
return None
if (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.number_of_iterations
!= response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.number_of_iterations
):
num_non_empty_sub_slots_seen += 1
challenge_hash_to_request = (
response.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
)
last_rc = response.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge
self.full_node.log.warning("Failed to catch up in sub-slots")
return None
if new_sp.index_from_challenge > 0:
if (
new_sp.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE
and self.full_node.full_node_store.get_sub_slot(new_sp.challenge_hash) is None
):
# If this is a normal signage point, and we don't have the end of sub slot, request the end of sub slot
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, uint8(0), new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
# Otherwise (we have the prev or the end of sub slot), request it normally
full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
new_sp.challenge_hash, new_sp.index_from_challenge, new_sp.last_rc_infusion
)
return make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request)
@api_request
async def request_signage_point_or_end_of_sub_slot(
self, request: full_node_protocol.RequestSignagePointOrEndOfSubSlot
) -> Optional[Message]:
if request.index_from_challenge == 0:
sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = self.full_node.full_node_store.get_sub_slot(
request.challenge_hash
)
if sub_slot is not None:
return make_msg(
ProtocolMessageTypes.respond_end_of_sub_slot,
full_node_protocol.RespondEndOfSubSlot(sub_slot[0]),
)
else:
if self.full_node.full_node_store.get_sub_slot(request.challenge_hash) is None:
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
self.log.info(f"Don't have challenge hash {request.challenge_hash}")
sp: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point_by_index(
request.challenge_hash,
request.index_from_challenge,
request.last_rc_infusion,
)
if sp is not None:
assert (
sp.cc_vdf is not None
and sp.cc_proof is not None
and sp.rc_vdf is not None
and sp.rc_proof is not None
)
full_node_response = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
sp.cc_vdf,
sp.cc_proof,
sp.rc_vdf,
sp.rc_proof,
)
return make_msg(ProtocolMessageTypes.respond_signage_point, full_node_response)
else:
self.log.info(f"Don't have signage point {request}")
return None
@peer_required
@api_request
async def respond_signage_point(
self, request: full_node_protocol.RespondSignagePoint, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
# Already have signage point
if self.full_node.full_node_store.have_newer_signage_point(
request.challenge_chain_vdf.challenge,
request.index_from_challenge,
request.reward_chain_vdf.challenge,
):
return None
existing_sp = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_vdf.output.get_hash()
)
if existing_sp is not None and existing_sp.rc_vdf == request.reward_chain_vdf:
return None
peak = self.full_node.blockchain.get_peak()
if peak is not None and peak.height > self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
next_sub_slot_iters = self.full_node.blockchain.get_next_slot_iters(peak.header_hash, True)
sub_slots_for_peak = await self.full_node.blockchain.get_sp_and_ip_sub_slots(peak.header_hash)
assert sub_slots_for_peak is not None
ip_sub_slot: Optional[EndOfSubSlotBundle] = sub_slots_for_peak[1]
else:
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
next_sub_slot_iters = sub_slot_iters
ip_sub_slot = None
added = self.full_node.full_node_store.new_signage_point(
request.index_from_challenge,
self.full_node.blockchain,
self.full_node.blockchain.get_peak(),
next_sub_slot_iters,
SignagePoint(
request.challenge_chain_vdf,
request.challenge_chain_proof,
request.reward_chain_vdf,
request.reward_chain_proof,
),
)
if added:
await self.full_node.signage_point_post_processing(request, peer, ip_sub_slot)
else:
self.log.debug(
f"Signage point {request.index_from_challenge} not added, CC challenge: "
f"{request.challenge_chain_vdf.challenge}, RC challenge: {request.reward_chain_vdf.challenge}"
)
return None
@peer_required
@api_request
async def respond_end_of_sub_slot(
self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
msg, _ = await self.full_node.respond_end_of_sub_slot(request, peer)
return msg
@peer_required
@api_request
async def request_mempool_transactions(
self,
request: full_node_protocol.RequestMempoolTransactions,
peer: ws.WSstaicoinConnection,
) -> Optional[Message]:
received_filter = PyBIP158(bytearray(request.filter))
items: List[MempoolItem] = await self.full_node.mempool_manager.get_items_not_in_filter(received_filter)
for item in items:
transaction = full_node_protocol.RespondTransaction(item.spend_bundle)
msg = make_msg(ProtocolMessageTypes.respond_transaction, transaction)
await peer.send_message(msg)
return None
# FARMER PROTOCOL
@api_request
@peer_required
async def declare_proof_of_space(
self, request: farmer_protocol.DeclareProofOfSpace, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
"""
Creates a block body and header, with the proof of space, coinbase, and fee targets provided
by the farmer, and sends the hash of the header data back to the farmer.
"""
if self.full_node.sync_store.get_sync_mode():
return None
async with self.full_node.timelord_lock:
sp_vdfs: Optional[SignagePoint] = self.full_node.full_node_store.get_signage_point(
request.challenge_chain_sp
)
if sp_vdfs is None:
self.log.warning(f"Received proof of space for an unknown signage point {request.challenge_chain_sp}")
return None
if request.signage_point_index > 0:
assert sp_vdfs.rc_vdf is not None
if sp_vdfs.rc_vdf.output.get_hash() != request.reward_chain_sp:
self.log.debug(
f"Received proof of space for a potentially old signage point {request.challenge_chain_sp}. "
f"Current sp: {sp_vdfs.rc_vdf.output.get_hash()}"
)
return None
if request.signage_point_index == 0:
cc_challenge_hash: bytes32 = request.challenge_chain_sp
else:
assert sp_vdfs.cc_vdf is not None
cc_challenge_hash = sp_vdfs.cc_vdf.challenge
pos_sub_slot: Optional[Tuple[EndOfSubSlotBundle, int, uint128]] = None
if request.challenge_hash != self.full_node.constants.GENESIS_CHALLENGE:
# Checks that the proof of space is a response to a recent challenge and valid SP
pos_sub_slot = self.full_node.full_node_store.get_sub_slot(cc_challenge_hash)
if pos_sub_slot is None:
self.log.warning(f"Received proof of space for an unknown sub slot: {request}")
return None
total_iters_pos_slot: uint128 = pos_sub_slot[2]
else:
total_iters_pos_slot = uint128(0)
assert cc_challenge_hash == request.challenge_hash
# Now we know that the proof of space has a signage point either:
# 1. In the previous sub-slot of the peak (overflow)
# 2. In the same sub-slot as the peak
# 3. In a future sub-slot that we already know of
# Checks that the proof of space is valid
quality_string: Optional[bytes32] = request.proof_of_space.verify_and_get_quality_string(
self.full_node.constants, cc_challenge_hash, request.challenge_chain_sp
)
assert quality_string is not None and len(quality_string) == 32
# Grab best transactions from Mempool for given tip target
aggregate_signature: G2Element = G2Element()
block_generator: Optional[BlockGenerator] = None
additions: Optional[List[Coin]] = []
removals: Optional[List[Coin]] = []
async with self.full_node.blockchain.lock:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is not None:
# Finds the last transaction block before this one
curr_l_tb: BlockRecord = peak
while not curr_l_tb.is_transaction_block:
curr_l_tb = self.full_node.blockchain.block_record(curr_l_tb.prev_hash)
try:
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(
curr_l_tb.header_hash
)
except Exception as e:
self.log.error(f"Traceback: {traceback.format_exc()}")
self.full_node.log.error(f"Error making spend bundle {e} peak: {peak}")
mempool_bundle = None
if mempool_bundle is not None:
spend_bundle = mempool_bundle[0]
additions = mempool_bundle[1]
removals = mempool_bundle[2]
self.full_node.log.info(f"Add rem: {len(additions)} {len(removals)}")
aggregate_signature = spend_bundle.aggregated_signature
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
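# These callbacks are handed to create_unfinished_block below: get_plot_sig
# returns the farmer-supplied signatures for the challenge/reward chain signage
# points (and an empty signature for anything else), while get_pool_sig returns
# the farmer's pool signature.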
def get_plot_sig(to_sign, _) -> G2Element:
if to_sign == request.challenge_chain_sp:
return request.challenge_chain_sp_signature
elif to_sign == request.reward_chain_sp:
return request.reward_chain_sp_signature
return G2Element()
def get_pool_sig(_1, _2) -> Optional[G2Element]:
return request.pool_signature
prev_b: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
# Finds the previous block from the signage point, ensuring that the reward chain VDF is correct
if prev_b is not None:
if request.signage_point_index == 0:
if pos_sub_slot is None:
self.log.warning("Pos sub slot is None")
return None
rc_challenge = pos_sub_slot[0].reward_chain.end_of_slot_vdf.challenge
else:
assert sp_vdfs.rc_vdf is not None
rc_challenge = sp_vdfs.rc_vdf.challenge
# Backtrack through empty sub-slots
for eos, _, _ in reversed(self.full_node.full_node_store.finished_sub_slots):
if eos is not None and eos.reward_chain.get_hash() == rc_challenge:
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
found = False
attempts = 0
while prev_b is not None and attempts < 10:
if prev_b.reward_infusion_new_challenge == rc_challenge:
found = True
break
if prev_b.finished_reward_slot_hashes is not None and len(prev_b.finished_reward_slot_hashes) > 0:
if prev_b.finished_reward_slot_hashes[-1] == rc_challenge:
# This block includes a sub-slot which is where our SP vdf starts. Go back one more
# to find the prev block
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
found = True
break
prev_b = self.full_node.blockchain.try_block_record(prev_b.prev_hash)
attempts += 1
if not found:
self.log.warning("Did not find a previous block with the correct reward chain hash")
return None
try:
finished_sub_slots: Optional[
List[EndOfSubSlotBundle]
] = self.full_node.full_node_store.get_finished_sub_slots(
self.full_node.blockchain, prev_b, cc_challenge_hash
)
if finished_sub_slots is None:
return None
if (
len(finished_sub_slots) > 0
and pos_sub_slot is not None
and finished_sub_slots[-1] != pos_sub_slot[0]
):
self.log.error("Have different sub-slots than is required to farm this block")
return None
except ValueError as e:
self.log.warning(f"Value Error: {e}")
return None
if prev_b is None:
pool_target = PoolTarget(
self.full_node.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH,
uint32(0),
)
farmer_ph = self.full_node.constants.GENESIS_PRE_FARM_FARMER_PUZZLE_HASH
officialwallets_ph = self.full_node.constants.GENESIS_PRE_FARM_OFFICIALWALLETS_PUZZLE_HASH
else:
farmer_ph = request.farmer_puzzle_hash
officialwallets_ph = self.full_node.constants.GENESIS_PRE_FARM_OFFICIALWALLETS_PUZZLE_HASH
if request.proof_of_space.pool_contract_puzzle_hash is not None:
pool_target = PoolTarget(request.proof_of_space.pool_contract_puzzle_hash, uint32(0))
else:
assert request.pool_target is not None
pool_target = request.pool_target
if peak is None or peak.height <= self.full_node.constants.MAX_SUB_SLOT_BLOCKS:
difficulty = self.full_node.constants.DIFFICULTY_STARTING
sub_slot_iters = self.full_node.constants.SUB_SLOT_ITERS_STARTING
else:
difficulty = uint64(peak.weight - self.full_node.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
for sub_slot in finished_sub_slots:
if sub_slot.challenge_chain.new_difficulty is not None:
difficulty = sub_slot.challenge_chain.new_difficulty
if sub_slot.challenge_chain.new_sub_slot_iters is not None:
sub_slot_iters = sub_slot.challenge_chain.new_sub_slot_iters
required_iters: uint64 = calculate_iterations_quality(
self.full_node.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
request.proof_of_space.size,
difficulty,
request.challenge_chain_sp,
)
sp_iters: uint64 = calculate_sp_iters(self.full_node.constants, sub_slot_iters, request.signage_point_index)
ip_iters: uint64 = calculate_ip_iters(
self.full_node.constants,
sub_slot_iters,
request.signage_point_index,
required_iters,
)
# The block's timestamp must be greater than the previous transaction block's timestamp
timestamp = uint64(int(time.time()))
curr: Optional[BlockRecord] = prev_b
while curr is not None and not curr.is_transaction_block and curr.height != 0:
curr = self.full_node.blockchain.try_block_record(curr.prev_hash)
if curr is not None:
assert curr.timestamp is not None
if timestamp <= curr.timestamp:
timestamp = uint64(int(curr.timestamp + 1))
self.log.info("Starting to make the unfinished block")
unfinished_block: UnfinishedBlock = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
officialwallets_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
block_generator,
aggregate_signature,
additions,
removals,
prev_b,
finished_sub_slots,
)
self.log.info("Made the unfinished block")
if prev_b is not None:
height: uint32 = uint32(prev_b.height + 1)
else:
height = uint32(0)
self.full_node.full_node_store.add_candidate_block(quality_string, height, unfinished_block)
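# The candidate is stored so it can be completed in signed_values() once the
# farmer sends back the foliage signatures.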
foliage_sb_data_hash = unfinished_block.foliage.foliage_block_data.get_hash()
if unfinished_block.is_transaction_block():
foliage_transaction_block_hash = unfinished_block.foliage.foliage_transaction_block_hash
else:
foliage_transaction_block_hash = bytes([0] * 32)
message = farmer_protocol.RequestSignedValues(
quality_string,
foliage_sb_data_hash,
foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
# Adds backup in case the first one fails
if unfinished_block.is_transaction_block() and unfinished_block.transactions_generator is not None:
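# The backup candidate is built without transactions (no generator and an
# empty aggregate signature), so signed_values() can still farm a block if the
# transaction-bearing candidate fails validation.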
unfinished_block_backup = create_unfinished_block(
self.full_node.constants,
total_iters_pos_slot,
sub_slot_iters,
request.signage_point_index,
sp_iters,
ip_iters,
request.proof_of_space,
cc_challenge_hash,
farmer_ph,
officialwallets_ph,
pool_target,
get_plot_sig,
get_pool_sig,
sp_vdfs,
timestamp,
self.full_node.blockchain,
b"",
None,
G2Element(),
None,
None,
prev_b,
finished_sub_slots,
)
self.full_node.full_node_store.add_candidate_block(
quality_string, height, unfinished_block_backup, backup=True
)
return None
@api_request
@peer_required
async def signed_values(
self, farmer_request: farmer_protocol.SignedValues, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
"""
Signature of header hash, by the harvester. This is enough to create an unfinished
block, which only needs a Proof of Time to be finished. If the signature is valid,
we call the unfinished_block routine.
"""
candidate_tuple: Optional[Tuple[uint32, UnfinishedBlock]] = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string
)
if candidate_tuple is None:
self.log.warning(f"Quality string {farmer_request.quality_string} not found in database")
return None
height, candidate = candidate_tuple
if not AugSchemeMPL.verify(
candidate.reward_chain_block.proof_of_space.plot_public_key,
candidate.foliage.foliage_block_data.get_hash(),
farmer_request.foliage_block_data_signature,
):
self.log.warning("Signature not valid. There might be a collision in plots. Ignore this during tests.")
return None
fsb2 = dataclasses.replace(
candidate.foliage,
foliage_block_data_signature=farmer_request.foliage_block_data_signature,
)
if candidate.is_transaction_block():
fsb2 = dataclasses.replace(
fsb2, foliage_transaction_block_signature=farmer_request.foliage_transaction_block_signature
)
new_candidate = dataclasses.replace(candidate, foliage=fsb2)
if not self.full_node.has_valid_pool_sig(new_candidate):
self.log.warning("Trying to make a pre-farm block but height is not 0")
return None
# Propagate to ourselves (which validates and does further propagations)
request = full_node_protocol.RespondUnfinishedBlock(new_candidate)
try:
await self.full_node.respond_unfinished_block(request, None, True)
except Exception as e:
# If we have an error with this block, try making an empty block
self.full_node.log.error(f"Error farming block {e} {request}")
candidate_tuple = self.full_node.full_node_store.get_candidate_block(
farmer_request.quality_string, backup=True
)
if candidate_tuple is not None:
height, unfinished_block = candidate_tuple
self.full_node.full_node_store.add_candidate_block(
farmer_request.quality_string, height, unfinished_block, False
)
message = farmer_protocol.RequestSignedValues(
farmer_request.quality_string,
unfinished_block.foliage.foliage_block_data.get_hash(),
unfinished_block.foliage.foliage_transaction_block_hash,
)
await peer.send_message(make_msg(ProtocolMessageTypes.request_signed_values, message))
return None
# TIMELORD PROTOCOL
@peer_required
@api_request
async def new_infusion_point_vdf(
self, request: timelord_protocol.NewInfusionPointVDF, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
# Lookup unfinished blocks
async with self.full_node.timelord_lock:
return await self.full_node.new_infusion_point_vdf(request, peer)
@peer_required
@api_request
async def new_signage_point_vdf(
self, request: timelord_protocol.NewSignagePointVDF, peer: ws.WSstaicoinConnection
) -> None:
if self.full_node.sync_store.get_sync_mode():
return None
full_node_message = full_node_protocol.RespondSignagePoint(
request.index_from_challenge,
request.challenge_chain_sp_vdf,
request.challenge_chain_sp_proof,
request.reward_chain_sp_vdf,
request.reward_chain_sp_proof,
)
await self.respond_signage_point(full_node_message, peer)
@peer_required
@api_request
async def new_end_of_sub_slot_vdf(
self, request: timelord_protocol.NewEndOfSubSlotVDF, peer: ws.WSstaicoinConnection
) -> Optional[Message]:
if self.full_node.sync_store.get_sync_mode():
return None
if (
self.full_node.full_node_store.get_sub_slot(request.end_of_sub_slot_bundle.challenge_chain.get_hash())
is not None
):
return None
# Calls our own internal message to handle the end of sub slot, and potentially broadcasts to other peers.
full_node_message = full_node_protocol.RespondEndOfSubSlot(request.end_of_sub_slot_bundle)
msg, added = await self.full_node.respond_end_of_sub_slot(full_node_message, peer)
if not added:
self.log.error(
f"Was not able to add end of sub-slot: "
f"{request.end_of_sub_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}. "
f"Re-sending new-peak to timelord"
)
await self.full_node.send_peak_to_timelords(peer=peer)
return None
else:
return msg
@api_request
async def request_block_header(self, request: wallet_protocol.RequestBlockHeader) -> Optional[Message]:
header_hash = self.full_node.blockchain.height_to_hash(request.height)
if header_hash is None:
msg = make_msg(ProtocolMessageTypes.reject_header_request, RejectHeaderRequest(request.height))
return msg
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is not None:
tx_removals, tx_additions = await self.full_node.blockchain.get_tx_removals_and_additions(block)
header_block = get_block_header(block, tx_additions, tx_removals)
msg = make_msg(
ProtocolMessageTypes.respond_block_header,
wallet_protocol.RespondBlockHeader(header_block),
)
return msg
return None
@api_request
async def request_additions(self, request: wallet_protocol.RequestAdditions) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectAdditionsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_additions_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
additions = await self.full_node.coin_store.get_coins_added_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
puzzlehash_coins_map: Dict[bytes32, List[Coin]] = {}
for coin_record in additions:
if coin_record.coin.puzzle_hash in puzzlehash_coins_map:
puzzlehash_coins_map[coin_record.coin.puzzle_hash].append(coin_record.coin)
else:
puzzlehash_coins_map[coin_record.coin.puzzle_hash] = [coin_record.coin]
coins_map: List[Tuple[bytes32, List[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes, Optional[bytes]]] = []
if request.puzzle_hashes is None:
for puzzle_hash, coins in puzzlehash_coins_map.items():
coins_map.append((puzzle_hash, coins))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, None)
else:
# Create addition Merkle set
addition_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle, coins in puzzlehash_coins_map.items():
addition_merkle_set.add_already_hashed(puzzle)
addition_merkle_set.add_already_hashed(hash_coin_list(coins))
assert addition_merkle_set.get_root() == block.foliage_transaction_block.additions_root
for puzzle_hash in request.puzzle_hashes:
result, proof = addition_merkle_set.is_included_already_hashed(puzzle_hash)
if puzzle_hash in puzzlehash_coins_map:
coins_map.append((puzzle_hash, puzzlehash_coins_map[puzzle_hash]))
hash_coin_str = hash_coin_list(puzzlehash_coins_map[puzzle_hash])
result_2, proof_2 = addition_merkle_set.is_included_already_hashed(hash_coin_str)
assert result
assert result_2
proofs_map.append((puzzle_hash, proof, proof_2))
else:
coins_map.append((puzzle_hash, []))
assert not result
proofs_map.append((puzzle_hash, proof, None))
response = wallet_protocol.RespondAdditions(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_additions, response)
return msg
@api_request
async def request_removals(self, request: wallet_protocol.RequestRemovals) -> Optional[Message]:
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(request.header_hash)
# We lock so that the coin store does not get modified
if (
block is None
or block.is_transaction_block() is False
or block.height != request.height
or block.height > self.full_node.blockchain.get_peak_height()
or self.full_node.blockchain.height_to_hash(block.height) != request.header_hash
):
reject = wallet_protocol.RejectRemovalsRequest(request.height, request.header_hash)
msg = make_msg(ProtocolMessageTypes.reject_removals_request, reject)
return msg
assert block is not None and block.foliage_transaction_block is not None
# Note: this might return bad data if there is a reorg in this time
all_removals: List[CoinRecord] = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
if self.full_node.blockchain.height_to_hash(block.height) != request.header_hash:
raise ValueError(f"Block {block.header_hash} no longer in chain")
all_removals_dict: Dict[bytes32, Coin] = {}
for coin_record in all_removals:
all_removals_dict[coin_record.coin.name()] = coin_record.coin
coins_map: List[Tuple[bytes32, Optional[Coin]]] = []
proofs_map: List[Tuple[bytes32, bytes]] = []
# If there are no transactions, respond with empty lists
if block.transactions_generator is None:
proofs: Optional[List]
if request.coin_names is None:
proofs = None
else:
proofs = []
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, [], proofs)
elif request.coin_names is None or len(request.coin_names) == 0:
for removed_name, removed_coin in all_removals_dict.items():
coins_map.append((removed_name, removed_coin))
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, None)
else:
assert block.transactions_generator
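# Build a Merkle set over all removed coin names so we can return an inclusion
# proof for coins spent in this block and an exclusion proof for the remaining
# requested coin names.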
removal_merkle_set = MerkleSet()
for removed_name, removed_coin in all_removals_dict.items():
removal_merkle_set.add_already_hashed(removed_name)
assert removal_merkle_set.get_root() == block.foliage_transaction_block.removals_root
for coin_name in request.coin_names:
result, proof = removal_merkle_set.is_included_already_hashed(coin_name)
proofs_map.append((coin_name, proof))
if coin_name in all_removals_dict:
removed_coin = all_removals_dict[coin_name]
coins_map.append((coin_name, removed_coin))
assert result
else:
coins_map.append((coin_name, None))
assert not result
response = wallet_protocol.RespondRemovals(block.height, block.header_hash, coins_map, proofs_map)
msg = make_msg(ProtocolMessageTypes.respond_removals, response)
return msg
@api_request
async def send_transaction(self, request: wallet_protocol.SendTransaction) -> Optional[Message]:
spend_name = request.transaction.name()
status, error = await self.full_node.respond_transaction(request.transaction, spend_name)
error_name = error.name if error is not None else None
if status == MempoolInclusionStatus.SUCCESS:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
else:
# If it failed/pending, but it previously succeeded (in the mempool), this is idempotence: return SUCCESS
if self.full_node.mempool_manager.get_spendbundle(spend_name) is not None:
response = wallet_protocol.TransactionAck(spend_name, uint8(MempoolInclusionStatus.SUCCESS.value), None)
else:
response = wallet_protocol.TransactionAck(spend_name, uint8(status.value), error_name)
msg = make_msg(ProtocolMessageTypes.transaction_ack, response)
return msg
@api_request
async def request_puzzle_solution(self, request: wallet_protocol.RequestPuzzleSolution) -> Optional[Message]:
coin_name = request.coin_name
height = request.height
coin_record = await self.full_node.coin_store.get_coin_record(coin_name)
reject = wallet_protocol.RejectPuzzleSolution(coin_name, height)
reject_msg = make_msg(ProtocolMessageTypes.reject_puzzle_solution, reject)
if coin_record is None or coin_record.spent_block_index != height:
return reject_msg
header_hash = self.full_node.blockchain.height_to_hash(height)
block: Optional[FullBlock] = await self.full_node.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
return reject_msg
block_generator: Optional[BlockGenerator] = await self.full_node.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.full_node.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
return reject_msg
pz = Program.to(puzzle)
sol = Program.to(solution)
wrapper = PuzzleSolutionResponse(coin_name, height, pz, sol)
response = wallet_protocol.RespondPuzzleSolution(wrapper)
response_msg = make_msg(ProtocolMessageTypes.respond_puzzle_solution, response)
return response_msg
@api_request
async def request_header_blocks(self, request: wallet_protocol.RequestHeaderBlocks) -> Optional[Message]:
if request.end_height < request.start_height or request.end_height - request.start_height > 32:
return None
header_hashes = []
for i in range(request.start_height, request.end_height + 1):
if not self.full_node.blockchain.contains_height(uint32(i)):
reject = RejectHeaderBlocks(request.start_height, request.end_height)
msg = make_msg(ProtocolMessageTypes.reject_header_blocks, reject)
return msg
header_hashes.append(self.full_node.blockchain.height_to_hash(uint32(i)))
blocks: List[FullBlock] = await self.full_node.block_store.get_blocks_by_hash(header_hashes)
header_blocks = []
for block in blocks:
added_coins_records = await self.full_node.coin_store.get_coins_added_at_height(block.height)
removed_coins_records = await self.full_node.coin_store.get_coins_removed_at_height(block.height)
added_coins = [record.coin for record in added_coins_records if not record.coinbase]
removal_names = [record.coin.name() for record in removed_coins_records]
header_block = get_block_header(block, added_coins, removal_names)
header_blocks.append(header_block)
msg = make_msg(
ProtocolMessageTypes.respond_header_blocks,
wallet_protocol.RespondHeaderBlocks(request.start_height, request.end_height, header_blocks),
)
return msg
@api_request
async def respond_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_proof_of_time(request)
@execute_task
@peer_required
@api_request
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSstaicoinConnection):
if self.full_node.sync_store.get_sync_mode():
return None
# this semaphore will only allow a limited number of tasks call
# new_compact_vdf() at a time, since it can be expensive
async with self.full_node.compact_vdf_sem:
await self.full_node.new_compact_vdf(request, peer)
@peer_required
@api_request
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSstaicoinConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.request_compact_vdf(request, peer)
@peer_required
@api_request
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSstaicoinConnection):
if self.full_node.sync_store.get_sync_mode():
return None
await self.full_node.respond_compact_vdf(request, peer)
|
the-stack_106_20197
|
from __future__ import division
import numpy as np
from menpo.base import doc_inherit, name_of_callable
from menpo.math import pca, pcacov, ipca, as_matrix
from .linear import MeanLinearVectorModel
from .vectorizable import VectorizableBackedModel
class PCAVectorModel(MeanLinearVectorModel):
r"""
A :map:`MeanLinearModel` where components are Principal Components.
Principal Component Analysis (PCA) by eigenvalue decomposition of the
data's scatter matrix. For details of the implementation of PCA, see
:map:`pca`.
Parameters
----------
samples : `ndarray` or `list` or `iterable` of `ndarray`
List or iterable of numpy arrays to build the model from, or an
existing data matrix.
centre : `bool`, optional
When ``True`` (default) PCA is performed after mean centering the data.
If ``False`` the data is assumed to be centred, and the mean will be
``0``.
n_samples : `int`, optional
If provided then ``samples`` must be an iterator that yields
``n_samples``. If not provided then samples has to be a `list` (so we
know how large the data matrix needs to be).
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any components
above and beyond this one are discarded.
inplace : `bool`, optional
If ``True`` the data matrix is modified in place. Otherwise, the data
matrix is copied.
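Examples
--------
A minimal, illustrative construction (the names below are hypothetical)::

    import numpy as np
    samples = [np.random.randn(100) for _ in range(50)]  # 50 samples, 100 features
    model = PCAVectorModel(samples, centre=True, max_n_components=10)
    mean_vec = model.instance(np.zeros(model.n_active_components))  # zero weights -> the mean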
"""
def __init__(self, samples, centre=True, n_samples=None,
max_n_components=None, inplace=True):
# Generate data matrix
data, self.n_samples = self._data_to_matrix(samples, n_samples)
# Compute pca
e_vectors, e_values, mean = pca(data, centre=centre, inplace=inplace)
# The call to __init__ of MeanLinearModel is done in here
self._constructor_helper(
eigenvalues=e_values, eigenvectors=e_vectors, mean=mean,
centred=centre, max_n_components=max_n_components)
@classmethod
def init_from_covariance_matrix(cls, C, mean, n_samples, centred=True,
is_inverse=False, max_n_components=None):
r"""
Build the Principal Component Analysis (PCA) by eigenvalue
decomposition of the provided covariance/scatter matrix. For details
of the implementation of PCA, see :map:`pcacov`.
Parameters
----------
C : ``(n_features, n_features)`` `ndarray` or `scipy.sparse`
The Covariance/Scatter matrix. If it is a precision matrix (inverse
covariance), then set `is_inverse=True`.
mean : ``(n_features, )`` `ndarray`
The mean vector.
n_samples : `int`
The number of samples used to generate the covariance matrix.
centred : `bool`, optional
When ``True`` we assume that the data were centered before
computing the covariance matrix.
is_inverse : `bool`, optional
If ``True``, then it is assumed that `C` is a precision matrix (
inverse covariance). Thus, the eigenvalues will be inverted. If
``False``, then it is assumed that `C` is a covariance matrix.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# Compute pca on covariance
e_vectors, e_values = pcacov(C, is_inverse=is_inverse)
# Create new pca instance
model = cls.__new__(cls)
model.n_samples = n_samples
# The call to __init__ of MeanLinearModel is done in here
model._constructor_helper(
eigenvalues=e_values, eigenvectors=e_vectors, mean=mean,
centred=centred, max_n_components=max_n_components)
return model
@classmethod
def init_from_components(cls, components, eigenvalues, mean, n_samples,
centred, max_n_components=None):
r"""
Build the Principal Component Analysis (PCA) using the provided
components (eigenvectors) and eigenvalues.
Parameters
----------
components : ``(n_components, n_features)`` `ndarray`
The eigenvectors to be used.
eigenvalues : ``(n_components, )`` `ndarray`
The corresponding eigenvalues.
mean : ``(n_features, )`` `ndarray`
The mean vector.
n_samples : `int`
The number of samples used to generate the eigenvectors.
centred : `bool`
When ``True`` we assume that the data were centered before
computing the eigenvectors.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# This is a bit of a filthy trick that by rights should not be done,
# but we want to have these nice static constructors so we are living
# with the shame (create an empty object instance which we fill in).
model = cls.__new__(cls)
model.n_samples = n_samples
# The call to __init__ of MeanLinearModel is done in here
model._constructor_helper(
eigenvalues=eigenvalues, eigenvectors=components, mean=mean,
centred=centred, max_n_components=max_n_components)
return model
def _constructor_helper(self, eigenvalues, eigenvectors, mean, centred,
max_n_components):
# if covariance is not centred, mean must be zeros.
if centred:
MeanLinearVectorModel.__init__(self, eigenvectors, mean)
else:
MeanLinearVectorModel.__init__(self, eigenvectors,
np.zeros(mean.shape, dtype=mean.dtype))
self.centred = centred
self._eigenvalues = eigenvalues
# start the active components as all the components
self._n_active_components = int(self.n_components)
self._trimmed_eigenvalues = np.array([])
if max_n_components is not None:
self.trim_components(max_n_components)
def _data_to_matrix(self, data, n_samples):
# build a data matrix from all the samples
if n_samples is None:
n_samples = len(data)
# Assumed data is ndarray of (n_samples, n_features) or list of samples
if not isinstance(data, np.ndarray):
# Make sure we have an array, sliced to the number of requested
# samples
data = np.array(data)[:n_samples]
return data, n_samples
def __setstate__(self, state):
if 'mean_vector' in state:
state['_mean'] = state['mean_vector']
del state['mean_vector']
self.__dict__ = state
@property
def n_active_components(self):
r"""
The number of components currently in use on this model.
:type: `int`
"""
return self._n_active_components
@n_active_components.setter
def n_active_components(self, value):
r"""
Sets an updated number of active components on this model. The number
of active components represents the number of principal components
that will be used for generative purposes. Note that this therefore
makes the model stateful. Also note that setting the number of
components will not affect memory unless :meth:`trim_components`
is called.
Parameters
----------
value : `int`
The new number of active components.
Raises
------
ValueError
Tried setting n_active_components to {value} - value needs to be a
float 0.0 < n_components < self._total_kept_variance_ratio ({}) or
an integer 1 < n_components < self.n_components ({})
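Examples
--------
Illustrative only, assuming the model still retains that much variance::

    model.n_active_components = 0.95  # enough components for 95% of the original variance
    model.n_active_components = 5     # keep exactly the first 5 components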
"""
err_str = ("Tried setting n_active_components to {} - "
"value needs to be a float "
"0.0 < n_components < self._total_kept_variance_ratio "
"({}) or an integer 1 < n_components < "
"self.n_components ({})".format(
value, self._total_variance_ratio(), self.n_components))
# check value
if isinstance(value, float):
if 0.0 < value <= self._total_variance_ratio():
# value needed to capture desired variance
value = np.sum(
[r < value
for r in self._total_eigenvalues_cumulative_ratio()]) + 1
else:
# variance must be bigger than 0.0
raise ValueError(err_str)
if isinstance(value, int):
if value < 1:
# at least 1 value must be kept
raise ValueError(err_str)
elif value >= self.n_components:
if self.n_active_components < self.n_components:
# if the number of available components is smaller than
# the total number of components, set value to the latter
value = self.n_components
else:
# if the previous is false and value is bigger than the
# total number of components, do nothing
return
if 0 < value <= self.n_components:
self._n_active_components = int(value)
else:
raise ValueError(err_str)
@MeanLinearVectorModel.components.getter
def components(self):
r"""
Returns the active components of the model.
:type: ``(n_active_components, n_features)`` `ndarray`
"""
return self._components[:self.n_active_components, :]
@property
def eigenvalues(self):
r"""
Returns the eigenvalues associated with the active components of the
model, i.e. the amount of variance captured by each active component,
sorted form largest to smallest.
:type: ``(n_active_components,)`` `ndarray`
"""
return self._eigenvalues[:self.n_active_components]
def whitened_components(self):
r"""
Returns the active components of the model, whitened.
Returns
-------
whitened_components : ``(n_active_components, n_features)`` `ndarray`
The whitened components.
"""
return self.components / (
np.sqrt(self.eigenvalues * self.n_samples +
self.noise_variance())[:, None])
def original_variance(self):
r"""
Returns the total amount of variance captured by the original model,
i.e. the amount of variance present on the original samples.
Returns
-------
optional_variance : `float`
The variance captured by the model.
"""
return self._eigenvalues.sum() + self._trimmed_eigenvalues.sum()
def variance(self):
r"""
Returns the total amount of variance retained by the active
components.
Returns
-------
variance : `float`
Total variance captured by the active components.
"""
return self.eigenvalues.sum()
def _total_variance(self):
r"""
Returns the total amount of variance retained by all components
(active and inactive). Useful when the model has been trimmed.
Returns
-------
total_variance : `float`
Total variance captured by all components.
"""
return self._eigenvalues.sum()
def variance_ratio(self):
r"""
Returns the ratio between the amount of variance retained by the
active components and the total amount of variance present on the
original samples.
Returns
-------
variance_ratio : `float`
Ratio of active components variance and total variance present
in original samples.
"""
return self.variance() / self.original_variance()
def _total_variance_ratio(self):
r"""
Returns the ratio between the total amount of variance retained by
all components (active and inactive) and the total amount of variance
present on the original samples.
Returns
-------
total_variance_ratio : `float`
Ratio of total variance over the original variance.
"""
return self._total_variance() / self.original_variance()
def eigenvalues_ratio(self):
r"""
Returns the ratio between the variance captured by each active
component and the total amount of variance present on the original
samples.
Returns
-------
eigenvalues_ratio : ``(n_active_components,)`` `ndarray`
The active eigenvalues array scaled by the original variance.
"""
return self.eigenvalues / self.original_variance()
def _total_eigenvalues_ratio(self):
r"""
Returns the ratio between the variance captured by each component
(active and inactive) and the total amount of variance present on the original
samples.
Returns
-------
total_eigenvalues_ratio : ``(n_components,)`` `ndarray`
Array of eigenvalues scaled by the original variance.
"""
return self._eigenvalues / self.original_variance()
def eigenvalues_cumulative_ratio(self):
r"""
Returns the cumulative ratio between the variance captured by the
active components and the total amount of variance present on the
original samples.
Returns
-------
eigenvalues_cumulative_ratio : ``(n_active_components,)`` `ndarray`
Array of cumulative eigenvalues.
"""
return np.cumsum(self.eigenvalues_ratio())
def _total_eigenvalues_cumulative_ratio(self):
r"""
Returns the cumulative ratio between the variance captured by all
components (active and inactive) and the total amount of variance present on the
original samples.
Returns
-------
total_eigenvalues_cumulative_ratio : ``(n_components,)`` `ndarray`
Array of total cumulative eigenvalues.
"""
return np.cumsum(self._total_eigenvalues_ratio())
def noise_variance(self):
r"""
Returns the average variance captured by the inactive components,
i.e. the sample noise assumed in a Probabilistic PCA formulation.
If all components are active, then ``noise_variance == 0.0``.
Returns
-------
noise_variance : `float`
The mean variance of the inactive components.
"""
if self.n_active_components == self.n_components:
if self._trimmed_eigenvalues.size != 0:
noise_variance = self._trimmed_eigenvalues.mean()
else:
noise_variance = 0.0
else:
noise_variance = np.hstack(
(self._eigenvalues[self.n_active_components:],
self._trimmed_eigenvalues)).mean()
return noise_variance
def noise_variance_ratio(self):
r"""
Returns the ratio between the noise variance and the total amount of
variance present on the original samples.
Returns
-------
noise_variance_ratio : `float`
The ratio between the noise variance and the variance present
in the original samples.
"""
return self.noise_variance() / self.original_variance()
def inverse_noise_variance(self):
r"""
Returns the inverse of the noise variance.
Returns
-------
inverse_noise_variance : `float`
Inverse of the noise variance.
Raises
------
ValueError
If ``noise_variance() == 0``
"""
noise_variance = self.noise_variance()
if np.allclose(noise_variance, 0):
raise ValueError("noise variance is effectively 0 - "
"cannot take the inverse")
return 1.0 / noise_variance
def component(self, index, with_mean=True, scale=1.0):
r"""
A particular component of the model, in vectorized form.
Parameters
----------
index : `int`
The component that is to be returned
with_mean: `bool`, optional
If ``True``, the component will be blended with the mean vector
before being returned. If not, the component is returned on its
own.
scale : `float`, optional
A scale factor that should be applied to the component. Only
valid in the case where with_mean is ``True``. The scale is applied
in units of standard deviations (so a scale of ``1.0`` with
``with_mean=True`` visualizes the mean plus ``1`` std. dev. of the component
in question).
Returns
-------
component_vector : ``(n_features,)`` `ndarray`
The component vector of the given index.
"""
if with_mean:
# on PCA, scale is in units of std. deviations...
scaled_eigval = scale * np.sqrt(self.eigenvalues[index])
return (scaled_eigval * self.components[index]) + self._mean
else:
return self.components[index]
def instance_vectors(self, weights, normalized_weights=False):
"""
Creates new vectorized instances of the model using the first
components in a particular weighting.
Parameters
----------
weights : ``(n_vectors, n_weights)`` `ndarray` or `list` of `lists`
The weightings for the first `n_weights` components that
should be used per instance that is to be produced
``weights[i, j]`` is the linear contribution of the j'th
principal component to the i'th instance vector produced. Note
that if ``n_weights < n_components``, only the first ``n_weights``
components are used in the reconstruction (i.e. unspecified
weights are implicitly ``0``).
normalized_weights : `bool`, optional
If ``True``, the weights are assumed to be normalized w.r.t the
eigenvalues. This can make it easier to create unique instances by
making the weights more interpretable.
Returns
-------
vectors : ``(n_vectors, n_features)`` `ndarray`
The instance vectors for the weighting provided.
Raises
------
ValueError
If n_weights > n_components
"""
weights = np.asarray(weights) # if eg a list is provided
n_instances, n_weights = weights.shape
if n_weights > self.n_active_components:
raise ValueError(
"Number of weightings cannot be greater than {}".format(
self.n_active_components))
else:
full_weights = np.zeros((n_instances, self.n_active_components),
dtype=self._components.dtype)
full_weights[..., :n_weights] = weights
weights = full_weights
if normalized_weights:
# If the weights were normalized, then they are all relative to
# the scale of the eigenvalues and thus must be multiplied by
# the sqrt of the eigenvalues.
weights *= self.eigenvalues ** 0.5
return self._instance_vectors_for_full_weights(weights)
def instance(self, weights, normalized_weights=False):
r"""
Creates a new vector instance of the model by weighting together the
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
The weightings for the first `n_weights` components that should be
used.
``weights[j]`` is the linear contribution of the j'th principal
component to the instance vector.
normalized_weights : `bool`, optional
If ``True``, the weights are assumed to be normalized w.r.t the
eigenvalues. This can make it easier to create unique instances by
making the weights more interpretable.
Returns
-------
vector : ``(n_features,)`` `ndarray`
The instance vector for the weighting provided.
"""
weights = np.asarray(weights)
return self.instance_vectors(
weights[None, :], normalized_weights=normalized_weights).flatten()
def trim_components(self, n_components=None):
r"""
Permanently trims the components down to a certain amount. The number of
active components will be automatically reset to this particular value.
This will reduce `self.n_components` down to `n_components`
(if ``None``, `self.n_active_components` will be used), freeing up
memory in the process.
Once the model is trimmed, the trimmed components cannot be recovered.
Parameters
----------
n_components: `int` >= ``1`` or `float` > ``0.0`` or ``None``, optional
The number of components that are kept or else the amount (ratio)
of variance that is kept. If ``None``, `self.n_active_components` is
used.
Notes
-----
In case `n_components` is greater than the total number of components or
greater than the amount of variance currently kept, this method does
not perform any action.
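Examples
--------
Illustrative only, assuming the model still retains that much variance::

    model.trim_components(0.97)  # keep components explaining 97% of the original variance
    model.trim_components(20)    # keep exactly the first 20 components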
"""
if n_components is None:
# by default trim using the current n_active_components
n_components = self.n_active_components
# set self.n_active_components to n_components
self.n_active_components = n_components
if self.n_active_components < self.n_components:
# Just stored so that we can fit < 80 chars
nac = self.n_active_components
# set self.n_components to n_components. We have to copy to ensure
# that the data is actually removed, otherwise a view is returned
self._components = self._components[:nac].copy()
# store the eigenvalues associated to the discarded components
self._trimmed_eigenvalues = np.hstack((
self._trimmed_eigenvalues,
self._eigenvalues[self.n_active_components:]))
# make sure that the eigenvalues are trimmed too
self._eigenvalues = self._eigenvalues[:nac].copy()
def project_whitened(self, vector_instance):
"""
Projects the `vector_instance` onto the whitened components,
retrieving the whitened linear weightings.
Parameters
----------
vector_instance : ``(n_features,)`` `ndarray`
A novel vector.
Returns
-------
projected : ``(n_features,)`` `ndarray`
A vector of whitened linear weightings
"""
whitened_components = self.whitened_components()
return np.dot(vector_instance, whitened_components.T)
def orthonormalize_against_inplace(self, linear_model):
r"""
Enforces that the union of this model's components and another are
both mutually orthonormal.
Note that the model passed in is guaranteed to not have its number
of available components changed. This model, however, may lose some
dimensionality due to reaching a degenerate state.
The removed components will always be trimmed from the end of
components (i.e. the components which capture the least variance).
If trimming is performed, `n_components` and `n_active_components`
would be altered - see :meth:`trim_components` for details.
Parameters
----------
linear_model : :map:`LinearModel`
A second linear model to orthonormalize this against.
"""
# take the QR decomposition of the model components
Q = (np.linalg.qr(np.hstack((linear_model._components.T,
self._components.T)))[0]).T
# the model passed to us went first, so all its components will
# survive. Pull them off, and update the other model.
linear_model.components = Q[:linear_model.n_components, :]
# it's possible that all of our components didn't survive due to
# degeneracy. We need to trim our components down before replacing
# them to ensure the number of components is consistent (otherwise
# the components setter will complain at us)
n_available_components = Q.shape[0] - linear_model.n_components
if n_available_components < self.n_components:
# oh dear, we've lost some components from the end of our model.
if self.n_active_components < n_available_components:
# save the current number of active components
n_active_components = self.n_active_components
else:
# save the current number of available components
n_active_components = n_available_components
# call trim_components to update our state.
self.trim_components(n_components=n_available_components)
if n_active_components < n_available_components:
# reset the number of active components
self.n_active_components = n_active_components
# now we can set our own components with the updated orthogonal ones
self.components = Q[linear_model.n_components:, :]
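# Hedged sketch of the intended call pattern (the model names are assumptions):
#
#   shape_model.orthonormalize_against_inplace(constraint_model)
#   # constraint_model keeps all of its components; shape_model may have lost
#   # trailing (lowest-variance) components if the stacked basis was degenerate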
def increment(self, data, n_samples=None, forgetting_factor=1.0,
verbose=False):
r"""
Update the eigenvectors, eigenvalues and mean vector of this model
by performing incremental PCA on the given samples.
Parameters
----------
data : ``(n_samples, n_features)`` `ndarray` or `list` of `ndarray`
Matrix of new samples (or list of new sample vectors) to update the
model from.
n_samples : `int`, optional
If provided then ``data`` must be an iterator that yields
``n_samples``. If not provided then ``data`` has to be a
list (so we know how large the data matrix needs to be).
forgetting_factor : ``[0.0, 1.0]`` `float`, optional
Forgetting factor that weights the relative contribution of new
samples vs old samples. If 1.0, all samples are weighted equally
and, hence, the result is exactly the same as performing batch
PCA on the concatenation of the old and new samples. If <1.0,
more emphasis is put on the new samples. See [1] for details.
References
----------
.. [1] David Ross, Jongwoo Lim, Ruei-Sung Lin, Ming-Hsuan Yang.
"Incremental Learning for Robust Visual Tracking". IJCV, 2007.
"""
data, n_new_samples = self._data_to_matrix(data, n_samples)
# compute incremental pca
e_vectors, e_values, m_vector = ipca(
data, self._components, self._eigenvalues, self.n_samples,
m_a=self._mean, f=forgetting_factor)
# check whether the number of active components equals the total number
# of components; if so, it should remain that way after this update
reset = (self.n_active_components == self.n_components)
# update mean, components, eigenvalues and number of samples
self._mean = m_vector
self._components = e_vectors
self._eigenvalues = e_values
self.n_samples += n_new_samples
# reset the number of active components to the total number of
# components
if reset:
self.n_active_components = self.n_components
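# Illustrative incremental update (hedged; `new_data` is assumed to be an
# (n_new_samples, n_features) ndarray of vectorised samples):
#
#   model.increment(new_data, forgetting_factor=0.9)
#   # the mean, components and eigenvalues now reflect both the old statistics
#   # (slightly down-weighted) and the new mini-batch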
def plot_eigenvalues(self, figure_id=None, new_figure=False,
render_lines=True, line_colour='b', line_style='-',
line_width=2, render_markers=True, marker_style='o',
marker_size=6, marker_face_colour='b',
marker_edge_colour='k', marker_edge_width=1.,
render_axes=True, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', figure_size=(10, 6),
render_grid=True, grid_line_style='--',
grid_line_width=0.5):
r"""
Plot of the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components), [self.eigenvalues],
figure_id=figure_id, new_figure=new_figure, legend_entries=None,
title='Eigenvalues', x_label='Component Number',
y_label='Eigenvalue',
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
def plot_eigenvalues_widget(self, figure_size=(10, 6), style='coloured'):
r"""
Plot of the eigenvalues using an interactive widget.
Parameters
----------
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, the widget is rendered with a coloured style. If
``'minimal'``, a simple black and white style is used.
"""
try:
from menpowidgets import plot_graph
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
plot_graph(x_axis=range(self.n_active_components),
y_axis=[self.eigenvalues], legend_entries=['Eigenvalues'],
figure_size=figure_size, style=style)
def plot_eigenvalues_ratio(self, figure_id=None, new_figure=False,
render_lines=True, line_colour='b',
line_style='-', line_width=2,
render_markers=True, marker_style='o',
marker_size=6, marker_face_colour='b',
marker_edge_colour='k', marker_edge_width=1.,
render_axes=True, axes_font_name='sans-serif',
axes_font_size=10, axes_font_style='normal',
axes_font_weight='normal', figure_size=(10, 6),
render_grid=True, grid_line_style='--',
grid_line_width=0.5):
r"""
Plot of the variance ratio captured by the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or `None`, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components), [self.eigenvalues_ratio()],
figure_id=figure_id, new_figure=new_figure, legend_entries=None,
title='Variance Ratio of Eigenvalues', x_label='Component Number',
y_label='Variance Ratio',
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
def plot_eigenvalues_ratio_widget(self, figure_size=(10, 6),
style='coloured'):
r"""
Plot of the variance ratio captured by the eigenvalues using an
interactive widget.
Parameters
----------
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, the widget is rendered with a coloured style. If
``'minimal'``, a simple black and white style is used.
"""
try:
from menpowidgets import plot_graph
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
plot_graph(x_axis=range(self.n_active_components),
y_axis=[self.eigenvalues_ratio()],
legend_entries=['Eigenvalues ratio'],
figure_size=figure_size, style=style)
def plot_eigenvalues_cumulative_ratio(self, figure_id=None,
new_figure=False, render_lines=True,
line_colour='b', line_style='-',
line_width=2, render_markers=True,
marker_style='o', marker_size=6,
marker_face_colour='b',
marker_edge_colour='k',
marker_edge_width=1.,
render_axes=True,
axes_font_name='sans-serif',
axes_font_size=10,
axes_font_style='normal',
axes_font_weight='normal',
figure_size=(10, 6), render_grid=True,
grid_line_style='--',
grid_line_width=0.5):
r"""
Plot of the cumulative variance ratio captured by the eigenvalues.
Parameters
----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
render_lines : `bool`, optional
If ``True``, the line will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers.
Example options ::
{``.``, ``,``, ``o``, ``v``, ``^``, ``<``, ``>``, ``+``,
``x``, ``D``, ``d``, ``s``, ``p``, ``*``, ``h``, ``H``,
``1``, ``2``, ``3``, ``4``, ``8``}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{``r``, ``g``, ``b``, ``c``, ``m``, ``k``, ``w``}
or
``(3, )`` `ndarray`
or
`list` of length ``3``
marker_edge_width : `float`, optional
The width of the markers' edge.
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{``serif``, ``sans-serif``, ``cursive``, ``fantasy``,
``monospace``}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{``ultralight``, ``light``, ``normal``, ``regular``,
``book``, ``medium``, ``roman``, ``semibold``,
``demibold``, ``demi``, ``bold``, ``heavy``,
``extra bold``, ``black``}
figure_size : (`float`, `float`) or `None`, optional
The size of the figure in inches.
render_grid : `bool`, optional
If ``True``, the grid will be rendered.
grid_line_style : {``-``, ``--``, ``-.``, ``:``}, optional
The style of the grid lines.
grid_line_width : `float`, optional
The width of the grid lines.
Returns
-------
viewer : :map:`MatplotlibRenderer`
The viewer object.
"""
from menpo.visualize import plot_curve
return plot_curve(
range(self.n_active_components),
[self.eigenvalues_cumulative_ratio()], figure_id=figure_id,
new_figure=new_figure, legend_entries=None,
title='Cumulative Variance Ratio of Eigenvalues',
x_label='Component Number', y_label='Cumulative Variance Ratio',
axes_x_limits=[0, self.n_active_components - 1],
axes_y_limits=None, axes_x_ticks=None, axes_y_ticks=None,
render_lines=render_lines, line_colour=line_colour,
line_style=line_style, line_width=line_width,
render_markers=render_markers, marker_style=marker_style,
marker_size=marker_size, marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width, render_legend=False,
render_axes=render_axes, axes_font_name=axes_font_name,
axes_font_size=axes_font_size, axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight, figure_size=figure_size,
render_grid=render_grid, grid_line_style=grid_line_style,
grid_line_width=grid_line_width)
def plot_eigenvalues_cumulative_ratio_widget(self, figure_size=(10, 6),
style='coloured'):
r"""
Plot of the cumulative variance ratio captured by the eigenvalues using
an interactive widget.
Parameters
----------
figure_size : (`float`, `float`) or ``None``, optional
The size of the figure in inches.
style : {``'coloured'``, ``'minimal'``}, optional
If ``'coloured'``, the widget is rendered with a coloured style. If
``'minimal'``, a simple black and white style is used.
"""
try:
from menpowidgets import plot_graph
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
plot_graph(x_axis=range(self.n_active_components),
y_axis=[self.eigenvalues_cumulative_ratio()],
legend_entries=['Eigenvalues cumulative ratio'],
figure_size=figure_size, style=style)
def __str__(self):
str_out = 'PCA Vector Model \n' \
' - centred: {}\n' \
' - # features: {}\n' \
' - # active components: {}\n' \
' - kept variance: {:.2} {:.1%}\n' \
' - noise variance: {:.2} {:.1%}\n' \
' - total # components: {}\n' \
' - components shape: {}\n'.format(
self.centred, self.n_features, self.n_active_components,
self.variance(), self.variance_ratio(), self.noise_variance(),
self.noise_variance_ratio(), self.n_components,
self.components.shape)
return str_out
class PCAModel(VectorizableBackedModel, PCAVectorModel):
r"""
A :map:`MeanLinearModel` whose components are Principal Components
computed from vectorized instances.
Principal Component Analysis (PCA) by eigenvalue decomposition of the
data's scatter matrix. For details of the implementation of PCA, see
:map:`pca`.
Parameters
----------
samples : `list` or `iterable` of :map:`Vectorizable`
List or iterable of samples to build the model from.
centre : `bool`, optional
When ``True`` (default) PCA is performed after mean centering the data.
If ``False`` the data is assumed to be centred, and the mean will be
``0``.
n_samples : `int`, optional
If provided then ``samples`` must be an iterator that yields
``n_samples``. If not provided then samples has to be a `list` (so we
know how large the data matrix needs to be).
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any components
above and beyond this one are discarded.
inplace : `bool`, optional
If ``True`` the data matrix is modified in place. Otherwise, the data
matrix is copied.
verbose : `bool`, optional
Whether to print building information or not.
"""
def __init__(self, samples, centre=True, n_samples=None,
max_n_components=None, inplace=True, verbose=False):
# build a data matrix from all the samples
data, template = as_matrix(samples, length=n_samples,
return_template=True, verbose=verbose)
n_samples = data.shape[0]
PCAVectorModel.__init__(self, data, centre=centre,
max_n_components=max_n_components,
n_samples=n_samples, inplace=inplace)
VectorizableBackedModel.__init__(self, template)
@classmethod
def init_from_covariance_matrix(cls, C, mean, n_samples, centred=True,
is_inverse=False, max_n_components=None):
r"""
Build the Principal Component Analysis (PCA) by eigenvalue
decomposition of the provided covariance/scatter matrix. For details
of the implementation of PCA, see :map:`pcacov`.
Parameters
----------
C : ``(n_features, n_features)`` `ndarray` or `scipy.sparse`
The Covariance/Scatter matrix. If it is a precision matrix (inverse
covariance), then set `is_inverse=True`.
mean : :map:`Vectorizable`
The mean instance. It must be a :map:`Vectorizable` and *not* an
`ndarray`.
n_samples : `int`
The number of samples used to generate the covariance matrix.
centred : `bool`, optional
When ``True`` we assume that the data were centered before
computing the covariance matrix.
is_inverse : `bool`, optional
If ``True``, then it is assumed that `C` is a precision matrix
(inverse covariance). Thus, the eigenvalues will be inverted. If
``False``, then it is assumed that `C` is a covariance matrix.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# Create new pca instance
self_model = PCAVectorModel.__new__(cls)
self_model.n_samples = n_samples
# Compute pca on covariance
e_vectors, e_values = pcacov(C, is_inverse=is_inverse)
# The call to __init__ of MeanLinearModel is done in here
self_model._constructor_helper(eigenvalues=e_values,
eigenvectors=e_vectors,
mean=mean.as_vector(),
centred=centred,
max_n_components=max_n_components)
VectorizableBackedModel.__init__(self_model, mean)
return self_model
@classmethod
def init_from_components(cls, components, eigenvalues, mean, n_samples,
centred, max_n_components=None):
r"""
Build the Principal Component Analysis (PCA) using the provided
components (eigenvectors) and eigenvalues.
Parameters
----------
components : ``(n_components, n_features)`` `ndarray`
The eigenvectors to be used.
eigenvalues : ``(n_components, )`` `ndarray`
The corresponding eigenvalues.
mean : :map:`Vectorizable`
The mean instance. It must be a :map:`Vectorizable` and *not* an
`ndarray`.
n_samples : `int`
The number of samples used to generate the eigenvectors.
centred : `bool`, optional
When ``True`` we assume that the data were centered before
computing the eigenvectors.
max_n_components : `int`, optional
The maximum number of components to keep in the model. Any
components above and beyond this one are discarded.
"""
# Create new pca instance
self_model = PCAVectorModel.__new__(cls)
self_model.n_samples = n_samples
# The call to __init__ of MeanLinearModel is done in here
self_model._constructor_helper(
eigenvalues=eigenvalues, eigenvectors=components,
mean=mean.as_vector(), centred=centred,
max_n_components=max_n_components)
VectorizableBackedModel.__init__(self_model, mean)
return self_model
def mean(self):
r"""
Return the mean of the model.
:type: :map:`Vectorizable`
"""
return self.template_instance.from_vector(self._mean)
@property
def mean_vector(self):
r"""
Return the mean of the model as a 1D vector.
:type: `ndarray`
"""
return self._mean
@doc_inherit(name='project_out')
def project_out_vector(self, instance_vector):
return PCAVectorModel.project_out(self, instance_vector)
@doc_inherit(name='reconstruct')
def reconstruct_vector(self, instance_vector):
return PCAVectorModel.reconstruct(self, instance_vector)
@doc_inherit(name='project')
def project_vector(self, instance_vector):
return PCAVectorModel.project(self, instance_vector)
@doc_inherit(name='instance')
def instance_vector(self, weights, normalized_weights=False):
return PCAVectorModel.instance(self, weights,
normalized_weights=normalized_weights)
@doc_inherit(name='component')
def component_vector(self, index, with_mean=True, scale=1.0):
return PCAVectorModel.component(self, index, with_mean=with_mean,
scale=scale)
@doc_inherit(name='project_whitened')
def project_whitened_vector(self, vector_instance):
return PCAVectorModel.project_whitened(self, vector_instance)
def component(self, index, with_mean=True, scale=1.0):
r"""
Return a particular component of the linear model.
Parameters
----------
index : `int`
The component that is to be returned
with_mean: `bool`, optional
If ``True``, the component will be blended with the mean vector
before being returned. If not, the component is returned on its
own.
scale : `float`, optional
A scale factor that should be applied to the component. Only
valid in the case where ``with_mean == True``. See
:meth:`component_vector` for how this scale factor is interpreted.
Returns
-------
component : `type(self.template_instance)`
The requested component instance.
"""
return self.template_instance.from_vector(self.component_vector(
index, with_mean=with_mean, scale=scale))
def instance(self, weights, normalized_weights=False):
"""
Creates a new instance of the model using the first ``len(weights)``
components.
Parameters
----------
weights : ``(n_weights,)`` `ndarray` or `list`
``weights[i]`` is the linear contribution of the i'th component
to the instance vector.
normalized_weights : `bool`, optional
If ``True``, the weights are assumed to be normalized w.r.t. the
eigenvalues. This makes it easier to create meaningful instances,
since the weights become more interpretable.
Raises
------
ValueError
If n_weights > n_components
Returns
-------
instance : `type(self.template_instance)`
An instance of the model.
"""
v = self.instance_vector(weights, normalized_weights=normalized_weights)
return self.template_instance.from_vector(v)
def project_whitened(self, instance):
"""
Projects the `instance` onto the whitened components, retrieving the
whitened linear weightings.
Parameters
----------
instance : :map:`Vectorizable`
A novel instance.
Returns
-------
projected : (n_components,)
A vector of whitened linear weightings
"""
return self.project_whitened_vector(instance.as_vector())
def increment(self, samples, n_samples=None, forgetting_factor=1.0,
verbose=False):
r"""
Update the eigenvectors, eigenvalues and mean vector of this model
by performing incremental PCA on the given samples.
Parameters
----------
samples : `list` of :map:`Vectorizable`
List of new samples to update the model from.
n_samples : `int`, optional
If provided then ``samples`` must be an iterator that yields
``n_samples``. If not provided then samples has to be a
list (so we know how large the data matrix needs to be).
forgetting_factor : ``[0.0, 1.0]`` `float`, optional
Forgetting factor that weights the relative contribution of new
samples vs old samples. If 1.0, all samples are weighted equally
and, hence, the result is exactly the same as performing batch
PCA on the concatenation of the old and new samples. If <1.0,
more emphasis is put on the new samples. See [1] for details.
References
----------
.. [1] David Ross, Jongwoo Lim, Ruei-Sung Lin, Ming-Hsuan Yang.
"Incremental Learning for Robust Visual Tracking". IJCV, 2007.
"""
# build a data matrix from the new samples
data = as_matrix(samples, length=n_samples, verbose=verbose)
n_new_samples = data.shape[0]
PCAVectorModel.increment(self, data, n_samples=n_new_samples,
forgetting_factor=forgetting_factor,
verbose=verbose)
def view_widget(self, figure_size=(7, 7)):
r"""
Visualizes the model using an interactive widget. It only works if it
is a 2D/3D shape or appearance model.
Parameters
----------
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
"""
try:
from menpowidgets import view_widget
view_widget(self, figure_size=figure_size)
except ImportError as e:
from menpo.visualize.base import MenpowidgetsMissingError
raise MenpowidgetsMissingError(e)
def __str__(self):
str_out = 'PCA Model \n' \
' - instance class: {}\n' \
' - centred: {}\n' \
' - # features: {}\n' \
' - # active components: {}\n' \
' - kept variance: {:.2} {:.1%}\n' \
' - noise variance: {:.2} {:.1%}\n' \
' - total # components: {}\n' \
' - components shape: {}\n'.format(
name_of_callable(self.template_instance), self.centred,
self.n_features, self.n_active_components, self.variance(),
self.variance_ratio(), self.noise_variance(),
self.noise_variance_ratio(), self.n_components,
self.components.shape)
return str_out
|
the-stack_106_20200
|
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Compares two binaries."""
from typing import Dict, List, Mapping, Sequence, Tuple, TYPE_CHECKING
from chb.relational.CallgraphMatcher import CallgraphMatcher
from chb.relational.FunctionRelationalAnalysis import FunctionRelationalAnalysis
import chb.util.fileutil as UF
if TYPE_CHECKING:
from chb.app.AppAccess import AppAccess
class RelationalAnalysis:
"""Establishes relationships between functions in two related binaries.
A function mapping is established as follows:
1. If the number of functions is the same in both binaries, it is assumed
(for now) that their order in both binaries is the same (we are
dealing with micropatches, and so we don't expect large changes between
the two binaries). In this case functions are mapped directly by their
relative position in the binary.
2. If the number of functions is different in the two binaries, a
combination of criteria is used to map functions. They get mapped directly
- if they are at the same address, or
- if they have identical md5 hash
For the remaining functions the callgraph is used to determine relationships
between the functions in each binary, and a function mapping is obtained from
matching the two call graphs.
"""
def __init__(
self,
app1: "AppAccess",
app2: "AppAccess",
faddrs1: List[str] = [],
faddrs2: List[str] = []) -> None:
self._app1 = app1
self._app2 = app2
if faddrs1:
self._faddrs1 = sorted(faddrs1)
else:
self._faddrs1 = sorted(app1.appfunction_addrs)
if faddrs2:
self._faddrs2 = sorted(faddrs2)
else:
self._faddrs2 = sorted(app2.appfunction_addrs)
self._functionmapping: Dict[str, str] = {} # potentially partial map
self._functionanalyses: Dict[str, FunctionRelationalAnalysis] = {}
self._fnmd5s: Dict[str, Tuple[List[str], List[str]]] = {}
@property
def app1(self) -> "AppAccess":
return self._app1
@property
def app2(self) -> "AppAccess":
return self._app2
@property
def faddrs1(self) -> Sequence[str]:
return self._faddrs1
@property
def faddrs2(self) -> Sequence[str]:
return self._faddrs2
@property
def fncount1(self) -> int:
return len(self._faddrs1)
@property
def fncount2(self) -> int:
return len(self._faddrs2)
@property
def function_analyses(self) -> Mapping[str, FunctionRelationalAnalysis]:
if len(self._functionanalyses) == 0:
for faddr1 in self.faddrs1:
if faddr1 in self.function_mapping:
faddr2 = self.function_mapping[faddr1]
fn1 = self.app1.function(faddr1)
fn2 = self.app2.function(faddr2)
self._functionanalyses[faddr1] = FunctionRelationalAnalysis(
self.app1, fn1, self.app2, fn2)
return self._functionanalyses
def function_analysis(self, faddr: str) -> FunctionRelationalAnalysis:
if faddr in self.function_analyses:
return self.function_analyses[faddr]
else:
raise UF.CHBError("Address not found in function relational analyses")
@property
def function_mapping(self) -> Mapping[str, str]:
if len(self._functionmapping) > 0:
return self._functionmapping
elif self.fncount1 == self.fncount2:
result: Dict[str, str] = {}
diff1 = sorted(set(self.faddrs1) - set(self.faddrs2))
diff2 = sorted(set(self.faddrs2) - set(self.faddrs1))
for (faddr1, faddr2) in zip(diff1, diff2):
result[faddr1] = faddr2
for faddr1 in self.faddrs1:
if faddr1 not in result:
result[faddr1] = faddr1
self._functionmapping = result
return self._functionmapping
else:
callgraphmatcher = CallgraphMatcher(
self.app1,
self.faddrs1,
self.app1.callgraph(),
self.app2,
self.faddrs2,
self.app2.callgraph())
self._functionmapping = callgraphmatcher.function_mapping
return self._functionmapping
def functions_changed(self) -> List[str]:
"""Return a list of functions that moved or are not md5-equivalent."""
result: List[str] = []
for (faddr, fra) in self.function_analyses.items():
if fra.moved or not fra.is_md5_equal:
result.append(faddr)
for faddr in self.faddrs1:
if faddr not in self.function_mapping:
result.append(faddr)
return result
def blocks_changed(self, faddr: str) -> List[str]:
if faddr in self.function_analyses:
fra = self.function_analyses[faddr]
if fra.is_structurally_equivalent:
return fra.blocks_changed()
return []
def report(self, showfunctions: bool, showinstructions: bool) -> str:
lines: List[str] = []
lines.append("Summary Report")
lines.append("=" * 80)
lines.append("")
fnames: Dict[str, str] = {}
for faddr in self.functions_changed():
if self.app1.has_function_name(faddr):
fnames[faddr] = self.app1.function_name(faddr) + " (" + faddr + ")"
else:
fnames[faddr] = faddr
maxnamelen = max((len(n) for n in fnames.values()), default=10) + 3
lines.append(
"function".ljust(maxnamelen)
+ "moved to".ljust(12)
+ "md5-equal".ljust(12)
+ "cfg-isomorphic".ljust(18)
+ "blocks-changed".ljust(12))
lines.append("-" * 88)
fnotfound: List[str] = [] # not found in patched version
fnotmapped: List[str] = [] # not found in original version
totalinstrs: int = 0
totalblocks: int = 0
for faddr in self.functions_changed():
if faddr in self.function_mapping:
fra = self.function_analyses[faddr]
if faddr != self.function_mapping[faddr]:
moved = self.function_mapping[faddr]
else:
moved = "not moved"
else:
moved = "not found"
if faddr in self.function_analyses:
md5eq = "yes" if fra.is_md5_equal else "no"
if fra.is_cfg_isomorphic:
streq = "yes"
blockschanged = len(fra.blocks_changed())
totalinstrs += fra.instructions_changed()
allblocks = len(fra.basic_blocks1)
totalblocks += blockschanged
blchg = str(blockschanged) + "/" + str(allblocks)
else:
streq = "no"
blchg = str(len(fra.cfg_blocks1)) + " -> " + str(len(fra.cfg_blocks2))
lines.append(
fnames[faddr].ljust(maxnamelen)
+ moved.ljust(16)
+ md5eq.ljust(12)
+ streq.ljust(18)
+ blchg.ljust(12))
else:
fnotfound.append(faddr)
lines.append("\nTotal blocks changed: " + str(totalblocks))
lines.append("Total instructions changed: " + str(totalinstrs))
lines.append("")
if len(self.function_mapping) < len(self.faddrs2):
for faddr2 in sorted(self.faddrs2):
if faddr2 not in self.function_mapping.values():
fnotmapped.append(faddr2)
lines.append(
"\nFunctions mapped from original to patched: "
+ str(len(self.function_mapping)))
lines.append(
"Functions not found in patched version: " + str(len(fnotfound)))
lines.append(
"Functions in patched version not mapped: " + str(len(fnotmapped)))
if showfunctions or showinstructions:
lines.append("")
lines.append("=" * 80)
lines.append("Functions changed")
lines.append("=" * 80)
for faddr in self.functions_changed():
if faddr in self.function_analyses:
fra = self.function_analyses[faddr]
lines.append("\nFunction " + fnames[faddr])
lines.append(fra.report(showinstructions))
else:
lines.append(
"\nFunction "
+ fnames[faddr]
+ " not mapped to patched version")
return "\n".join(lines)
|
the-stack_106_20202
|
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
def getLastExchanges(network, exchange, contract: str, limit, pairAddress):
transport = AIOHTTPTransport(url="https://graphql.bitquery.io")
client = Client(transport=transport, fetch_schema_from_transport=True)
query = gql(
"""
query getLastExchanges ($network: EthereumNetwork, $contract: String!, $exchange: String!, $limit: Int, $pairAddress: String!){
ethereum(network: $network) {
dexTrades(
options: {limit: $limit, desc: "timeInterval.second"}
exchangeName: {is: $exchange}
baseCurrency: {is: $contract}
smartContractAddress: {is: $pairAddress}
) {
transaction {
hash
}
date {
date
}
block {
height
}
buyAmount
buyAmountInUsd: buyAmount(in: USD)
buyCurrency {
symbol
address
}
sellAmount
sellAmountInUsd: sellAmount(in: USD)
sellCurrency {
symbol
address
}
tradeAmount(in: USD)
transaction {
gasValue
gasPrice
gas
}
timeInterval {
second
}
}
}
}
"""
)
params = {
"network": network,
"contract": contract,
"exchange": exchange,
"limit": limit,
"pairAddress": pairAddress
}
result = client.execute(query, variable_values=params)
return(result["ethereum"]["dexTrades"])
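# Example call (hedged sketch; the token and pool addresses are placeholders and
# a Bitquery API key header may be required on the transport in practice):
#
#   trades = getLastExchanges(
#       network="ethereum",
#       exchange="Uniswap",
#       contract="0x<token-address>",
#       limit=10,
#       pairAddress="0x<pair-address>")
#   for t in trades:
#       print(t["timeInterval"]["second"], t["tradeAmount"])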
def getPairs(network, exchange, contract: str):
transport = AIOHTTPTransport(url="https://graphql.bitquery.io")
client = Client(transport=transport, fetch_schema_from_transport=True)
query = gql(
"""
query getPairs ($network: EthereumNetwork, $contract: String!, $exchange: String!){
ethereum(network: $network) {
dexTrades(
exchangeName: {is: $exchange}
baseCurrency: {is: $contract}
options: {desc: "trades"}
) {
quoteCurrency: quoteCurrency {
symbol
address
}
baseCurrency {
symbol
address
}
poolToken: smartContract {
address {
address
}
}
trades: count
}
}
}
"""
)
params = {
"network": network,
"contract": contract,
"exchange": exchange
}
result = client.execute(query, variable_values=params)
dex_trades = (result["ethereum"]["dexTrades"])
pool_addresses = []
quote_currencies = []
for x in dex_trades:
pool_addresses.append(x["poolToken"]["address"]["address"])
for x in dex_trades:
quote_currencies.append(x["quoteCurrency"]["address"])
transport = AIOHTTPTransport(url="https://graphql.bitquery.io")
client = Client(transport=transport, fetch_schema_from_transport=True)
query = gql(
"""
query getPairLiquidity ($network: EthereumNetwork, $pool_addresses: [String!]){
ethereum(network: $network) {
address(address: {in: $pool_addresses}) {
balances {
currency {
address
name
}
value
}
address
}
}
}
"""
)
params = {
"network": network,
"pool_addresses": pool_addresses
}
result = client.execute(query, variable_values=params)
balances = (result["ethereum"]["address"])
pools=[]
final_pools=[]
for x in balances:
for y in x["balances"]:
if(y["currency"]["address"] in quote_currencies or y["currency"]["address"] == contract):
pools.append([x["address"], y["currency"]["name"], y["value"], y["currency"]["address"]])
for v, w in zip(pools[::2],pools[1::2]):
final_pools.append([v,w])
return final_pools
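# Each entry of the returned list groups the two balance records of one pool:
# [pool_address, currency_name, balance, currency_address] for the base token
# and for the quote token (this assumes exactly two relevant balances per pool).
# Hedged usage sketch with a placeholder token address:
#
#   pairs = getPairs("ethereum", "Uniswap", "0x<token-address>")
#   for base_side, quote_side in pairs:
#       print(base_side[0], base_side[1], base_side[2], "/", quote_side[1])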
def getPrice(network, exchange, contract: str, pairAddress):
transport = AIOHTTPTransport(url="https://graphql.bitquery.io")
client = Client(transport=transport, fetch_schema_from_transport=True)
query = gql(
"""
query getLastExchanges ($network: EthereumNetwork, $contract: String!, $exchange: String!, $pairAddress: String!){
ethereum(network: $network) {
dexTrades(
options: {limit: 1, desc: "timeInterval.second"}
exchangeName: {is: $exchange}
baseCurrency: {is: $contract}
smartContractAddress: {is: $pairAddress}
) {
transaction {
hash
}
date {
date
}
block {
height
}
buyAmount
buyAmountInUsd: buyAmount(in: USD)
buyCurrency {
symbol
address
}
sellAmount
sellAmountInUsd: sellAmount(in: USD)
sellCurrency {
symbol
address
}
tradeAmount(in: USD)
transaction {
gasValue
gasPrice
gas
}
timeInterval {
second
}
}
}
}
"""
)
params = {
"network": network,
"contract": contract,
"exchange": exchange,
"pairAddress": pairAddress
}
result = client.execute(query, variable_values=params)
if(result["ethereum"]["dexTrades"][0]["buyAmountInUsd"] / result["ethereum"]["dexTrades"][0]["sellAmount"] != 0.0):
return(result["ethereum"]["dexTrades"][0]["buyAmountInUsd"] / result["ethereum"]["dexTrades"][0]["sellAmount"])
else:
return result["ethereum"]["dexTrades"][0]["sellAmountInUsd"] / result["ethereum"]["dexTrades"][0]["buyAmount"]
def getOHLC(network, exchange, baseCurrency: str, quoteCurrency: str, limit):
transport = AIOHTTPTransport(url="https://graphql.bitquery.io")
client = Client(transport=transport, fetch_schema_from_transport=True)
query = gql(
"""
query getOHLC ($network: EthereumNetwork, $baseCurrency: String!, $exchange: String!, $quoteCurrency: String!, $limit: Int){
ethereum(network: $network) {
dexTrades(
options: {limit: $limit, desc: "timeInterval.minute"}
exchangeName: {is: $exchange}
baseCurrency: {is: $baseCurrency}
quoteCurrency: {is: $quoteCurrency}
) {
timeInterval {
minute(count: 5)
}
high: quotePrice(calculate: maximum)
low: quotePrice(calculate: minimum)
open: minimum(of: block, get: quote_price)
close: maximum(of: block, get: quote_price)
baseCurrency {
name
}
quoteCurrency {
name
}
}
}
}
"""
)
params = {
"network": network,
"baseCurrency": baseCurrency,
"exchange": exchange,
"quoteCurrency": quoteCurrency,
"limit": limit
}
result = client.execute(query, variable_values=params)
return(result["ethereum"]["dexTrades"])
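# Minimal demonstration of the OHLC helper (hedged; the addresses below are
# assumed to be WETH and USDT on Ethereum mainnet, and running this performs a
# live API call):
#
#   candles = getOHLC(
#       network="ethereum",
#       exchange="Uniswap",
#       baseCurrency="0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
#       quoteCurrency="0xdac17f958d2ee523a2206206994597c13d831ec7",
#       limit=12)
#   for c in candles:
#       print(c["timeInterval"]["minute"], c["open"], c["high"], c["low"], c["close"])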
|
the-stack_106_20204
|
import nengo
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# import tensorflow as tf
import os
from nengo.dists import Choice
from datetime import datetime
# from nengo_extras.data import load_mnist
import pickle
from nengo.utils.matplotlib import rasterplot
import time
from InputData import PresentInputWithPause
# from nengo_extras.graphviz import net_diagram
from nengo.neurons import LIFRate
from nengo.params import Parameter, NumberParam, FrozenObject
from nengo.dists import Choice, Distribution, get_samples, Uniform
from nengo.utils.numpy import clip, is_array_like
from utilis import *
# import keras
from args_mnist import args as my_args
import itertools
import random
import logging
import nni
def evaluate_mnist_multiple_var_v2(args):
#############################
# load the data
#############################
input_nbr = args.input_nbr
# (image_train, label_train), (image_test, label_test) = (keras.datasets.mnist.load_data())
probe_sample_rate = (input_nbr/10)/1000 #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations
# # probe_sample_rate = 1000
# image_train_filtered = []
# label_train_filtered = []
x = args.digit
# for i in range(0,input_nbr):
# image_train_filtered.append(image_train[i])
# label_train_filtered.append(label_train[i])
# image_train_filtered = np.array(image_train_filtered)
# label_train_filtered = np.array(label_train_filtered)
# np.savez(
# 'mnist.npz',
# image_train_filtered=image_train_filtered,
# label_train_filtered=label_train_filtered,
# image_test_filtered=image_test_filtered,
# label_test_filtered=label_test_filtered,
# )
data = np.load('mnist.npz', allow_pickle=True)
image_train_filtered = data['image_train_filtered']
label_train_filtered = data['label_train_filtered']
image_test_filtered = data['image_test_filtered']
label_test_filtered = data['label_test_filtered']
#Simulation Parameters
#Presentation time
presentation_time = args.presentation_time #0.20
#Pause time
pause_time = args.pause_time
#Iterations
iterations=args.iterations
#Input layer parameters
n_in = args.n_in
# g_max = 1/784 #Maximum output contribution
g_max = args.g_max
n_neurons = args.n_neurons # Layer 1 neurons
# inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition
input_neurons_args = {
"n_neurons":n_in,
"dimensions":1,
"label":"Input layer",
"encoders":nengo.dists.Uniform(1,1),
# "max_rates":nengo.dists.Uniform(22,22),
# "intercepts":nengo.dists.Uniform(0,0),
"gain":nengo.dists.Uniform(args.gain_in,args.gain_in),
"bias":nengo.dists.Uniform(args.bias_in,args.bias_in),
"neuron_type":MyLIF_in(tau_rc=args.tau_in,min_voltage=-1, amplitude=args.g_max)
# "neuron_type":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron.
}
#Layer 1 parameters
layer_1_neurons_args = {
"n_neurons":n_neurons,
"dimensions":1,
"label":"Layer 1",
"encoders":nengo.dists.Uniform(1,1),
"gain":nengo.dists.Uniform(args.gain_out,args.gain_out),
"bias":nengo.dists.Uniform(args.bias_out,args.bias_out),
# "intercepts":nengo.dists.Choice([0]),
# "max_rates":nengo.dists.Choice([args.rate_out,args.rate_out]),
# "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1),
# "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)
# "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)
"neuron_type":STDPLIF(tau_rc=args.tau_out, min_voltage=-1, spiking_threshold=args.thr_out, inhibition_time=args.inhibition_time)
}
# "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 20), seed=1),
#Lateral Inhibition parameters
# lateral_inhib_args = {
# "transform": inhib_factor* (np.full((n_neurons, n_neurons), 1) - np.eye(n_neurons)),
# "synapse":args.inhib_synapse,
# "label":"Lateral Inhibition"
# }
#Learning rule parameters
vthp=0.25
vthn=0.25
# np.random.seed(0)
# vth_var = (2 * np.random.rand(n_neurons,n_in)) -1 #between -1 to 1 of shape W
# var_ratio=args.var_ratio
# vthp = vthp + (vthp*var_ratio*vth_var)
# vthn = vthn + (vthn*var_ratio*vth_var)
learning_args = {
"lr": args.lr,
"winit_min":0,
"winit_max":1,
"vprog":args.vprog,
"vthp":vthp,
"vthn":vthn,
"weight_quant":args.weight_quant,
# "var_ratio":args.var_ratio,
# "tpw":50,
# "prev_flag":True,
"sample_distance": int((presentation_time+pause_time)*200*10), #Store weight after 10 images
}
# argument_string = "presentation_time: "+ str(presentation_time)+ "\n pause_time: "+ str(pause_time)+ "\n input_neurons_args: " + str(input_neurons_args)+ " \n layer_1_neuron_args: " + str(layer_1_neurons_args)+"\n Lateral Inhibition parameters: " + str(lateral_inhib_args) + "\n learning parameters: " + str(learning_args)+ "\n g_max: "+ str(g_max)
images = image_train_filtered
labels = label_train_filtered
model = nengo.Network("My network")
#############################
# Model construction
#############################
with model:
# picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))
true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))
# true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
# input layer
input_layer = nengo.Ensemble(**input_neurons_args)
input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
#first layer
layer1 = nengo.Ensemble(**layer_1_neurons_args)
#Weights between input layer and layer 1
w = nengo.Node(CustomRule_post_v3(**learning_args), size_in=n_in, size_out=n_neurons)
nengo.Connection(input_layer.neurons, w, synapse=None)
nengo.Connection(w, layer1.neurons, synapse=None)
# nengo.Connection(w, layer1.neurons,transform=g_max, synapse=None)
# init_weights = np.random.uniform(0, 1, (n_neurons, n_in))
# conn1 = nengo.Connection(input_layer.neurons,layer1.neurons,learning_rule_type=VLR(learning_rate=args.lr,vprog=-0.6, var_ratio = args.var_ratio),transform=init_weights)
#Lateral inhibition
# inhib = nengo.Connection(layer1.neurons,layer1.neurons,**lateral_inhib_args)
#Probes
p_true_label = nengo.Probe(true_label, sample_every=probe_sample_rate)
p_input_layer = nengo.Probe(input_layer.neurons, sample_every=probe_sample_rate)
p_layer_1 = nengo.Probe(layer1.neurons, sample_every=probe_sample_rate)
# weights_probe = nengo.Probe(conn1,"weights",sample_every=probe_sample_rate)
weights = w.output.history
# with nengo_ocl.Simulator(model) as sim :
with nengo.Simulator(model, dt=0.005) as sim:
w.output.set_signal_vmem(sim.signals[sim.model.sig[input_layer.neurons]["voltage"]])
w.output.set_signal_out(sim.signals[sim.model.sig[layer1.neurons]["out"]])
sim.run((presentation_time+pause_time) * labels.shape[0]*iterations)
#save the model
# now = time.strftime("%Y%m%d-%H%M%S")
# folder = os.getcwd()+"/MNIST_VDSP"+now
# os.mkdir(folder)
# print(weights)
# weights = sim.data[weights_probe]
last_weight = weights[-1]
# pickle.dump(weights, open( folder+"/trained_weights", "wb" ))
# pickle.dump(argument_string, open( folder+"/arguments", "wb" ))
t_data = sim.trange(sample_every=probe_sample_rate)
labels = sim.data[p_true_label][:,0]
output_spikes = sim.data[p_layer_1]
neuron_class = np.zeros((n_neurons, 1))
n_classes = 10
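# Assign a class label to every output neuron: each neuron is labelled with the
# digit class during whose presentation windows it spiked the most, normalised
# by the total presentation time of that class.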
for j in range(n_neurons):
spike_times_neuron_j = t_data[np.where(output_spikes[:,j] > 0)]
max_spike_times = 0
for i in range(n_classes):
class_presentation_times_i = t_data[np.where(labels == i)]
#Normalized number of spikes wrt class presentation time
num_spikes = len(np.intersect1d(spike_times_neuron_j,class_presentation_times_i))/(len(class_presentation_times_i)+1)
if(num_spikes>max_spike_times):
neuron_class[j] = i
max_spike_times = num_spikes
# print("Neuron class: \n", neuron_class)
sim.close()
'''
Testing
'''
# img_rows, img_cols = 28, 28
input_nbr = 10000
# input_nbr = int(args.input_nbr/6)
# Dataset = "Mnist"
# # (image_train, label_train), (image_test, label_test) = load_mnist()
# (image_train, label_train), (image_test, label_test) = (tf.keras.datasets.mnist.load_data())
# #select the 0s and 1s as the two classes from MNIST data
# image_test_filtered = []
# label_test_filtered = []
# for i in range(0,input_nbr):
# # if (label_train[i] == 1 or label_train[i] == 0):
# image_test_filtered.append(image_test[i])
# label_test_filtered.append(label_test[i])
# print("actual input",len(label_test_filtered))
# print(np.bincount(label_test_filtered))
# image_test_filtered = np.array(image_test_filtered)
# label_test_filtered = np.array(label_test_filtered)
#############################
model = nengo.Network(label="My network",)
# Learning params
with model:
# input layer
# picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))
picture = nengo.Node(nengo.processes.PresentInput(image_test_filtered, presentation_time=presentation_time))
true_label = nengo.Node(nengo.processes.PresentInput(label_test_filtered, presentation_time=presentation_time))
# true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))
input_layer = nengo.Ensemble(**input_neurons_args)
input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)
#first layer
layer1 = nengo.Ensemble(**layer_1_neurons_args)
# w = nengo.Node(CustomRule_post_v2(**learning_args), size_in=784, size_out=n_neurons)
nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight)
p_true_label = nengo.Probe(true_label)
p_layer_1 = nengo.Probe(layer1.neurons)
p_input_layer = nengo.Probe(input_layer.neurons)
#if(not full_log):
# nengo.Node(log)
#############################
step_time = (presentation_time + pause_time)
with nengo.Simulator(model,dt=0.005) as sim:
sim.run(step_time * label_test_filtered.shape[0])
labels = sim.data[p_true_label][:,0]
output_spikes = sim.data[p_layer_1]
n_classes = 10
# rate_data = nengo.synapses.Lowpass(0.1).filtfilt(sim.data[p_layer_1])
predicted_labels = []
true_labels = []
correct_classified = 0
wrong_classified = 0
class_spikes = np.ones((10,1))
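# Classify each test image by counting the spikes emitted during its
# presentation window, averaging the counts per assigned neuron class and
# predicting the class with the highest mean spike count.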
for num in range(input_nbr):
#np.sum(sim.data[my_spike_probe] > 0, axis=0)
output_spikes_num = output_spikes[num*int(presentation_time/0.005):(num+1)*int(presentation_time/0.005),:] # 0.350/0.005
num_spikes = np.sum(output_spikes_num > 0, axis=0)
for i in range(n_classes):
sum_temp = 0
count_temp = 0
for j in range(n_neurons):
if((neuron_class[j]) == i) :
sum_temp += num_spikes[j]
count_temp +=1
if(count_temp==0):
class_spikes[i] = 0
else:
class_spikes[i] = sum_temp/count_temp
# print(class_spikes)
k = np.argmax(num_spikes)
# predicted_labels.append(neuron_class[k])
class_pred = np.argmax(class_spikes)
predicted_labels.append(class_pred)
true_class = labels[(num*int(presentation_time/0.005))]
# print(true_class)
# print(class_pred)
# if(neuron_class[k] == true_class):
# correct_classified+=1
# else:
# wrong_classified+=1
if(class_pred == true_class):
correct_classified+=1
else:
wrong_classified+=1
accuracy = correct_classified/ (correct_classified+wrong_classified)*100
print("Accuracy: ", accuracy)
sim.close()
# nni.report_final_result(accuracy)
del weights, sim.data, labels, output_spikes, class_pred, t_data
return accuracy, last_weight
# for tstep in np.arange(0, len(weights), 1):
# tstep = int(tstep)
# print(tstep)
# fig, axes = plt.subplots(1,1, figsize=(3,3))
# for i in range(0,(n_neurons)):
# fig = plt.figure()
# ax1 = fig.add_subplot()
# cax = ax1.matshow(np.reshape(weights[tstep][i],(28,28)),interpolation='nearest', vmax=1, vmin=0)
# fig.colorbar(cax)
# plt.tight_layout()
# fig.savefig(folder+'/weights'+str(tstep)+'.png')
# plt.close('all')
# gen_video(folder, "weights")
# for tstep in np.arange(0, len(weights), 1):
# tstep = int(tstep)
# print(tstep)
# fig, axes = plt.subplots(1,1, figsize=(3,3))
# for i in range(0,(n_neurons)):
# fig = plt.figure()
# ax1 = fig.add_subplot()
# cax = ax1.hist(weights[tstep][i])
# ax1.set_xlim(0,1)
# ax1.set_ylim(0,350)
# plt.tight_layout()
# fig.savefig(folder+'/histogram'+str(tstep)+'.png')
# plt.close('all')
# gen_video(folder, "histogram")
if __name__ == '__main__':
logger = logging.getLogger(__name__)
args = my_args()
print(args.__dict__)
logging.basicConfig(level=logging.DEBUG)
# Fix the seed of all random number generator
seed = 500
random.seed(seed)
np.random.seed(seed)
params = nni.get_next_parameter()
args.g_max = params['g_max']
args.tau_in = params['tau_in']
args.tau_out = params['tau_out']
args.lr = params['lr']
# args.presentation_time = params['presentation_time']
# args.rate_out = params['rate_out']
accuracy, weights = evaluate_mnist_multiple_var_v2(args)
print('accuracy:', accuracy)
# now = time.strftime("%Y%m%d-%H%M%S")
# folder = os.getcwd()+"/MNIST_VDSP"+now
# os.mkdir(folder)
# plt.figure(figsize=(12,10))
# plt.subplot(2, 1, 1)
# plt.title('Input neurons')
# rasterplot(time_points, p_input_layer)
# plt.xlabel("Time [s]")
# plt.ylabel("Neuron index")
# plt.subplot(2, 1, 2)
# plt.title('Output neurons')
# rasterplot(time_points, p_layer_1)
# plt.xlabel("Time [s]")
# plt.ylabel("Neuron index")
# plt.tight_layout()
# plt.savefig(folder+'/raster'+'.png')
# for tstep in np.arange(0, len(weights), 1):
# tstep = int(tstep)
# # tstep = len(weightds) - tstep -1
# print(tstep)
# columns = int(args.n_neurons/5)
# fig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(20,25))
# for i in range(0,(args.n_neurons)):
# axes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[tstep][i],(28,28)),interpolation='nearest', vmax=1, vmin=0)
# plt.tight_layout()
# fig.savefig(folder+'/weights'+str(tstep)+'.png')
# plt.close('all')
# gen_video(folder, "weights")
logger.info('All done.')
|
the-stack_106_20205
|
from ifcopenshell.geom.app import application
from PyQt4 import QtCore, QtGui
class my_app(application):
def __init__(self):
application.__init__(self)
# self.window = my_app.window()
self.window.setWindowTitle("TU Eindhoven IfcOpenShell scripting tool")
self.label = QtGui.QLabel(self.window)
self.label.setGeometry(QtCore.QRect(40, 140, 361, 511))
self.label.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Preferred)
self.label.setObjectName("label")
self.label.setText("logo")
myPixmap = QtGui.QPixmap('./tu_logo.png')
self.label.resize(myPixmap.width(),myPixmap.height())
myScaledPixmap = myPixmap.scaled(self.label.size(), QtCore.Qt.KeepAspectRatio)
self.label.setPixmap(myScaledPixmap)
# tb.insertWidget(self.label)
self.window.statusBar().addWidget(self.label)
self.ios_label = QtGui.QLabel(self.window)
self.ios_label.setGeometry(QtCore.QRect(40, 140, 361, 511))
self.ios_label.setSizePolicy(QtGui.QSizePolicy.Preferred,QtGui.QSizePolicy.Preferred)
self.ios_label.setObjectName("label")
self.ios_label.setText("logo")
myPixmap = QtGui.QPixmap('./ifcopenshell.png')
self.ios_label.resize(myPixmap.width(),myPixmap.height())
myScaledPixmap = myPixmap.scaled(self.ios_label.size(), QtCore.Qt.KeepAspectRatio)
self.ios_label.setPixmap(myScaledPixmap)
self.window.statusBar().addWidget(self.ios_label)
my_app().start()
|
the-stack_106_20207
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
import pathlib
from scipy.stats import variation
import math
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0006
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/dl_checkpoints/' + args.tc + '/' + job_name + '_*'
total_epochs = 50
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[5].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
batch_time = []
batch_begin = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
pathlib.Path('/scratch/li.baol/dl_checkpoints/'+args.tc+'/').mkdir(parents=True, exist_ok=True)
model.save('/scratch/li.baol/dl_checkpoints/'+args.tc+'/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
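# Illustrative note (not part of the original training flow): the handler above is
# exercised by delivering SIGTERM to this process. Assuming the scheduler kept the
# PID reported earlier, it could trigger a checkpoint-and-exit with, for example:
#   os.kill(pid, signal.SIGTERM)      # from another process on the same host
#   # or from a shell: kill -15 <pid>
# terminateProcess() then reports the wasted epoch time, saves the model under
# /scratch/li.baol/dl_checkpoints/<tc>/, and exits.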
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
batches_per_epoch = math.ceil(y_train.shape[0] / batch_size)
stable_batch = 0
class PrintEpoch(keras.callbacks.Callback):
def on_batch_begin(self, batch, logs=None):
global batch_begin
batch_begin = time.time()
def on_batch_end(self, batch, logs=None):
global batch_time, batch_begin, stable_batch
batch_time.append(float(time.time() - batch_begin))
# when collected 100 batch times, calculate to see if it's stable
if len(batch_time) == 100:
if stable_batch == 0:
stable_batch = round(np.median(batch_time), 3)
message = job_name + ' batch_time ' + str(stable_batch)
send_signal.send(args.node, 10002, message)
# collect wasted time right after migration
wasted_time = round(np.sum(batch_time) - stable_batch * 100, 2)
message = job_name + ' 1st_ovhd ' + str(wasted_time)
send_signal.send(args.node, 10002, message)
batch_time = []
self.remaining_batches -= 100
message = job_name + ' remain_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
self.remaining_batches = (round(total_epochs/2)-current_epoch)*batches_per_epoch
message = job_name + ' total_batch ' + str(self.remaining_batches)
send_signal.send(args.node, 10002, message)
message = job_name + ' epoch_begin ' + str(current_epoch)
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
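# Summary of the status messages this script sends to the scheduler on port 10002
# (all taken from the send_signal.send calls above): '<job> pid <n>' at startup,
# 'b_end'/'c_end'/'d_end' around checkpoint loading, 'waste <s>' and 'checkpoint'
# from the SIGTERM handler, 'batch_time', '1st_ovhd', 'remain_batch' and
# 'total_batch' from the batch callbacks, 'epoch_begin <n>', 'ckpt_qual',
# '1st_epoch <s>' and 'completion <frac>' per epoch, and 'finish' at the end.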
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
the-stack_106_20209
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""
CHOOSING LETTER-COLOR ASSOCIATIONS TASK
O.Colizoli & M.Blasco Oliver, 2019
Outputs a TSV file for sub-101 and sub-201 (Group2 gets Group1's preferences)
"""
### Import Libraries ###
import os, time # for paths and data
from psychopy import core, visual, event, gui, monitors
import random
import numpy as np
import pandas as pd
from IPython import embed as shell
import general_parameters as gp # letter conditions, counterbalancing
debug_mode = False
#########################################
################# TEXTS #################
#########################################
welcome_txt = "Imagine that each letter of the alphabet would be associated with a color.\
\n\nYour task is to pair each letter (left hand side) to the colors presented (right hand side).\
\nThere are NO right or wrong answers, just go with your 'gut' feeling, your preferences or (dis)likes.\
\nYou may change your answers before your final decision.\
\nHowever, you may not choose the same color for more than one letter (each letter should have a unique color).\
\n\nTo make your choice, first press the letter via the keyboard, followed by ENTER.\
\nThereafter, press the number on the keyboard corresponding to the desired color, followed by ENTER.\
\nPress ENTER again, to go to the next letter.\
\n\n[Press the SPACEBAR to begin]"
######################################################
##################### FUNCTIONS #####################
######################################################
def show_input(win,response_input,type_input,options):
keys = []
user_txt = None
all_options= options + ["return","backspace","space"]
while True:
tmp_key = event.getKeys(all_options) # keyList = [a,b,c]
if len(tmp_key)>0:
if tmp_key[0] == "return":
                if keys == []: # ENTER pressed first by accident (nothing typed yet)
continue
else:
break
elif tmp_key[0] == "backspace": # fix backspace
try:
keys.pop()
user_txt = ''.join(keys)
response_input.setText(user_txt)
response_input.draw()
win.flip()
except:
continue
user_txt = ''.join(keys)
elif tmp_key[0] == "space":
keys.append('')
else:
keys.append(tmp_key[0])
user_txt = ''.join(keys)
response_input.setText(user_txt)
response_input.draw()
win.flip()
return user_txt
######################################################
##################### PARAMETERS #####################
######################################################
#Get subject number
g = gui.Dlg()
g.addField('Subject Number:')
g.show()
subject_ID = int(g.data[0])
session = 1
# in case GUI doesn't work
# subject_ID = 1
if subject_ID:
# trials
trials = 8
# Stimuli
## LETTER SET DEPENDENT ON SUBJECT NUMBER
letters = gp.letter_sets_lower[np.mod(subject_ID,2)] # lower case, TRAINED set!!
colors = gp.trained_colors
colorcodes = ['1.','2.','3.','4.','5.','6.','7.','8.','9.','10.','11.','12.','13.']
colorcode_input = [1,2,3,4,5,6,7,8,9,10,11,12,13]
colorcode_input_str = ['0','1','2','3','4','5','6','7','8','9','10','11','12','13']
random.shuffle(colors) # shuffle order of colors, not letters
    # Build dictionary of colors
color_dict = {k: 0 for k in colorcode_input}
for k in range(len(color_dict)):
color_dict[k+1] = colors[k]
# Text sizes
cc_size = 24
l_size = 40
cp_size = 22 # radius, so size is 32
choice_size = 46
choice_col_size = 16
### Positions X on screen
# Letters
let_x1 = -500 #-500
let_x2 = let_x1+100 #-400
# Colors number (coln) and color patches (colp)
coln_x1 = -100 #-100
colp_x1 = coln_x1+40 #-60
coln_x2 = coln_x1+150 #50
colp_x2 = coln_x2+40 #90
# Choices (letcol) and number color (letcoln)
letcol_x1 = 350 #350
letcoln_x1 = letcol_x1+25 #375
letcol_x2 = letcol_x1+70 #420
letcoln_x2 = letcol_x2+25 #445
letcol_xnone = np.mean([letcol_x1,letcol_x2])
### Positions Y on screen
# Letters & Colors options
let_col_y1 = np.linspace(220,-140,7).astype(int)
distance_y = np.abs(let_col_y1[0]-let_col_y1[1])
start_let_col_y2 = let_col_y1[0]-(distance_y/2)
end_let_col_y2 = let_col_y1[-1]+(distance_y/2)
let_col_y2 = np.linspace(start_let_col_y2,end_let_col_y2,6).astype(int)
print('Position in y letters and colors:')
print(let_col_y1)
print(let_col_y2)
# Choices (letcol) and number color (letcoln)
choice_y1 = np.linspace(180,-120,7).astype(int)
dist_y_choice = np.abs(choice_y1[0]-choice_y1[1])
start_choice_y2 = choice_y1[0]-(dist_y_choice/2)
end_choice_y2 = choice_y1[-1]+(dist_y_choice/2)
choice_y2 = np.linspace(start_choice_y2,end_choice_y2,6).astype(int)
choice_ynone = choice_y1[-1]-(dist_y_choice/2)
print('Position in y choices:')
print(choice_y1)
print(choice_y2)
print(choice_ynone)
### SIGN POSITIONS
sign_y = let_col_y1[0]+60
ask_y = let_col_y1[-1]-60
input_y = ask_y-40
sign_x_let = np.mean([let_x1,let_x2])
sign_x_col = np.mean([coln_x1,colp_x2])
sign_x_choice = np.mean([letcol_x1,letcol_x2])
# Choosen pairs dictionary - to be filled during the task
pairs_dict = {k: '' for k in letters}
## Create LogFile folder cwd/LogFiles
cwd = os.getcwd()
## output file name with time stamp prevents any overwriting of data
timestr = time.strftime("%Y%m%d-%H%M%S")
# G1
if subject_ID < 200: # make G1 & G2's folders
subject_yoked = subject_ID+100
logfile_dir = os.path.join(cwd,'colors','sub-{}'.format(subject_ID)) # system independent
logfile_dir_g2 = os.path.join(cwd,'colors','sub-{}'.format(subject_ID+100)) # system independent
if not os.path.isdir(logfile_dir):
os.makedirs(logfile_dir)
if not os.path.isdir(logfile_dir_g2):
os.makedirs(logfile_dir_g2)
# no time stamp because extra something to delete when scanning...
output_filename = os.path.join(logfile_dir,'sub-{}_colors.tsv'.format(subject_ID ))
output_filename_g2 = os.path.join(logfile_dir_g2,'sub-{}_colors.tsv'.format(subject_ID+100 )) # G2 gets G1's colors
# G2
else: # save elsewhere in 'prefs' folder
subject_yoked = subject_ID-100
pref_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav') # system independent
if not os.path.isdir(pref_dir):
os.makedirs(pref_dir)
output_filename = os.path.join(pref_dir,'sub-{}_prefs_{}.tsv'.format(subject_ID,timestr ))
header = ['subject','subject_yoked','letter','colorcode','r','g','b']
data_pairs = pd.DataFrame(columns=header) #called DF in other script
# Set-up window:
mon = monitors.Monitor('myMac15', width=gp.screen_width, distance=gp.screen_dist)
mon.setSizePix((gp.scnWidth, gp.scnHeight))
win = visual.Window((gp.scnWidth, gp.scnHeight),color=gp.white,colorSpace='rgb255',monitor=mon,fullscr=not debug_mode,units='pix',allowStencil=True,autoLog=False)
win.setMouseVisible(True)
# Set-up stimuli and timing
instr = visual.TextStim(win, color='black', pos=(0.0, 0.0), wrapWidth=gp.ww )
letter_sign = visual.TextStim(win, text = 'LETTERS:', color='black', pos=(sign_x_let,sign_y), height = 24)
color_sign = visual.TextStim(win, text = 'COLORS:', color='black', pos=(sign_x_col,sign_y), height = 24)
choices_sign = visual.TextStim(win, text = 'YOUR CHOICES:', color='black', pos=(sign_x_choice,sign_y), height = 24)
letter_ask = visual.TextStim(win, text = 'Letter?', color='black', pos=(sign_x_let,ask_y),height=20)
color_ask = visual.TextStim(win, text = 'Color (number)?', color='black', pos=(sign_x_col,ask_y),height=20)
letter_input = visual.TextStim(win, color= 'black', pos=(sign_x_let,input_y),height=30)
color_input = visual.TextStim(win, color= 'black', pos=(sign_x_col,input_y),height=30)
final_choice_ask = visual.TextStim(win, text = 'Continue choosing [ENTER]\nor Final choice [y]', color='black', pos=(sign_x_choice,ask_y), height = 20)
keep_choosing = visual.TextStim(win, text = 'Letters with no color or repeated colors. \nPlease, continue choosing. ', color='black', pos=(sign_x_choice,ask_y-40), height = 20)
# Letters to be trained
letter_01 = visual.TextStim(win, text = letters[0], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[0]), height = l_size)
letter_02 = visual.TextStim(win, text = letters[1], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[1]), height = l_size)
letter_03 = visual.TextStim(win, text = letters[2], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[2]), height = l_size)
letter_04 = visual.TextStim(win, text = letters[3], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[3]), height = l_size)
letter_05 = visual.TextStim(win, text = letters[4], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[4]), height = l_size)
letter_06 = visual.TextStim(win, text = letters[5], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[5]), height = l_size)
letter_07 = visual.TextStim(win, text = letters[6], color='black', font = gp.font_trained, pos=(let_x1,let_col_y1[6]), height = l_size)
letter_08 = visual.TextStim(win, text = letters[7], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[0]), height = l_size)
letter_09 = visual.TextStim(win, text = letters[8], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[1]), height = l_size)
letter_10 = visual.TextStim(win, text = letters[9], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[2]), height = l_size)
letter_11 = visual.TextStim(win, text = letters[10], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[3]), height = l_size)
letter_12 = visual.TextStim(win, text = letters[11], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[4]), height = l_size)
letter_13 = visual.TextStim(win, text = letters[12], color='black', font = gp.font_trained, pos=(let_x2,let_col_y2[5]), height = l_size)
letter_holder = [letter_01,letter_02,letter_03,letter_04,letter_05,letter_06,letter_07,
letter_08,letter_09,letter_10,letter_11,letter_12,letter_13]
# Color codes 01-13
colorcode_01 = visual.TextStim(win, text = colorcodes[0], color='black', pos=(coln_x1,let_col_y1[0]), height = cc_size)
colorcode_02 = visual.TextStim(win, text = colorcodes[1], color='black', pos=(coln_x1,let_col_y1[1]), height = cc_size)
colorcode_03 = visual.TextStim(win, text = colorcodes[2], color='black', pos=(coln_x1,let_col_y1[2]), height = cc_size)
colorcode_04 = visual.TextStim(win, text = colorcodes[3], color='black', pos=(coln_x1,let_col_y1[3]), height = cc_size)
colorcode_05 = visual.TextStim(win, text = colorcodes[4], color='black', pos=(coln_x1,let_col_y1[4]), height = cc_size)
colorcode_06 = visual.TextStim(win, text = colorcodes[5], color='black', pos=(coln_x1,let_col_y1[5]), height = cc_size)
colorcode_07 = visual.TextStim(win, text = colorcodes[6], color='black', pos=(coln_x1,let_col_y1[6]), height = cc_size)
colorcode_08 = visual.TextStim(win, text = colorcodes[7], color='black', pos=(coln_x2,let_col_y2[0]), height = cc_size)
colorcode_09 = visual.TextStim(win, text = colorcodes[8], color='black', pos=(coln_x2,let_col_y2[1]), height = cc_size)
colorcode_10 = visual.TextStim(win, text = colorcodes[9], color='black', pos=(coln_x2,let_col_y2[2]), height = cc_size)
colorcode_11 = visual.TextStim(win, text = colorcodes[10], color='black', pos=(coln_x2,let_col_y2[3]), height = cc_size)
colorcode_12 = visual.TextStim(win, text = colorcodes[11], color='black', pos=(coln_x2,let_col_y2[4]), height = cc_size)
colorcode_13 = visual.TextStim(win, text = colorcodes[12], color='black', pos=(coln_x2,let_col_y2[5]), height = cc_size)
colorcode_holder = [colorcode_01,colorcode_02,colorcode_03,colorcode_04,colorcode_05,colorcode_06,colorcode_07,
colorcode_08,colorcode_09,colorcode_10,colorcode_11,colorcode_12,colorcode_13]
# Color circles RGB tones
colorpatch_01 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[1],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[0]), opacity = 1.0)
colorpatch_02 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[2],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[1]), opacity = 1.0)
colorpatch_03 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[3],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[2]), opacity = 1.0)
colorpatch_04 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[4],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[3]), opacity = 1.0)
colorpatch_05 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[5],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[4]), opacity = 1.0)
colorpatch_06 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[6],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[5]), opacity = 1.0)
colorpatch_07 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[7],fillColorSpace = 'rgb255', pos =(colp_x1,let_col_y1[6]), opacity = 1.0)
colorpatch_08 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[8],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[0]), opacity = 1.0)
colorpatch_09 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[9],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[1]), opacity = 1.0)
colorpatch_10 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[10],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[2]), opacity = 1.0)
colorpatch_11 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[11],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[3]), opacity = 1.0)
colorpatch_12 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[12],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[4]), opacity = 1.0)
colorpatch_13 = visual.Circle(win=win,radius=cp_size,fillColor=color_dict[13],fillColorSpace = 'rgb255', pos =(colp_x2,let_col_y2[5]), opacity = 1.0)
colorpatch_holder = [colorpatch_01,colorpatch_02,colorpatch_03,colorpatch_04,colorpatch_05,colorpatch_06,colorpatch_07,
colorpatch_08,colorpatch_09,colorpatch_10,colorpatch_11,colorpatch_12,colorpatch_13]
# Choices holders: colored letter
# pos_choice = [(200,150),(200,100),(200,50),(200,0),(200,-50),(200,-100),(200,-150),
# (250,125),(250,75),(250,25),(250,-25),(250,-75),(250,-125)]
globals() ['choice_{}'.format(letters[0])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[0]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[1])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[1]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[2])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[2]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[3])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[3]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[4])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[4]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[5])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[5]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[6])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x1,choice_y1[6]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[7])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[0]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[8])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[1]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[9])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[2]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[10])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[3]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[11])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[4]),height=choice_size,colorSpace='rgb255')
globals() ['choice_{}'.format(letters[12])] = visual.TextStim(win,font = gp.font_trained, pos=(letcol_x2,choice_y2[5]),height=choice_size,colorSpace='rgb255')
choice_None = visual.TextStim(win,text='None',color='black',pos=(letcol_xnone,choice_ynone),height=24)
# Choosen color number per letter
globals() ['choicecode_{}'.format(letters[0])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[0]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[1])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[1]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[2])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[2]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[3])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[3]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[4])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[4]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[5])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[5]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[6])] = visual.TextStim(win,color='black',pos=(letcoln_x1,choice_y1[6]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[7])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[0]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[8])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[1]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[9])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[2]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[10])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[3]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[11])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[4]),height=choice_col_size)
globals() ['choicecode_{}'.format(letters[12])] = visual.TextStim(win,color='black',pos=(letcoln_x2,choice_y2[5]),height=choice_col_size)
# INSTRUCTIONS
instr.setText(welcome_txt)
instr.draw()
win.flip()
event.waitKeys()
# Show signs
letter_sign.setAutoDraw(True)
color_sign.setAutoDraw(True)
choices_sign.setAutoDraw(True)
# Start choosing until they are done
no_final_choice = True
trial=0
while no_final_choice:
#for t in range(trials):
trial += 1
print('##### Trial {} #####'.format(trial))
# Show list of stimuli:
for l in letter_holder:
l.setAutoDraw(True)
for cc in colorcode_holder:
cc.setAutoDraw(True)
for cp in colorpatch_holder:
cp.setAutoDraw(True)
# Ask letter
letter_ask.setAutoDraw(True)
win.flip()
## Record keypresses for letter display
letter_resp = show_input(win,response_input=letter_input,type_input='letter',options=letters) # fix backspace
letter_input.setAutoDraw(True)
print('Letter response: ', letter_resp)
# Ask color:
color_ask.setAutoDraw(True)
win.flip()
## Record keypresses for letter display
color_resp = show_input(win,response_input=color_input,type_input='color',options=colorcode_input_str)
print('Color response: ', color_resp)
color_input.setAutoDraw(True)
# Assign color to letter:
pairs_dict[letter_resp] = color_resp
        # Show all choices made so far on the right side (letter-color pairs)
for let in pairs_dict:
            if pairs_dict[let] != '':
col = int(pairs_dict[let])
# SHow colored letter
if (let in letters) and (col in colorcode_input):
#Colored letter
globals() ['choice_{}'.format(let)].setText(let)
globals() ['choice_{}'.format(let)].setColor(color_dict[col])
globals() ['choice_{}'.format(let)].setAutoDraw(True)
# Color number
globals() ['choicecode_{}'.format(let)].setText(col)
globals() ['choicecode_{}'.format(let)].setAutoDraw(True)
else:
choice_None.setAutoDraw(True)
win.flip()
# Remove 'None' from dictionary
no_let = []
no_col = []
for let, col in pairs_dict.items():
if let not in letters:
print('- {} is not a letter of interest.'.format(let))
no_let.append(let)
            if (col not in colorcode_input_str) and (col != ''):
print('- {} is not a color of interest.'.format(col))
pairs_dict[let] = ''
print('- Now key {} that had non-color {} is empty again.'.format(let,col))
for nl in range(len(no_let)):
pairs_dict.pop(no_let[nl])
print('- Non-letter {} removed from dictionary'.format(no_let[nl]))
# Remove answers
letter_ask.setAutoDraw(False)
color_ask.setAutoDraw(False)
color_input.setAutoDraw(False)
letter_input.setAutoDraw(False)
final_choice_ask.draw()
win.flip()
choice_None.setAutoDraw(False)
keys=[]
final_choice = event.waitKeys(keyList=['y','return'])
print('- final choice:', final_choice[0])
if final_choice[0] == 'y':
#Select choosen colors and check empty ones:
chosen_colors = []
empty = 0
for col in pairs_dict.values():
chosen_colors.append(col)
if col == '':
empty +=1
#Check repeated colors:
seen = set()
uniq = []
duplic = []
for c in chosen_colors:
if c not in seen:
uniq.append(c)
seen.add(c)
else:
duplic.append(c)
# Finish only if there is no empty or repeated colors
if len(duplic)>0 or empty>0:
keep_choosing.draw()
win.flip()
core.wait(2)
else:
no_final_choice = False
# Save data
writer_count = 0
for letter in pairs_dict:
        if pairs_dict[letter] != '':
colornum = int(pairs_dict[letter])
colorRGB = color_dict[colornum]
data_pairs.loc[writer_count] = [
subject_ID, # subject
subject_yoked, # subject yoked to
letter, # letter
colornum, # Color code on the screen
int(colorRGB[0]), #R in 255
int(colorRGB[1]), #G in 255
int(colorRGB[2]), #B in 255
]
data_pairs.to_csv(output_filename,sep='\t')
if subject_ID < 200:
data_pairs.to_csv(output_filename_g2,sep='\t') # G2 gets G1's colors
writer_count += 1
# Close-up
win.close()
core.quit()
|
the-stack_106_20212
|
# -*- encoding: utf-8 -*-
"""
keri.kli.commands.multisig module
"""
import argparse
from hio import help
from hio.base import doing
from keri.app import directing, grouping, indirecting
from keri.app.cli.common import rotating, existing, displaying
logger = help.ogler.getLogger()
parser = argparse.ArgumentParser(description='Begin or join a rotation of a group identifier')
parser.set_defaults(handler=lambda args: rotateGroupIdentifier(args),
transferable=True)
parser.add_argument('--name', '-n', help='Human readable reference', required=True)
parser.add_argument('--group', '-g', help="Human readable environment reference for group identifier", required=True)
rotating.addRotationArgs(parser)
def rotateGroupIdentifier(args):
"""
Performs a rotation on the group identifier specified as an argument. The identifier prefix of the environment
represented by the name parameter must be a member of the group identifier. This command will perform a rotation
of the local identifier if the sequence number of the local identifier is the same as the group identifier sequence
    number. It will wait for all other members of the group to achieve the same sequence number (group + 1) and then
publish the signed rotation event for the group identifier to all witnesses and wait for receipts.
Parameters:
args (parseargs): command line parameters
"""
kwa = args.__dict__
rotDoer = GroupMultisigRotate(**kwa)
doers = [rotDoer]
directing.runController(doers=doers, expire=0.0)
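# Hypothetical invocation (the exact entry point depends on how this command is
# registered with the kli tool; argument names come from the parser above plus
# whatever rotating.addRotationArgs adds, e.g. sith, toad, witness changes):
#   kli multisig rotate --name alice --group mygroup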
class GroupMultisigRotate(doing.DoDoer):
"""
    Command line DoDoer to launch the coroutines needed to run a multisig rotation.
    This DoDoer will remove the multisig coroutine and exit when it receives a message
that the multisig coroutine has successfully completed a cooperative rotation.
"""
def __init__(self, name, **kwa):
self.hab, doers = existing.setupHabitat(name=name)
self.rotr = grouping.MultiSigGroupDoer(hab=self.hab)
self.msg = kwa
mbd = indirecting.MailboxDirector(hab=self.hab, topics=['/receipt', '/multisig'])
doers.extend([self.rotr, mbd])
self.toRemove = list(doers)
doers.extend([doing.doify(self.rotateDo)])
super(GroupMultisigRotate, self).__init__(doers=doers)
def rotateDo(self, tymth, tock=0.0, **opts):
# enter context
yield self.tock
msg = dict(op=grouping.Ops.rot, reason="Standard Rotation")
msg["group"] = self.msg["group"]
msg["sith"] = self.msg["sith"]
msg["toad"] = self.msg["toad"]
msg["data"] = self.msg["data"]
msg["wits"] = self.msg["witnesses"] if "witnesses" in msg else []
msg["cuts"] = self.msg["witness_cut"] if "witnesse_cut" in msg else []
msg["adds"] = self.msg["witness_add"] if "witnesse_add" in msg else []
self.rotr.msgs.append(msg)
while not self.rotr.cues:
yield self.tock
rep = self.rotr.cues.popleft()
print()
print("Group Identifier Rotation Complete:")
displaying.printIdentifier(self.hab, rep["pre"])
self.remove(self.toRemove)
|
the-stack_106_20213
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPU system metadata and associated tooling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
_PINGING_MASTER_TIMEOUT_IN_MS = 60 * 1000 # 1 min
_RETRY_TIMES = 120
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS = 300 * 1000 # 5 mins
_TPU_DEVICE_REG = re.compile(r'.*task:(\d+)/.*device:TPU:(\d+)$')
# _TPUSystemMetadata is used by TPUEstimator to hold TPU configuration,
# including num_cores and num_hosts.
_TPUSystemMetadata = collections.namedtuple('_TPUSystemMetadata', [
'num_cores',
'num_hosts',
'num_of_cores_per_host',
'topology',
'devices',
])
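# Minimal usage sketch (the master address below is illustrative; a reachable TPU
# worker is assumed):
#   metadata = _query_tpu_system_metadata('grpc://10.240.1.2:8470')
#   print(metadata.num_cores, metadata.num_hosts, metadata.num_of_cores_per_host)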
def _query_tpu_system_metadata(master_address, cluster_def=None,
query_topology=False):
"""Automatically detects the TPU system metadata in the system."""
tpu_core_count = 0
devices = []
device_dict = collections.defaultdict(list)
retry_count = 1
while True:
logging.info('Querying Tensorflow master (%s) for TPU system metadata.',
master_address)
try:
with ops.Graph().as_default():
with session_lib.Session(
master_address,
config=get_session_config_with_timeout(
_PINGING_MASTER_TIMEOUT_IN_MS,
cluster_def)) as sess:
devices = sess.list_devices()
for device in devices:
match = _TPU_DEVICE_REG.match(device.name)
if match:
host_id = match.group(1)
core_id = match.group(2)
device_dict[host_id].append(core_id)
tpu_core_count += 1
break
except errors.DeadlineExceededError:
msg = ('Failed to connect to the Tensorflow master. The TPU worker may '
'not be ready (still scheduling) or the Tensorflow master address '
'is incorrect: got (%s).' %
(master_address))
# TODO(xiejw): For local or grpc master we might not need retry logic
# here.
if retry_count <= _RETRY_TIMES:
logging.warning('%s', msg)
logging.warning('Retrying (%d/%d).', retry_count, _RETRY_TIMES)
retry_count += 1
else:
raise ValueError(msg)
num_of_cores_per_host = 0
if tpu_core_count:
num_cores_per_host_set = set(
[len(core_ids) for core_ids in device_dict.values()])
if len(num_cores_per_host_set) != 1:
raise RuntimeError(
          'The number of TPU cores on each host is not the same. This should not happen! '
'devices: {}'.format(devices))
num_of_cores_per_host = num_cores_per_host_set.pop()
topology = None
if query_topology:
if not tpu_core_count:
raise RuntimeError(
'Cannot find any TPU cores in the system (master address {}). '
'This usually means the master address is incorrect or the '
'TPU worker has some problems. Available devices: {}'.format(
master_address, devices))
topology = _obtain_topology(master_address, cluster_def)
metadata = _TPUSystemMetadata(
num_cores=tpu_core_count,
num_hosts=len(device_dict),
num_of_cores_per_host=num_of_cores_per_host,
topology=topology,
devices=devices)
if tpu_core_count:
logging.info('Found TPU system:')
logging.info('*** Num TPU Cores: %d', metadata.num_cores)
logging.info('*** Num TPU Workers: %d', metadata.num_hosts)
logging.info('*** Num TPU Cores Per Worker: %d',
metadata.num_of_cores_per_host)
for device in metadata.devices:
logging.info('*** Available Device: %s', device)
else:
logging.info('Failed to find TPU: %s', metadata)
return metadata
def _obtain_topology(master_address, cluster_def):
"""Obtains TPU fabric topology."""
try:
logging.info('Initializing TPU system (master: %s) to fetch topology '
'for model parallelism. This might take a while.',
master_address)
with ops.Graph().as_default():
session_config = get_session_config_with_timeout(
_INITIAL_TPU_SYSTEM_TIMEOUT_IN_MS, cluster_def)
with session_lib.Session(
master_address, config=session_config) as sess:
topology = sess.run(tpu.initialize_system())
return topology
except errors.DeadlineExceededError:
raise ValueError(
'Fail to initialize TPU system with master (%s). '
'Please double check the TPU system is functional.' % (
master_address))
def get_session_config_with_timeout(timeout_in_ms, cluster_def):
  """Returns a session config (ConfigProto) with the given operation timeout (in ms)."""
  config = config_pb2.ConfigProto(
      operation_timeout_in_ms=timeout_in_ms, cluster_def=cluster_def)
  return config
|
the-stack_106_20219
|
title = 'Addition of CH3 across a double bond in CH2O'
description = \
"""
This example illustrates how more complex explorer jobs work. In this case the source channel involves two reactants,
and since CH3 can add across the double bond in two ways, this results in two pressure-dependent networks.
"""
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
species(
label='CH3',
structure=SMILES('[CH3]'),
)
species(
label='CH2O',
structure=SMILES('O=C'),
)
species(
label = 'N2',
structure = SMILES('N#N'),
molecularWeight = (28.04,"g/mol"),
collisionModel = TransportData(sigma=(3.70,'angstrom'), epsilon=(94.9,'K')),
reactive = False
)
pressureDependence(
label = 'CH2O+CH3',
Tmin = (300.0,'K'), Tmax = (1200,'K'), Tcount = 7,
Pmin = (1.0,'atm'), Pmax = (10,'atm'), Pcount = 7,
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 500,
method = 'modified strong collision',
interpolationModel = ('pdeparrhenius'),
activeKRotor = True,
rmgmode = False,
)
explorer(
source=['CH3','CH2O'],
explore_tol=1.0e-1,
bathGas={'N2':1.0},
maximumRadicalElectrons=1,
)
|
the-stack_106_20220
|
from path import path_code_dir
import sys
sys.path.insert(0, path_code_dir)
import numpy as np
from scipy import sparse
import cv2
from pymatreader import read_mat
# from extract_graph import dic_to_sparse
from amftrack.pipeline.functions.image_processing.extract_graph import (
generate_skeleton,
)
from amftrack.pipeline.functions.image_processing.extract_graph import (
from_sparse_to_graph,
generate_nx_graph,
prune_graph,
clean_degree_4,
)
from amftrack.util import get_dates_datetime, get_dirname
import scipy.sparse
import scipy.io as sio
from amftrack.pipeline.paths.directory import directory_scratch
import pandas as pd
i = int(sys.argv[-1])
op_id = int(sys.argv[-2])
threshold = float(sys.argv[1])
directory = str(sys.argv[2])
run_info = pd.read_json(f'{directory_scratch}temp/{op_id}.json')
folder_list = list(run_info['folder'])
folder_list.sort()
directory_name = folder_list[i]
path_snap = directory + directory_name
skel = read_mat(path_snap + "/Analysis/skeleton_masked.mat")["skeleton"]
skeleton = scipy.sparse.dok_matrix(skel)
# nx_graph_poss=[generate_nx_graph(from_sparse_to_graph(skeleton)) for skeleton in skels_aligned]
# nx_graphs_aligned=[nx_graph_pos[0] for nx_graph_pos in nx_graph_poss]
# poss_aligned=[nx_graph_pos[1] for nx_graph_pos in nx_graph_poss]
# nx_graph_pruned=[clean_degree_4(prune_graph(nx_graph),poss_aligned[i])[0] for i,nx_graph in enumerate(nx_graphs_aligned)]
nx_graph, pos = generate_nx_graph(from_sparse_to_graph(skeleton))
nx_graph_pruned = clean_degree_4(prune_graph(nx_graph, threshold), pos)[0]
skeleton = generate_skeleton(nx_graph_pruned, (30000, 60000))
skel = scipy.sparse.csc_matrix(skeleton, dtype=np.uint8)
sio.savemat(path_snap + "/Analysis/skeleton_pruned.mat", {"skeleton": skel})
dim = skel.shape
kernel = np.ones((5, 5), np.uint8)
itera = 1
sio.savemat(
path_snap + "/Analysis/skeleton_pruned_compressed.mat",
{
"skeleton": cv2.resize(
cv2.dilate(skel.todense(), kernel, iterations=itera),
(dim[1] // 5, dim[0] // 5),
)
},
)
|
the-stack_106_20221
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""File wrangling."""
import fnmatch
import ntpath
import os
import os.path
import posixpath
import re
import sys
from coverage import env
from coverage.backward import unicode_class
from coverage.misc import contract, CoverageException, join_regex, isolate_module
os = isolate_module(os)
def set_relative_directory():
"""Set the directory that `relative_filename` will be relative to."""
global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
# The absolute path to our current directory.
RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
CANONICAL_FILENAME_CACHE = {}
def relative_directory():
"""Return the directory that `relative_filename` is relative to."""
return RELATIVE_DIR
@contract(returns='unicode')
def relative_filename(filename):
"""Return the relative form of `filename`.
The file name will be relative to the current directory when the
`set_relative_directory` was called.
"""
fnorm = os.path.normcase(filename)
if fnorm.startswith(RELATIVE_DIR):
filename = filename[len(RELATIVE_DIR):]
return unicode_filename(filename)
@contract(returns='unicode')
def canonical_filename(filename):
"""Return a canonical file name for `filename`.
An absolute path with no redundant components and normalized case.
"""
if filename not in CANONICAL_FILENAME_CACHE:
if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
f = os.path.join(path, filename)
try:
exists = os.path.exists(f)
except UnicodeError:
exists = False
if exists:
filename = f
break
cf = abs_file(filename)
CANONICAL_FILENAME_CACHE[filename] = cf
return CANONICAL_FILENAME_CACHE[filename]
def flat_rootname(filename):
"""A base for a flat file name to correspond to this file.
Useful for writing files about the code where you want all the files in
the same directory, but need to differentiate same-named files from
different directories.
For example, the file a/b/c.py will return 'a_b_c_py'
"""
name = ntpath.splitdrive(filename)[1]
return re.sub(r"[\\/.:]", "_", name)
if env.WINDOWS:
_ACTUAL_PATH_CACHE = {}
_ACTUAL_PATH_LIST_CACHE = {}
def actual_path(path):
"""Get the actual path of `path`, including the correct case."""
if env.PY2 and isinstance(path, unicode_class):
path = path.encode(sys.getfilesystemencoding())
if path in _ACTUAL_PATH_CACHE:
return _ACTUAL_PATH_CACHE[path]
head, tail = os.path.split(path)
if not tail:
# This means head is the drive spec: normalize it.
actpath = head.upper()
elif not head:
actpath = tail
else:
head = actual_path(head)
if head in _ACTUAL_PATH_LIST_CACHE:
files = _ACTUAL_PATH_LIST_CACHE[head]
else:
try:
files = os.listdir(head)
except OSError:
files = []
_ACTUAL_PATH_LIST_CACHE[head] = files
normtail = os.path.normcase(tail)
for f in files:
if os.path.normcase(f) == normtail:
tail = f
break
actpath = os.path.join(head, tail)
_ACTUAL_PATH_CACHE[path] = actpath
return actpath
else:
def actual_path(filename):
"""The actual path for non-Windows platforms."""
return filename
if env.PY2:
@contract(returns='unicode')
def unicode_filename(filename):
"""Return a Unicode version of `filename`."""
if isinstance(filename, str):
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
filename = filename.decode(encoding, "replace")
return filename
else:
@contract(filename='unicode', returns='unicode')
def unicode_filename(filename):
"""Return a Unicode version of `filename`."""
return filename
@contract(returns='unicode')
def abs_file(filename):
"""Return the absolute normalized form of `filename`."""
path = os.path.expandvars(os.path.expanduser(filename))
try:
path = os.path.realpath(path)
except UnicodeError:
pass
path = os.path.abspath(path)
path = actual_path(path)
path = unicode_filename(path)
return path
RELATIVE_DIR = None
CANONICAL_FILENAME_CACHE = None
set_relative_directory()
def isabs_anywhere(filename):
"""Is `filename` an absolute path on any OS?"""
return ntpath.isabs(filename) or posixpath.isabs(filename)
def prep_patterns(patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
prepped = []
for p in patterns or []:
if p.startswith(("*", "?")):
prepped.append(p)
else:
prepped.append(abs_file(p))
return prepped
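# Behaviour sketch (paths are illustrative): patterns that start with a wildcard
# are kept as-is, anything else is anchored to the current directory.
#   prep_patterns(['*/tests/*', 'src/pkg'])
#   # -> ['*/tests/*', '/abs/path/to/cwd/src/pkg']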
class TreeMatcher(object):
"""A matcher for files in a tree.
Construct with a list of paths, either files or directories. Paths match
with the `match` method if they are one of the files, or if they are
somewhere in a subtree rooted at one of the directories.
"""
def __init__(self, paths):
self.paths = list(paths)
def __repr__(self):
return "<TreeMatcher %r>" % self.paths
def info(self):
"""A list of strings for displaying when dumping state."""
return self.paths
def match(self, fpath):
"""Does `fpath` indicate a file in one of our trees?"""
for p in self.paths:
if fpath.startswith(p):
if fpath == p:
# This is the same file!
return True
if fpath[len(p)] == os.sep:
# This is a file in the directory
return True
return False
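# Usage sketch (paths are illustrative; real callers pass canonical absolute paths):
#   matcher = TreeMatcher(['/repo/project', '/repo/project/setup.py'])
#   matcher.match('/repo/project/pkg/mod.py')   # True: file inside the tree
#   matcher.match('/repo/other/mod.py')         # False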
class ModuleMatcher(object):
"""A matcher for modules in a tree."""
def __init__(self, module_names):
self.modules = list(module_names)
def __repr__(self):
return "<ModuleMatcher %r>" % (self.modules)
def info(self):
"""A list of strings for displaying when dumping state."""
return self.modules
def match(self, module_name):
"""Does `module_name` indicate a module in one of our packages?"""
if not module_name:
return False
for m in self.modules:
if module_name.startswith(m):
if module_name == m:
return True
if module_name[len(m)] == '.':
# This is a module in the package
return True
return False
class FnmatchMatcher(object):
"""A matcher for files by file name pattern."""
def __init__(self, pats):
self.pats = pats[:]
# fnmatch is platform-specific. On Windows, it does the Windows thing
# of treating / and \ as equivalent. But on other platforms, we need to
# take care of that ourselves.
fnpats = (fnmatch.translate(p) for p in pats)
# Python3.7 fnmatch translates "/" as "/", before that, it translates as "\/",
# so we have to deal with maybe a backslash.
fnpats = (re.sub(r"\\?/", r"[\\\\/]", p) for p in fnpats)
flags = 0
if env.WINDOWS:
# Windows is also case-insensitive, so make the regex case-insensitive.
flags |= re.IGNORECASE
self.re = re.compile(join_regex(fnpats), flags=flags)
def __repr__(self):
return "<FnmatchMatcher %r>" % self.pats
def info(self):
"""A list of strings for displaying when dumping state."""
return self.pats
def match(self, fpath):
"""Does `fpath` match one of our file name patterns?"""
return self.re.match(fpath) is not None
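# Usage sketch (patterns are illustrative):
#   matcher = FnmatchMatcher(['*/tests/*', '*/vendor/*'])
#   matcher.match('/repo/pkg/tests/test_x.py')   # True
#   matcher.match('/repo/pkg/core.py')           # False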
def sep(s):
"""Find the path separator used in this string, or os.sep if none."""
sep_match = re.search(r"[\\/]", s)
if sep_match:
the_sep = sep_match.group(0)
else:
the_sep = os.sep
return the_sep
class PathAliases(object):
"""A collection of aliases for paths.
When combining data files from remote machines, often the paths to source
code are different, for example, due to OS differences, or because of
serialized checkouts on continuous integration machines.
A `PathAliases` object tracks a list of pattern/result pairs, and can
map a path through those aliases to produce a unified path.
"""
def __init__(self):
self.aliases = []
def pprint(self): # pragma: debugging
"""Dump the important parts of the PathAliases, for debugging."""
for regex, result, _, _ in self.aliases:
print("{0!r} --> {1!r}".format(regex.pattern, result))
def add(self, pattern, result):
"""Add the `pattern`/`result` pair to the list of aliases.
`pattern` is an `fnmatch`-style pattern. `result` is a simple
string. When mapping paths, if a path starts with a match against
`pattern`, then that match is replaced with `result`. This models
isomorphic source trees being rooted at different places on two
different machines.
`pattern` can't end with a wildcard component, since that would
match an entire tree, and not just its root.
"""
if len(pattern) > 1:
pattern = pattern.rstrip(r"\/")
# The pattern can't end with a wildcard component.
if pattern.endswith("*"):
raise CoverageException("Pattern must not end with wildcards.")
pattern_sep = sep(pattern)
# The pattern is meant to match a filepath. Let's make it absolute
# unless it already is, or is meant to match any prefix.
if not pattern.startswith('*') and not isabs_anywhere(pattern):
pattern = abs_file(pattern)
if not pattern.endswith(pattern_sep):
pattern += pattern_sep
# Make a regex from the pattern. fnmatch always adds a \Z to
# match the whole string, which we don't want, so we remove the \Z.
# While removing it, we only replace \Z if followed by paren, or at
# end, to keep from destroying a literal \Z in the pattern.
regex_pat = fnmatch.translate(pattern)
regex_pat = re.sub(r'\\Z(\(|$)', r'\1', regex_pat)
# We want */a/b.py to match on Windows too, so change slash to match
# either separator.
regex_pat = regex_pat.replace(r"\/", r"[\\/]")
# We want case-insensitive matching, so add that flag.
regex = re.compile(r"(?i)" + regex_pat)
# Normalize the result: it must end with a path separator.
result_sep = sep(result)
result = result.rstrip(r"\/") + result_sep
self.aliases.append((regex, result, pattern_sep, result_sep))
def map(self, path):
"""Map `path` through the aliases.
`path` is checked against all of the patterns. The first pattern to
match is used to replace the root of the path with the result root.
Only one pattern is ever used. If no patterns match, `path` is
returned unchanged.
The separator style in the result is made to match that of the result
in the alias.
Returns the mapped path. If a mapping has happened, this is a
canonical path. If no mapping has happened, it is the original value
of `path` unchanged.
"""
for regex, result, pattern_sep, result_sep in self.aliases:
m = regex.match(path)
if m:
new = path.replace(m.group(0), result)
if pattern_sep != result_sep:
new = new.replace(pattern_sep, result_sep)
new = canonical_filename(new)
return new
return path
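# Usage sketch (paths are illustrative): remap a CI checkout onto a local tree.
#   aliases = PathAliases()
#   aliases.add('/jenkins/build/*/src', './src')
#   aliases.map('/jenkins/build/1234/src/pkg/mod.py')
#   # -> the canonical (absolute) form of './src/pkg/mod.py'; unmatched paths are returned unchanged.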
def find_python_files(dirname):
"""Yield all of the importable Python files in `dirname`, recursively.
To be importable, the files have to be in a directory with a __init__.py,
except for `dirname` itself, which isn't required to have one. The
assumption is that `dirname` was specified directly, so the user knows
best, but sub-directories are checked for a __init__.py to be sure we only
find the importable files.
"""
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
continue
for filename in filenames:
# We're only interested in files that look like reasonable Python
# files: Must end with .py or .pyw, and must not have certain funny
# characters that probably mean they are editor junk.
if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
yield os.path.join(dirpath, filename)
|
the-stack_106_20222
|
from contextlib import closing
import argparse
import json
import urllib2
import requests
import cStringIO
from PIL import Image
from PIL import ImageDraw
def highlight_faces(image_url, faces, output_filename):
"""Draws a polygon around the faces, then saves to output_filename.
Args:
image_url: a URL containing an image
faces: a list of faces found in the file. This should be in the format
returned by the Vision API.
output_filename: the name of the image file to be created, where the
faces have polygons drawn around them.
"""
# Context manager for urllib2.urlopen requires `contextlib` library
# Source: http://stackoverflow.com/a/14849166/234233
with closing(urllib2.urlopen(image_url)) as img:
if img.headers.maintype != "image":
raise TypeError("Invalid filetype given")
# Source: http://stackoverflow.com/a/7391991/234233
img_file = cStringIO.StringIO(img.read())
im = Image.open(img_file)
draw = ImageDraw.Draw(im)
for face in faces["responses"][0]["faceAnnotations"]:
box = [(v.get("x", 0.0), v.get("y", 0.0)) for v in
face["boundingPoly"]["vertices"]]
draw.line(box + [box[0]], width=5, fill="#00ff00")
del draw
im.save(output_filename)
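# Illustrative shape of the `faces` argument (field names follow what this function
# indexes into; the coordinate values are made up):
#   {"responses": [{"faceAnnotations": [
#       {"boundingPoly": {"vertices": [{"x": 10, "y": 20}, {"x": 110, "y": 20},
#                                      {"x": 110, "y": 140}, {"x": 10, "y": 140}]}}
#   ]}]}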
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Detects faces in the given image."
)
parser.add_argument(
"-i", "--image_url",
help="The image URL to send to Google Cloud Vision API ",
required=True
)
parser.add_argument(
"-m", "--max_results",
help="the max number of entities to detect. Default: %(default)s",
default=4,
type=int
)
parser.add_argument(
"-e", "--endpoint",
help="The API Gateway endpoint to use",
required=True
)
parser.add_argument(
"-o", "--output",
help="The filename of the output image. Default: %(default)s",
default="images/highlighted-faces.jpg"
)
args = parser.parse_args()
post_params = {
"image_url": args.image_url,
"detect_type": "FACE_DETECTION",
"max_results": args.max_results
}
# Lazy and used requests in addition to urllib2
r = requests.post(args.endpoint,
data=json.dumps(post_params),
headers={'content-type': 'application/json'})
detection_results = r.json()
highlight_faces(args.image_url, detection_results, args.output)
|
the-stack_106_20223
|
import psycopg2
from psycopg2.extras import execute_values
import fastkml as fk
import shapely.wkt
from shapely.geometry.point import Point
import sys
if len(sys.argv) < 3:
print("Usage: python load_takeout.py <userid> <Location History.json>")
sys.exit(1)
userid, location_file = sys.argv[1], sys.argv[2]
print(f"Loading {location_file} data for userid {userid}")
conn = psycopg2.connect("dbname=covid19 user=covid19 port=5434\
password=covid19databasepassword host=localhost")
k = fk.KML()
k.from_string(open(location_file).read())
doc = list(k.features())[0]
values = []
cur = conn.cursor()
for point in doc.features():
    if not isinstance(point.geometry, Point):
continue
start = point.begin.strftime('%Y-%m-%dT%H:%M:%S')
end = point.end.strftime('%Y-%m-%dT%H:%M:%S')
print(point.address,
'|',
point.name,
'|',
point.begin.strftime('%Y-%m-%dT%H:%M:%S'),
point.end.strftime('%Y-%m-%dT%H:%M:%S'),
point.geometry)
geopoint = Point(point.geometry.x, point.geometry.y)
geo = shapely.wkt.dumps(geopoint)
geopoint = f"SRID=4326;{geo}"
#values.append(
value = (userid, start, end, geopoint, point.name, point.address)
#)
cur.execute("INSERT INTO\
location_history(userid, start_time, end_time, \
geographic_location, semantic_location, address) \
VALUES %s", [value])
conn.commit()
|
the-stack_106_20224
|
import unittest
from unittest import mock
from betfairlightweight.resources.baseresource import BaseResource
from betfairlightweight.streaming.cache import (
OrderBookCache,
OrderBookRunner,
UnmatchedOrder,
MarketBookCache,
RunnerBook,
Available,
)
from betfairlightweight.exceptions import CacheError
from tests.unit.tools import create_mock_json
class TestAvailable(unittest.TestCase):
def setUp(self):
self.prices = [[1, 1.02, 34.45], [0, 1.01, 12]]
self.available = Available(self.prices, 2)
def test_init(self):
assert self.available.prices == self.prices
assert self.available.deletion_select == 2
assert self.available.reverse is False
def test_sort(self):
self.available.sort()
assert self.available.prices == self.prices
assert self.available.serialise == [
{"price": 1.01, "size": 12},
{"price": 1.02, "size": 34.45},
]
def test_sort_short(self):
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
available = Available(current, 1)
assert available.serialise == [
{"price": 1.02, "size": 1157.21},
{"price": 13, "size": 28.01},
{"price": 27, "size": 0.95},
]
def test_clear(self):
self.available.clear()
assert self.available.prices == []
def test_update_available_new_update(self):
# [price, size]
book_update = [[30, 6.9]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = [[1.02, 1157.21], [13, 28.01], [27, 0.95], [30, 6.9]]
available = Available(current, 1)
available.update(book_update)
assert current == expected
book_update = [[30, 6.9], [1.01, 12]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = [[1.01, 12], [1.02, 1157.21], [13, 28.01], [27, 0.95], [30, 6.9]]
available = Available(current, 1)
available.update(book_update)
assert current == expected
# [position, price, size]
book_update = [[0, 36, 0.57]]
current = []
expected = [[0, 36, 0.57]]
available = Available(current, 2)
available.update(book_update)
assert available.prices == expected
def test_update_available_new_replace(self):
# [price, size]
book_update = [[27, 6.9]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = [[1.02, 1157.21], [13, 28.01], [27, 6.9]]
available = Available(current, 1)
available.update(book_update)
assert current == expected
# [position, price, size]
book_update = [[0, 36, 0.57]]
current = [[0, 36, 10.57], [1, 38, 3.57]]
expected = [[0, 36, 0.57], [1, 38, 3.57]]
available = Available(current, 2)
available.update(book_update)
assert current == expected
# tests handling of betfair bug, http://forum.bdp.betfair.com/showthread.php?t=3351
book_update = [[2, 0, 0], [1, 1.01, 9835.74], [0, 1.02, 1126.22]]
current = [[1, 1.01, 9835.74], [0, 1.02, 1126.22]]
expected = [[0, 1.02, 1126.22], [1, 1.01, 9835.74]]
available = Available(current, 2)
available.update(book_update)
assert current == expected
def test_update_available_new_remove(self):
book_update = [[27, 0]]
current = [[27, 0.95], [13, 28.01], [1.02, 1157.21]]
expected = [[1.02, 1157.21], [13, 28.01]]
available = Available(current, 1)
available.update(book_update)
assert current == expected
# [position, price, size]
book_update = [[0, 36, 0], [1, 38, 0], [0, 38, 3.57]]
current = [[0, 36, 10.57], [1, 38, 3.57]]
expected = [[0, 38, 3.57]]
available = Available(current, 2)
available.update(book_update)
assert current == expected
class TestMarketBookCache(unittest.TestCase):
def setUp(self):
self.market_book_cache = MarketBookCache(
**{"marketDefinition": {"runners": {}}}
)
def test_error(self):
with self.assertRaises(CacheError):
self.market_book_cache = MarketBookCache()
@mock.patch("betfairlightweight.streaming.cache.MarketBookCache.strip_datetime")
def test_update_cache_md(self, mock_strip_datetime):
publish_time = mock.Mock()
market_change = create_mock_json("tests/resources/streaming_mcm_UPDATE_md.json")
book_data = market_change.json().get("mc")
for book in book_data:
self.market_book_cache.update_cache(book, publish_time)
mock_strip_datetime.assert_called_with(publish_time)
assert self.market_book_cache.market_definition == book.get(
"marketDefinition"
)
self.assertEqual(self.market_book_cache.streaming_update, book)
@mock.patch("betfairlightweight.streaming.cache.MarketBookCache.strip_datetime")
def test_update_cache_tv(self, mock_strip_datetime):
publish_time = mock.Mock()
market_change = create_mock_json("tests/resources/streaming_mcm_UPDATE_tv.json")
book_data = market_change.json().get("mc")
for book in book_data:
self.market_book_cache.update_cache(book, publish_time)
mock_strip_datetime.assert_called_with(publish_time)
assert self.market_book_cache.total_matched == book.get("tv")
self.assertEqual(self.market_book_cache.streaming_update, book)
# @mock.patch('betfairlightweight.resources.streamingresources.MarketBookCache.strip_datetime')
# def test_update_cache_rc(self, mock_strip_datetime):
# publish_time = mock.Mock()
# market_change = create_mock_json('tests/resources/streaming_mcm_UPDATE.json')
# book_data = market_change.json().get('mc')
#
# for book in book_data:
# self.market_book_cache.update_cache(book, publish_time)
# mock_strip_datetime.assert_called_with(publish_time)
#
# assert self.market_book_cache.total_matched == book.get('tv')
@mock.patch(
"betfairlightweight.streaming.cache.MarketBookCache.serialise",
new_callable=mock.PropertyMock,
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
@mock.patch("betfairlightweight.streaming.cache.MarketBook")
def test_create_resource(
self, mock_market_book, mock_market_definition, mock_serialise
):
# lightweight
market_book = self.market_book_cache.create_resource(1234, True)
assert market_book == {
"streaming_update": self.market_book_cache.streaming_update,
"streaming_unique_id": 1234,
"streaming_snap": False,
}
assert market_book == mock_serialise()
# not lightweight
market_book = self.market_book_cache.create_resource(1234, False)
assert market_book == mock_market_book()
@mock.patch(
"betfairlightweight.streaming.cache.MarketBookCache.serialise",
new_callable=mock.PropertyMock,
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.MarketDefinition")
@mock.patch("betfairlightweight.streaming.cache.MarketBook")
def test_create_resource_snap(self, *_):
market_book = self.market_book_cache.create_resource(1234, True, True)
assert market_book == {
"streaming_update": self.market_book_cache.streaming_update,
"streaming_unique_id": 1234,
"streaming_snap": True,
}
def test_update_runner_dict(self):
assert self.market_book_cache.runner_dict == {}
class Runner:
def __init__(self, selection_id, name, handicap):
self.selection_id = selection_id
self.name = name
self.handicap = handicap
(a, b) = (Runner(123, "a", 1.25), Runner(456, "b", -0.25))
self.market_book_cache.runners = [a, b]
self.market_book_cache._update_runner_dict()
assert self.market_book_cache.runner_dict == {(123, 1.25): a, (456, -0.25): b}
def test_init_multiple_rc(self):
# Initialize data with multiple rc entries for the same selection
data = {"marketDefinition": {"runners": {}}}
data["rc"] = [
{"atb": [[1.01, 200]], "id": 13536143},
{"atl": [[1000.0, 200]], "id": 13536143},
]
market_book_cache = MarketBookCache(**data)
assert len(market_book_cache.runners) == len(market_book_cache.runner_dict)
def test_closed(self):
self.assertFalse(self.market_book_cache.closed)
self.market_book_cache.market_definition = {"status": "CLOSED"}
self.assertTrue(self.market_book_cache.closed)
class TestRunnerBook(unittest.TestCase):
def setUp(self):
self.runner_book = RunnerBook(**{"id": 123})
def test_update_traded(self):
self.mock_traded = mock.Mock()
self.runner_book.traded = self.mock_traded
self.runner_book.update_traded([])
self.mock_traded.clear.assert_called_with()
self.runner_book.update_traded([1, 2])
self.mock_traded.update.assert_called_with([1, 2])
def test_serialise_back(self):
mock_available_to_back = mock.Mock()
mock_available_to_back.prices = True
mock_best_available_to_back = mock.Mock()
mock_best_available_to_back.prices = True
mock_best_display_available_to_back = mock.Mock()
mock_best_display_available_to_back.prices = True
self.runner_book.available_to_back = mock_available_to_back
assert (
self.runner_book.serialise_available_to_back()
== mock_available_to_back.serialise
)
mock_available_to_back.prices = False
self.runner_book.best_available_to_back = mock_best_available_to_back
assert (
self.runner_book.serialise_available_to_back()
== mock_best_available_to_back.serialise
)
mock_best_available_to_back.prices = False
self.runner_book.best_display_available_to_back = (
mock_best_display_available_to_back
)
assert (
self.runner_book.serialise_available_to_back()
== mock_best_display_available_to_back.serialise
)
def test_serialise_lay(self):
mock_available_to_lay = mock.Mock()
mock_available_to_lay.prices = True
mock_best_available_to_lay = mock.Mock()
mock_best_available_to_lay.prices = True
mock_best_display_available_to_lay = mock.Mock()
mock_best_display_available_to_lay.prices = True
self.runner_book.available_to_lay = mock_available_to_lay
assert (
self.runner_book.serialise_available_to_lay()
== mock_available_to_lay.serialise
)
mock_available_to_lay.prices = False
self.runner_book.best_available_to_lay = mock_best_available_to_lay
assert (
self.runner_book.serialise_available_to_lay()
== mock_best_available_to_lay.serialise
)
mock_best_available_to_lay.prices = False
self.runner_book.best_display_available_to_lay = (
mock_best_display_available_to_lay
)
assert (
self.runner_book.serialise_available_to_lay()
== mock_best_display_available_to_lay.serialise
)
def test_empty_serialise(self):
runner_definition = {"bdp": None}
serialise_d = self.runner_book.serialise(runner_definition)
ex = serialise_d["ex"]
# all empty lists
assert all(not ex[a] for a in ex.keys())
sp = serialise_d["sp"]
# all 'None' or empty lists
assert all(not sp[a] for a in sp.keys())
class TestOrderBookCache(unittest.TestCase):
def setUp(self):
self.order_book_cache = OrderBookCache(**{})
self.runner = mock.Mock()
self.runner.selection_id = 10895629
self.runner.handicap = 0
self.runner.serialise_orders = mock.Mock(return_value=[])
self.runner.unmatched_orders = [1]
self.order_book_cache.runners = {(10895629, 0): self.runner}
def test_full_image(self):
self.order_book_cache.runners = {}
mock_response = create_mock_json(
"tests/resources/streaming_ocm_FULL_IMAGE.json"
)
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
self.assertEqual(len(self.order_book_cache.runners), 5)
for k, v in self.order_book_cache.runners.items():
self.assertEqual(len(v.unmatched_orders), 1)
def test_update_cache(self):
mock_response = create_mock_json("tests/resources/streaming_ocm_UPDATE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
for order_changes in order_book.get("orc"):
# self.runner.matched_lays.update.assert_called_with(order_changes.get('ml', []))
# self.runner.matched_backs.update.assert_called_with(order_book.get('mb', []))
self.runner.update_unmatched.assert_called_with(
order_changes.get("uo", [])
)
@mock.patch("betfairlightweight.streaming.cache.OrderBookRunner")
def test_update_cache_new(self, mock_order_book_runner):
self.order_book_cache.runners = {(108956, 0): self.runner}
mock_response = create_mock_json("tests/resources/streaming_ocm_UPDATE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
for order_changes in order_book.get("orc"):
mock_order_book_runner.assert_called_with(**order_changes)
def test_update_cache_closed(self):
mock_response = create_mock_json("tests/resources/streaming_ocm_SUB_IMAGE.json")
for order_book in mock_response.json().get("oc"):
self.order_book_cache.update_cache(order_book, 1234)
self.assertEqual(self.order_book_cache.streaming_update, order_book)
self.assertTrue(self.order_book_cache.closed)
@mock.patch(
"betfairlightweight.streaming.cache.OrderBookCache.serialise",
new_callable=mock.PropertyMock,
return_value={},
)
@mock.patch("betfairlightweight.streaming.cache.CurrentOrders")
def test_create_resource(self, mock_current_orders, mock_serialise):
# lightweight
current_orders = self.order_book_cache.create_resource(123, True)
assert current_orders == mock_serialise()
assert current_orders == {
"streaming_update": self.order_book_cache.streaming_update,
"streaming_unique_id": 123,
"streaming_snap": False,
}
# not lightweight
current_orders = self.order_book_cache.create_resource(123, False)
assert current_orders == mock_current_orders()
def test_serialise(self):
mock_runner_one = mock.Mock()
mock_runner_one.serialise_orders.return_value = [1]
mock_runner_two = mock.Mock()
mock_runner_two.serialise_orders.return_value = [2, 3]
self.order_book_cache.runners = {
(123, 0): mock_runner_one,
(123, 1): mock_runner_two,
}
serialised = self.order_book_cache.serialise
assert serialised == {"currentOrders": [1, 2, 3], "moreAvailable": False}
class TestOrderBookRunner(unittest.TestCase):
def setUp(self):
uo = [
{
"id": 1,
"p": "a",
"s": "a",
"side": "a",
"ot": "a",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "a",
},
{
"id": 2,
"p": "b",
"s": "a",
"side": "a",
"ot": "a",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "b",
},
]
self.order_book_runner = OrderBookRunner(
**{"id": 1, "ml": [], "mb": [], "uo": uo}
)
def test_update_unmatched(self):
unmatched_orders = [
{
"id": 2,
"p": "b",
"s": "a",
"side": "a",
"ot": "a",
"pd": "a",
"sm": "a",
"sr": "a",
"sl": "a",
"sc": "a",
"sv": "a",
"rfo": "a",
"rfs": "a",
"status": "c",
}
]
self.order_book_runner.update_unmatched(unmatched_orders)
self.assertEqual(self.order_book_runner.unmatched_orders[1].status, "a")
self.assertEqual(self.order_book_runner.unmatched_orders[2].status, "c")
def test_serialise_orders(self):
mock_order = mock.Mock()
mock_order.id = 123
mock_order_two = mock.Mock()
mock_order_two.id = 456
unmatched_orders = {
mock_order.id: mock_order,
mock_order_two.id: mock_order_two,
}
self.order_book_runner.unmatched_orders = unmatched_orders
def mock_serialise(*args, **kwargs):
unmatched_orders[789] = "SYM"
return
mock_order_two.serialise = mock_serialise
        assert len(self.order_book_runner.serialise_orders("1.1")) == 2
class TestUnmatchedOrder(unittest.TestCase):
def setUp(self):
order = {
"id": 1,
"p": 2,
"s": 3,
"side": "L",
"status": "E",
"pt": "L",
"ot": "L",
"pd": 8,
"sm": 9,
"sr": 10,
"sl": 11,
"sc": 12,
"sv": 13,
"rfo": 14,
"rfs": 15,
"ld": 16,
"lsrc": 17,
"error": "test",
"md": 4,
}
self.unmatched_order = UnmatchedOrder(**order)
def test_init(self):
assert self.unmatched_order.bet_id == 1
assert self.unmatched_order.price == 2
assert self.unmatched_order.size == 3
assert self.unmatched_order.side == "L"
assert self.unmatched_order.status == "E"
assert self.unmatched_order.persistence_type == "L"
assert self.unmatched_order.order_type == "L"
assert self.unmatched_order.placed_date == BaseResource.strip_datetime(8)
assert self.unmatched_order.size_matched == 9
assert self.unmatched_order.size_remaining == 10
assert self.unmatched_order.size_lapsed == 11
assert self.unmatched_order.size_cancelled == 12
assert self.unmatched_order.size_voided == 13
assert self.unmatched_order.reference_order == 14
assert self.unmatched_order.reference_strategy == 15
assert self.unmatched_order.lapsed_date == BaseResource.strip_datetime(16)
assert self.unmatched_order.lapse_status_reason_code == 17
def test_placed_date_string(self):
now = BaseResource.strip_datetime(8)
assert self.unmatched_order.placed_date_string == now.strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
def test_matched_date_string(self):
now = BaseResource.strip_datetime(4)
assert self.unmatched_order.matched_date_string == now.strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
def test_serialise(self):
assert self.unmatched_order.serialise("1.23", 12345, 0.0) == {
"sizeLapsed": 11,
"persistenceType": "LAPSE",
"sizeRemaining": 10,
"placedDate": "1970-01-01T00:00:00.008000Z",
"sizeVoided": 13,
"sizeCancelled": 12,
"betId": 1,
"customerOrderRef": 14,
"orderType": "LIMIT",
"marketId": "1.23",
"side": "LAY",
"selectionId": 12345,
"bspLiability": None,
"sizeMatched": 9,
"handicap": 0.0,
"averagePriceMatched": 0.0,
"status": "EXECUTABLE",
"customerStrategyRef": 15,
"regulatorCode": None,
"priceSize": {"price": 2, "size": 3},
"matchedDate": "1970-01-01T00:00:00.004000Z",
}
|
the-stack_106_20226
|
# -*- coding: UTF-8 -*-
import logging
import traceback
import simplejson as json
from django.contrib.auth.models import Group
from django.db.models import F
from django.http import HttpResponse
from common.utils.extend_json_encoder import ExtendJSONEncoder
from common.utils.permission import superuser_required
from sql.models import ResourceGroup, ResourceGroupRelations, Users, Instance
from sql.utils.workflow_audit import Audit
logger = logging.getLogger('default')
# Get the list of resource groups
@superuser_required
def group(request):
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
    # Filter resource groups whose name contains the search keyword
group_obj = ResourceGroup.objects.filter(group_name__contains=search)
group_count = group_obj.count()
group_list = group_obj[offset:limit].values("group_id", "group_name", "ding_webhook")
    # Serialize the QuerySet
rows = [row for row in group_list]
result = {"total": group_count, "rows": rows}
    # Return the query result
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
# Get objects already associated with a resource group
def associated_objects(request):
"""
    type: (0, 'user'), (1, 'instance')
"""
group_id = int(request.POST.get('group_id'))
object_type = request.POST.get('type')
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
search = request.POST.get('search', '')
if object_type:
rows_obj = ResourceGroupRelations.objects.filter(group_id=group_id,
object_type=object_type,
object_name__contains=search)
else:
rows_obj = ResourceGroupRelations.objects.filter(group_id=group_id, object_name__contains=search)
count = rows_obj.count()
rows = rows_obj[offset:limit].values('id', 'object_id', 'object_name', 'group_id', 'group_name', 'object_type',
'create_time')
rows = [row for row in rows]
result = {'status': 0, 'msg': 'ok', "total": count, "rows": rows}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder), content_type='application/json')
# Get objects not yet associated with a resource group
def unassociated_objects(request):
"""
    type: (0, 'user'), (1, 'instance')
"""
group_id = int(request.POST.get('group_id'))
object_type = int(request.POST.get('object_type'))
associated_object_ids = [object_id['object_id'] for object_id in
ResourceGroupRelations.objects.filter(group_id=group_id,
object_type=object_type).values('object_id')]
if object_type == 0:
rows = Users.objects.exclude(pk__in=associated_object_ids).annotate(object_id=F('pk'),
object_name=F('display')
).values('object_id', 'object_name')
elif object_type == 1:
rows = Instance.objects.exclude(pk__in=associated_object_ids).annotate(object_id=F('pk'),
object_name=F('instance_name')
).values('object_id', 'object_name')
else:
rows = []
rows = [row for row in rows]
result = {'status': 0, 'msg': 'ok', "rows": rows, "total": len(rows)}
return HttpResponse(json.dumps(result), content_type='application/json')
# Get the instances associated with a resource group
def instances(request):
group_name = request.POST.get('group_name')
group_id = ResourceGroup.objects.get(group_name=group_name).group_id
type = request.POST.get('type')
    # First fetch all instance ids associated with this resource group
instance_ids = [group['object_id'] for group in
ResourceGroupRelations.objects.filter(group_id=group_id, object_type=1).values('object_id')]
    # Fetch the instance details
instances_ob = Instance.objects.filter(pk__in=instance_ids, type=type).values('id', 'instance_name')
rows = [row for row in instances_ob]
result = {'status': 0, 'msg': 'ok', "data": rows}
return HttpResponse(json.dumps(result), content_type='application/json')
# Associate objects with a resource group
@superuser_required
def addrelation(request):
"""
    type: (0, 'user'), (1, 'instance')
"""
group_id = int(request.POST.get('group_id'))
object_type = request.POST.get('object_type')
object_list = json.loads(request.POST.get('object_info'))
group_name = ResourceGroup.objects.get(group_id=group_id).group_name
try:
ResourceGroupRelations.objects.bulk_create(
[ResourceGroupRelations(object_id=int(object.split(',')[0]),
object_type=object_type,
object_name=object.split(',')[1],
group_id=group_id,
group_name=group_name) for object in object_list])
result = {'status': 0, 'msg': 'ok'}
except Exception as e:
logger.error(traceback.format_exc())
result = {'status': 1, 'msg': str(e)}
return HttpResponse(json.dumps(result), content_type='application/json')
# Get the approval workflow of a resource group
def auditors(request):
group_name = request.POST.get('group_name')
workflow_type = request.POST['workflow_type']
result = {'status': 0, 'msg': 'ok', 'data': {'auditors': '', 'auditors_display': ''}}
if group_name:
group_id = ResourceGroup.objects.get(group_name=group_name).group_id
audit_auth_groups = Audit.settings(group_id=group_id, workflow_type=workflow_type)
else:
result['status'] = 1
result['msg'] = '参数错误'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Resolve the auth group names
if audit_auth_groups:
        # Validate the configured auth groups
for auth_group_id in audit_auth_groups.split(','):
try:
Group.objects.get(id=auth_group_id)
except Exception:
result['status'] = 1
result['msg'] = '审批流程权限组不存在,请重新配置!'
return HttpResponse(json.dumps(result), content_type='application/json')
audit_auth_groups_name = '->'.join(
[Group.objects.get(id=auth_group_id).name for auth_group_id in audit_auth_groups.split(',')])
result['data']['auditors'] = audit_auth_groups
result['data']['auditors_display'] = audit_auth_groups_name
return HttpResponse(json.dumps(result), content_type='application/json')
# Configure the approval workflow of a resource group
@superuser_required
def changeauditors(request):
auth_groups = request.POST.get('audit_auth_groups')
group_name = request.POST.get('group_name')
workflow_type = request.POST.get('workflow_type')
result = {'status': 0, 'msg': 'ok', 'data': []}
    # Update the audit configuration through the workflow module
group_id = ResourceGroup.objects.get(group_name=group_name).group_id
audit_auth_groups = [str(Group.objects.get(name=auth_group).id) for auth_group in auth_groups.split(',')]
try:
Audit.change_settings(group_id, workflow_type, ','.join(audit_auth_groups))
except Exception as msg:
logger.error(traceback.format_exc())
result['msg'] = str(msg)
result['status'] = 1
    # Return the result
return HttpResponse(json.dumps(result), content_type='application/json')
|
the-stack_106_20228
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Frank Dornheim <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
import os
from ansible.errors import AnsibleError
from ansible.inventory.data import InventoryData
from ansible_collections.community.general.plugins.inventory.lxd import InventoryModule
HOST_COMPARATIVE_DATA = {
'ansible_connection': 'ssh', 'ansible_host': '10.98.143.199', 'ansible_lxd_os': 'ubuntu', 'ansible_lxd_release': 'focal',
'ansible_lxd_profile': ['default'], 'ansible_lxd_state': 'running', 'ansible_lxd_location': 'Berlin',
'ansible_lxd_vlan_ids': {'my-macvlan': 666}, 'inventory_hostname': 'vlantest', 'inventory_hostname_short': 'vlantest'}
GROUP_COMPARATIVE_DATA = {
'all': [], 'ungrouped': [], 'testpattern': ['vlantest'], 'vlan666': ['vlantest'], 'locationBerlin': ['vlantest'],
'osUbuntu': ['vlantest'], 'releaseFocal': ['vlantest'], 'releaseBionic': [], 'profileDefault': ['vlantest'],
'profileX11': [], 'netRangeIPv4': ['vlantest'], 'netRangeIPv6': ['vlantest']}
GROUP_Config = {
'testpattern': {'type': 'pattern', 'attribute': 'test'},
'vlan666': {'type': 'vlanid', 'attribute': 666},
'locationBerlin': {'type': 'location', 'attribute': 'Berlin'},
'osUbuntu': {'type': 'os', 'attribute': 'ubuntu'},
'releaseFocal': {'type': 'release', 'attribute': 'focal'},
'releaseBionic': {'type': 'release', 'attribute': 'bionic'},
'profileDefault': {'type': 'profile', 'attribute': 'default'},
'profileX11': {'type': 'profile', 'attribute': 'x11'},
'netRangeIPv4': {'type': 'network_range', 'attribute': '10.98.143.0/24'},
'netRangeIPv6': {'type': 'network_range', 'attribute': 'fd42:bd00:7b11:2167:216:3eff::/96'}}
@pytest.fixture
def inventory():
inv = InventoryModule()
inv.inventory = InventoryData()
# Test Values
inv.data = inv.load_json_data('tests/unit/plugins/inventory/fixtures/lxd_inventory.atd') # Load Test Data
inv.groupby = GROUP_Config
inv.prefered_container_network_interface = 'eth'
inv.prefered_container_network_family = 'inet'
inv.filter = 'running'
inv.dump_data = False
return inv
def test_verify_file(tmp_path, inventory):
file = tmp_path / "foobar.lxd.yml"
file.touch()
assert inventory.verify_file(str(file)) is True
def test_verify_file_bad_config(inventory):
assert inventory.verify_file('foobar.lxd.yml') is False
def test_build_inventory_hosts(inventory):
"""Load example data and start the inventoryto test the host generation.
After the inventory plugin has run with the test data, the result of the host is checked."""
inventory._populate()
generated_data = inventory.inventory.get_host('vlantest').get_vars()
eq = True
for key, value in HOST_COMPARATIVE_DATA.items():
if generated_data[key] != value:
eq = False
assert eq
def test_build_inventory_groups(inventory):
"""Load example data and start the inventory to test the group generation.
    After the inventory plugin has run with the test data, the resulting groups are checked."""
inventory._populate()
generated_data = inventory.inventory.get_groups_dict()
eq = True
for key, value in GROUP_COMPARATIVE_DATA.items():
if generated_data[key] != value:
eq = False
assert eq
def test_build_inventory_groups_with_no_groupselection(inventory):
"""Load example data and start the inventory to test the group generation with groupby is none.
After the inventory plugin has run with the test data, the result of the host is checked."""
inventory.groupby = None
inventory._populate()
generated_data = inventory.inventory.get_groups_dict()
group_comparative_data = {'all': [], 'ungrouped': []}
eq = True
print("data: {0}".format(generated_data))
for key, value in group_comparative_data.items():
if generated_data[key] != value:
eq = False
assert eq
|
the-stack_106_20229
|
import os
import random
import time
from copy import deepcopy
# Print full tensors instead of abbreviating with ellipses (see np.set_printoptions below)
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary, BOS, EOS, PAD
from src.decoding.beam_search_middle import beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
np.set_printoptions(threshold=np.inf)
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
        lengths = [len(s) for s in inputs[-1]]  # lengths of the last field, used for length-based sorting
sorted_indices = np.argsort(lengths)
# sorting inputs
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
# split shards
total_batch = sorted_indices.shape[0] # total number of batches
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"""
    Args:
        seqs_x: list of source token-id sequences.
        seqs_y: optional list of target token-id sequences.
        cuda ('bool'): whether to move the padded tensors to GPU.
        batch_first ('bool'): whether the batch dimension comes first.
    Returns:
        Padded LongTensor(s) with sentence-boundary tokens added.
"""
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
        # Pre-fill the batch array with the padding value
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
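    # Wrap each sentence with sentence-boundary tokens, then pad the batch to the longest sequence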
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [EOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
def compute_forward(model,
critic,
seqs_x,
seqs_y,
eval=False,
normalization=1.0,
norm_by_words=False
):
"""
:type model: nn.Module
:type critic: NMTCriterion
"""
if not eval:
model.train()
critic.train()
# For training
with torch.enable_grad():
outputs, sorted_trg = model(seqs_x, seqs_y)
loss = critic(inputs=outputs, labels=sorted_trg, reduce=False, normalization=normalization)
if norm_by_words:
                # normalize the loss per target token rather than per sentence
words_norm = sorted_trg.ne(PAD).float().sum(1)
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
# For compute loss
with torch.no_grad():
log_probs, sorted_trg = model(seqs_x, seqs_y)
loss = critic(inputs=log_probs, labels=sorted_trg, normalization=normalization, reduce=True)
return loss.item()
def loss_validation(model, critic, valid_iterator):
"""
:type model: Transformer
:type critic: NMTCriterion
:type valid_iterator: DataIterator
"""
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
_, seqs_x, seqs_y = batch
n_sents += len(seqs_x)
n_tokens += sum(len(s) for s in seqs_y)
x, y = prepare_data(seqs_x, seqs_y, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model,
critic=critic,
seqs_x=x,
seqs_y=y,
eval=True)
if np.isnan(loss):
WARN("NaN detected!")
sum_loss += float(loss)
return float(sum_loss / n_sents)
def bleu_validation(uidx,
valid_iterator,
model,
bleu_scorer,
vocab_tgt,
batch_size,
valid_dir="./valid",
max_steps=10,
beam_size=5,
alpha=-1.0
):
model.eval()
numbers = []
trans = []
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator(batch_size=batch_size)
for batch in valid_iter:
seq_nums = batch[0]
numbers += seq_nums
seqs_x = batch[1]
seqs_y = batch[2]
infer_progress_bar.update(len(seqs_x))
x, y = prepare_data(seqs_x, seqs_y, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = beam_search(nmt_model=model, beam_size=beam_size, max_steps=max_steps, src_seqs=x, alpha=alpha,
trg_seqs=y)
word_ids = word_ids.cpu().numpy().tolist()
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
x_tokens = []
for wid in sent_t[0]:
if wid == EOS:
break
x_tokens.append(vocab_tgt.id2token(wid))
if len(x_tokens) > 0:
trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
else:
trans.append('%s' % vocab_tgt.id2token(EOS))
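    # The iterator numbers each sentence; argsort restores the original corpus order before writing hypotheses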
origin_order = np.argsort(numbers).tolist()
trans = [trans[ii] for ii in origin_order]
infer_progress_bar.close()
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))
with open(hyp_path, 'w') as f:
for line in trans:
f.write('%s\n' % line)
with open(hyp_path) as f:
bleu_v = bleu_scorer.corpus_bleu(f)
return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
"""
Args:
nmt_model: model.
pretrain_path ('str'): path to pretrained model.
        device ('str' or 'torch.device'): device onto which the pretrained
            tensors are mapped when loading.
exclude_prefix ('dict'): excluding parameters with specific names
for pretraining.
Raises:
ValueError: Size not match, parameter name not match or others.
"""
if exclude_prefix is None:
exclude_prefix = []
if pretrain_path != "":
INFO("Loading pretrained model from {}".format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for name, params in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO("Loading param: {}...".format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN("{}: {}".format(str(Exception), e))
INFO("Pretrained model loaded.")
def train(FLAGS):
"""
FLAGS:
saveto: str
reload: store_true
config_path: str
pretrain_path: str, default=""
model_name: str
log_path: str
"""
# write log of training to file.
write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
GlobalNames.USE_GPU = FLAGS.use_gpu
if not GlobalNames.USE_GPU:
CURRENT_DEVICE = "cpu"
else:
CURRENT_DEVICE = "cuda"
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
        configs = yaml.safe_load(f)
INFO(pretty_configs(configs))
# Add default configs
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
train_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['train_data'][0],
vocabulary=vocab_src,
max_len=data_configs['max_len'][0],
),
TextLineDataset(data_path=data_configs['train_data'][1],
vocabulary=vocab_tgt,
max_len=data_configs['max_len'][1],
),
shuffle=training_configs['shuffle']
)
valid_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['valid_data'][0],
vocabulary=vocab_src,
),
TextLineDataset(data_path=data_configs['valid_data'][1],
vocabulary=vocab_tgt,
)
)
training_iterator = DataIterator(dataset=train_bitext_dataset,
batch_size=train_batch_size,
use_bucket=training_configs['use_bucket'],
buffer_size=train_buffer_size,
batching_func=training_configs['batching_key'])
    # Used for BLEU validation
valid_iterator = DataIterator(dataset=valid_bitext_dataset,
batch_size=training_configs['valid_batch_size'],
use_bucket=True, buffer_size=100000, numbering=True)
bleu_scorer = SacreBLEUScorer(reference_path=data_configs["bleu_valid_reference"],
num_refs=data_configs["num_refs"],
lang_pair=data_configs["lang_pair"],
sacrebleu_args=training_configs["bleu_valid_configs"]['sacrebleu_args'],
postprocess=training_configs["bleu_valid_configs"]['postprocess']
)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
# ================================ Begin ======================================== #
# Build Model & Optimizer
# We would do steps below on after another
# 1. build models & criterion
# 2. move models & criterion to gpu if needed
# 3. load pre-trained model if needed
# 4. build optimizer
# 5. build learning rate scheduler if needed
# 6. load checkpoints if needed
# 0. Initial
model_collections = Collections()
checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs['num_kept_checkpoints']
)
best_model_saver = Saver(save_prefix=best_model_prefix, num_max_keeping=training_configs['num_kept_best_model'])
# 1. Build Model & Criterion
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
n_tgt_vocab=vocab_tgt.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for n, p in nmt_model.named_parameters()])
params_with_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_with_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# 2. Move to GPU
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
# 3. Load pretrained model if needed
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
# 4. Build optimizer
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'],
model=nmt_model,
lr=lrate,
grad_clip=optimizer_configs['grad_clip'],
optim_args=optimizer_configs['optimizer_params']
)
# 5. Build scheduler for optimizer if needed
if optimizer_configs['schedule_method'] is not None:
if optimizer_configs['schedule_method'] == "loss":
scheduler = ReduceOnPlateauScheduler(optimizer=optim,
**optimizer_configs["scheduler_configs"]
)
elif optimizer_configs['schedule_method'] == "noam":
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
# 6. build moving average
if training_configs['moving_average_method'] is not None:
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
named_params=nmt_model.named_parameters(),
alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# Reload from latest checkpoint
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
# ================================================================================== #
# Prepare training
eidx = model_collections.get_collection("eidx", [0])[-1]
uidx = model_collections.get_collection("uidx", [0])[-1]
bad_count = model_collections.get_collection("bad_count", [0])[-1]
oom_count = model_collections.get_collection("oom_count", [0])[-1]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf') # Max Float
# Timer for computing speed
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
# Build iterator and progress bar
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
total=len(training_iterator),
unit="sents"
)
for batch in training_iter: # batch_size
uidx += 1
if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
scheduler.step(global_step=uidx)
seqs_x, seqs_y = batch
n_samples_t = len(seqs_x)
n_words_t = sum(len(s) for s in seqs_y)
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.
            optim.zero_grad()  # clear accumulated gradients before processing this batch
try:
# Prepare data for update_cycle
for seqs_x_t, seqs_y_t in split_shard(seqs_x, seqs_y, split_size=training_configs['update_cycle']):
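                    # Gradient accumulation: every shard adds its gradients; optim.step() after this loop applies one update per batch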
x, y = prepare_data(seqs_x_t, seqs_y_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model,
critic=critic,
seqs_x=x,
seqs_y=y,
eval=False,
normalization=n_samples_t,
norm_by_words=training_configs["norm_by_words"])
train_loss += loss / y.size(1)
                optim.step()  # apply the gradients accumulated over the update cycle
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str(
'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx)
# ================================================================================== #
# Display some information
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
# words per second and sents per second
words_per_sec = cum_words / (timer.toc(return_seconds=True))
sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
# Reset timer
timer.tic()
cum_words = 0
cum_samples = 0
# ================================================================================== #
# Saving checkpoints
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
if not is_early_stop:
checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
# ================================================================================== #
# Loss Validation & Learning rate annealing
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
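                    # Evaluate with the moving-average weights; the original parameters are restored after validation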
valid_loss = loss_validation(model=nmt_model,
critic=critic,
valid_iterator=valid_iterator,
)
model_collections.add_to_collection("history_losses", valid_loss)
min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if optimizer_configs["schedule_method"] == "loss":
scheduler.step(metric=best_valid_loss)
# ================================================================================== #
# BLEU Validation & Early Stop
if True or should_trigger_by_steps(global_step=uidx, n_epoch=eidx,
every_n_step=training_configs['bleu_valid_freq'],
min_step=training_configs['bleu_valid_warmup'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_bleu = bleu_validation(uidx=uidx,
valid_iterator=valid_iterator,
batch_size=training_configs["bleu_valid_batch_size"],
model=nmt_model,
bleu_scorer=bleu_scorer,
vocab_tgt=vocab_tgt,
valid_dir=FLAGS.valid_path,
max_steps=training_configs["bleu_valid_configs"]["max_steps"],
beam_size=training_configs["bleu_valid_configs"]["beam_size"],
alpha=training_configs["bleu_valid_configs"]["alpha"]
)
model_collections.add_to_collection(key="history_bleus", value=valid_bleu)
best_valid_bleu = float(np.array(model_collections.get_collection("history_bleus")).max())
summary_writer.add_scalar("bleu", valid_bleu, uidx)
summary_writer.add_scalar("best_bleu", best_valid_bleu, uidx)
# If model get new best valid bleu score
if valid_bleu >= best_valid_bleu:
bad_count = 0
if is_early_stop is False:
# 1. save the best model
torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
# 2. record all several best models
best_model_saver.save(global_step=uidx, model=nmt_model)
else:
bad_count += 1
# At least one epoch should be traversed
if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
is_early_stop = True
WARN("Early Stop!")
exit(0)
summary_writer.add_scalar("bad_count", bad_count, uidx)
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
INFO("{0} Loss: {1:.2f} BLEU: {2:.2f} lrate: {3:6f} patience: {4}".format(
uidx, valid_loss, valid_bleu, lrate, bad_count
))
training_progress_bar.close()
eidx += 1
if eidx > training_configs["max_epochs"]:
break
|
the-stack_106_20233
|
# -*- coding: utf-8 -*-
# base16-prompt-toolkit (https://github.com/memeplex/base16-prompt-toolkit)
# Base16 Prompt Toolkit template by Carlos Pita ([email protected])
# Paraiso scheme by Jan T. Sott
from prompt_toolkit.output.vt100 import _256_colors
from pygments.style import Style
from pygments.token import (Keyword, Name, Comment, String, Error, Text,
Number, Operator, Literal, Token)
# See http://chriskempson.com/projects/base16/ for a description of the role
# of the different colors in the base16 palette.
base00 = '#2f1e2e'
base01 = '#41323f'
base02 = '#4f424c'
base03 = '#776e71'
base04 = '#8d8687'
base05 = '#a39e9b'
base06 = '#b9b6b0'
base07 = '#e7e9db'
base08 = '#ef6155'
base09 = '#f99b15'
base0A = '#fec418'
base0B = '#48b685'
base0C = '#5bc4bf'
base0D = '#06b6ef'
base0E = '#815ba4'
base0F = '#e96ba8'
# See https://github.com/jonathanslenders/python-prompt-toolkit/issues/355
colors = (globals()['base0' + d] for d in '08BADEC5379F1246')
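# Register the palette's RGB values in prompt_toolkit's 256-color table so they resolve to fixed low color indices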
for i, color in enumerate(colors):
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
_256_colors[r, g, b] = i + 6 if i > 8 else i
# See http://pygments.org/docs/tokens/ for a description of the different
# pygments tokens.
class Base16Style(Style):
background_color = base00
highlight_color = base02
default_style = base05
styles = {
Text: base05,
Error: '%s bold' % base08,
Comment: base03,
Keyword: base0E,
Keyword.Constant: base09,
Keyword.Namespace: base0D,
Name.Builtin: base0D,
Name.Function: base0D,
Name.Class: base0D,
Name.Decorator: base0E,
Name.Exception: base08,
Number: base09,
Operator: base0E,
Literal: base0B,
String: base0B
}
# See https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/prompt_toolkit/styles/defaults.py
# for a description of prompt_toolkit related pseudo-tokens.
overrides = {
Token.Prompt: base0B,
Token.PromptNum: '%s bold' % base0B,
Token.OutPrompt: base08,
Token.OutPromptNum: '%s bold' % base08,
Token.Menu.Completions.Completion: 'bg:%s %s' % (base01, base04),
Token.Menu.Completions.Completion.Current: 'bg:%s %s' % (base04, base01),
Token.MatchingBracket.Other: 'bg:%s %s' % (base03, base00)
}
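# A possible way to wire this up (a sketch, not part of the original file; the traitlet
# names are assumed from IPython's prompt_toolkit integration and should be verified):
#
#     # ipython_config.py
#     from base16_paraiso import Base16Style, overrides  # hypothetical module name
#     c.TerminalInteractiveShell.highlighting_style = Base16Style
#     c.TerminalInteractiveShell.highlighting_style_overrides = overrides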
|
the-stack_106_20237
|
import torch
import json
from torch import nn
from .mlp import MLP
from ..constants import CATEGORICAL, LABEL, LOGITS, FEATURES
from typing import Optional, List
from .utils import init_weights
class CategoricalMLP(nn.Module):
"""
MLP for categorical input. The input dimension is automatically computed based on
the number of categories in each categorical column.
"""
def __init__(
self,
prefix: str,
num_categories: List[int],
out_features: Optional[int] = None,
num_layers: Optional[int] = 1,
activation: Optional[str] = "gelu",
dropout_prob: Optional[float] = 0.5,
normalization: Optional[str] = "layer_norm",
num_classes: Optional[int] = 0,
):
"""
Parameters
----------
prefix
The model prefix.
num_categories
A list of integers. Each one is the number of categories in one categorical column.
out_features
Dimension of output features.
num_layers
Number of MLP layers.
activation
Name of activation function.
dropout_prob
Dropout probability.
normalization
Name of normalization function.
num_classes
Number of classes. 1 for a regression task.
"""
super().__init__()
self.out_features = out_features
max_embedding_dim = 100
embed_exponent = 0.56
size_factor = 1.0
self.column_embeddings = nn.ModuleList()
self.column_mlps = nn.ModuleList()
assert isinstance(num_categories, list)
for num_categories_per_col in num_categories:
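            # Heuristic embedding width: about 1.6 * n_categories ** 0.56, at least 2 and capped at max_embedding_dim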
embedding_dim_per_col = int(
size_factor * max(2, min(
max_embedding_dim,
1.6 * num_categories_per_col ** embed_exponent
))
)
self.column_embeddings.append(
nn.Embedding(
num_embeddings=num_categories_per_col,
embedding_dim=embedding_dim_per_col,
)
)
self.column_mlps.append(
MLP(
in_features=embedding_dim_per_col,
hidden_features=out_features,
out_features=out_features,
num_layers=num_layers,
activation=activation,
dropout_prob=dropout_prob,
normalization=normalization,
)
)
self.aggregator_mlp = MLP(
in_features=out_features * len(num_categories),
hidden_features=out_features * len(num_categories),
out_features=out_features,
num_layers=num_layers,
activation=activation,
dropout_prob=dropout_prob,
normalization=normalization,
)
self.head = nn.Linear(out_features, num_classes) if num_classes > 0 else nn.Identity()
# init weights
self.apply(init_weights)
self.prefix = prefix
self.name_to_id = self.get_layer_ids()
self.head_layer_names = [n for n, layer_id in self.name_to_id.items() if layer_id == 0]
@property
def categorical_key(self):
return f"{self.prefix}_{CATEGORICAL}"
@property
def label_key(self):
return f"{self.prefix}_{LABEL}"
def forward(
self,
batch: dict,
):
"""
Parameters
----------
batch
A dictionary containing the input mini-batch data.
We need to use the keys with the model prefix to index required data.
Returns
-------
A dictionary with logits and features.
"""
assert len(batch[self.categorical_key]) == len(self.column_embeddings)
features = []
for categorical_id, embed, mlp in zip(batch[self.categorical_key], self.column_embeddings, self.column_mlps):
features.append(mlp(embed(categorical_id)))
cat_features = torch.cat(features, dim=1)
features = self.aggregator_mlp(cat_features)
logits = self.head(features)
return {
self.prefix: {
LOGITS: logits,
FEATURES: features,
}
}
def get_layer_ids(self,):
"""
All layers have the same id 0 since there is no pre-trained models used here.
Returns
-------
A dictionary mapping the layer names (keys) to their ids (values).
"""
name_to_id = {}
for n, _ in self.named_parameters():
name_to_id[n] = 0
return name_to_id
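# A minimal usage sketch (hypothetical shapes and values, not part of the original module):
#
#     model = CategoricalMLP(prefix="cat_mlp", num_categories=[10, 4], out_features=64, num_classes=2)
#     batch = {model.categorical_key: [torch.zeros(8, dtype=torch.long), torch.ones(8, dtype=torch.long)]}
#     out = model(batch)
#     out["cat_mlp"][LOGITS].shape    # (8, 2)
#     out["cat_mlp"][FEATURES].shape  # (8, 64)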
|
the-stack_106_20238
|
#
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import requests
import json
import sys
import socket
import warnings
hostname = socket.gethostname()
class BaseClient(object):
printUrl = False
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
def __init__(self, baseURL, baseResource):
self.baseURL = baseURL
self.baseResource = baseResource
def get(self, resPath, queryParams=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
resp = requests.get(theUrl, params=queryParams)
self.__checkForSuccess(resp)
if(resp.content == b''):
return None
else:
return resp.json()
def post(self, resPath, queryParams, body, headers=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
theHeader = self.headers
if headers is not None:
theHeader = self.mergeTwoDicts(self.headers, headers)
if body is not None:
jsonBody = json.dumps(body, ensure_ascii=False)
resp = requests.post(theUrl, params=queryParams, data=jsonBody, headers=theHeader)
else:
resp = requests.post(theUrl, params=queryParams, headers=theHeader)
self.__checkForSuccess(resp)
return self.__return(resp, theHeader)
def put(self, resPath, queryParams=None, body=None, headers=None):
theUrl = "{}/{}".format(self.baseURL, resPath)
theHeader = self.headers
if headers is not None:
theHeader = self.mergeTwoDicts(self.headers, headers)
if body is not None:
jsonBody = json.dumps(body, ensure_ascii=False)
resp = requests.put(theUrl, params=queryParams, data=jsonBody, headers=theHeader)
else:
resp = requests.put(theUrl, params=queryParams, headers=theHeader)
self.__print(resp)
self.__checkForSuccess(resp)
def delete(self, resPath, queryParams):
theUrl = "{}/{}".format(self.baseURL, resPath)
resp = requests.delete(theUrl, params=queryParams)
self.__print(resp)
self.__checkForSuccess(resp)
def makeUrl(self, urlformat=None, *argv):
url = self.baseResource + '/'
if urlformat:
url += urlformat.format(*argv)
return url
def makeParams(self, **kwargs):
return dict((k, v) for k, v in kwargs.items() if v is not None) or None
def mergeTwoDicts(self, x, y):
z = x.copy()
z.update(y)
return z
def __print(self, resp):
if self.printUrl:
print(resp.url)
def __return(self, resp, header):
retval = ''
if len(resp.text) > 0:
if header['Accept'] == 'text/plain':
retval = resp.text
elif header['Accept'] == 'application/json':
retval = resp.json()
else:
retval = resp.text
return retval
def __checkForSuccess(self, resp):
try:
resp.raise_for_status()
except requests.HTTPError:
print("ERROR: " + resp.text)
raise
class MetadataClient(BaseClient):
BASE_RESOURCE = 'metadata'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getWorkflowDef(self, wfname, version=None):
url = self.makeUrl('workflow/{}', wfname)
return self.get(url, self.makeParams(version=version))
def createWorkflowDef(self, wfdObj):
url = self.makeUrl('workflow')
return self.post(url, None, wfdObj)
def updateWorkflowDefs(self, listOfWfdObj):
url = self.makeUrl('workflow')
self.put(url, None, listOfWfdObj)
def getAllWorkflowDefs(self):
url = self.makeUrl('workflow')
return self.get(url)
def unRegisterWorkflowDef(self, wfname, version):
url = self.makeUrl("workflow/{name}/{version}".format(name=wfname, version=version))
self.delete(url, None)
def getTaskDef(self, tdName):
url = self.makeUrl('taskdefs/{}', tdName)
return self.get(url)
def registerTaskDefs(self, listOfTaskDefObj):
url = self.makeUrl('taskdefs')
return self.post(url, None, listOfTaskDefObj)
def registerTaskDef(self, taskDefObj):
"""registerTaskDef is deprecated since PUT /metadata/taskdefs does not
register but updates a task definition. Use updateTaskDef function
instead.
"""
warnings.warn(self.registerTaskDef.__doc__, DeprecationWarning)
url = self.makeUrl('taskdefs')
self.put(url, None, taskDefObj)
def updateTaskDef(self, taskDefObj):
url = self.makeUrl('taskdefs')
self.put(url, None, taskDefObj)
def unRegisterTaskDef(self, tdName, reason=None):
url = self.makeUrl('taskdefs/{}', tdName)
self.delete(url, self.makeParams(reason=reason))
def getAllTaskDefs(self):
url = self.makeUrl('taskdefs')
return self.get(url)
class TaskClient(BaseClient):
BASE_RESOURCE = 'tasks'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getTask(self, taskId):
url = self.makeUrl('{}', taskId)
return self.get(url)
def updateTask(self, taskObj):
url = self.makeUrl('')
headers = {'Accept': 'text/plain'}
self.post(url, None, taskObj, headers)
def pollForTask(self, taskType, workerid, domain=None):
url = self.makeUrl('poll/{}', taskType)
params = {}
params['workerid'] = workerid
if domain is not None:
params['domain'] = domain
try:
return self.get(url, params)
except Exception as err:
print('Error while polling ' + str(err))
return None
def pollForBatch(self, taskType, count, timeout, workerid, domain=None):
url = self.makeUrl('poll/batch/{}', taskType)
params = {}
params['workerid'] = workerid
params['count'] = count
params['timeout'] = timeout
if domain is not None:
params['domain'] = domain
try:
return self.get(url, params)
except Exception as err:
print('Error while polling ' + str(err))
return None
def ackTask(self, taskId, workerid):
url = self.makeUrl('{}/ack', taskId)
params = {}
params['workerid'] = workerid
headers = {'Accept': 'application/json'}
value = self.post(url, params, None, headers)
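        # The ack endpoint may answer with a JSON boolean or the plain string 'true'; treat both as success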
return value in ['true', True]
def getTasksInQueue(self, taskName):
url = self.makeUrl('queue/{}', taskName)
return self.get(url)
def removeTaskFromQueue(self, taskId, reason=None):
url = self.makeUrl('queue/{}', taskId)
params = {}
params['reason'] = reason
self.delete(url, params)
def getTaskQueueSizes(self, listOfTaskName):
url = self.makeUrl('queue/sizes')
return self.post(url, None, listOfTaskName)
class WorkflowClient(BaseClient):
BASE_RESOURCE = 'workflow'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getWorkflow(self, wfId, includeTasks=True):
url = self.makeUrl('{}', wfId)
params = {}
params['includeTasks'] = includeTasks
return self.get(url, params)
def getRunningWorkflows(self, wfName, version=None, startTime=None, endTime=None):
url = self.makeUrl('running/{}', wfName)
params = {}
params['version'] = version
params['startTime'] = startTime
params['endTime'] = endTime
return self.get(url, params)
def startWorkflow(self, wfName, inputjson, version=None, correlationId=None):
url = self.makeUrl('{}', wfName)
params = {}
params['version'] = version
params['correlationId'] = correlationId
headers = {'Accept': 'text/plain'}
return self.post(url, params, inputjson, headers)
def terminateWorkflow(self, wfId, reason=None):
url = self.makeUrl('{}', wfId)
params = {}
params['reason'] = reason
self.delete(url, params)
def removeWorkflow(self, wfId, archiveWorkflow, reason=None):
url = self.makeUrl('{}/remove', wfId)
self.delete(url, self.makeParams(archiveWorkflow=archiveWorkflow, reason=reason))
def pauseWorkflow(self, wfId):
url = self.makeUrl('{}/pause', wfId)
self.put(url)
def resumeWorkflow(self, wfId):
url = self.makeUrl('{}/resume', wfId)
self.put(url)
def skipTaskFromWorkflow(self, wfId, taskRefName, skipTaskRequest):
url = self.makeUrl('{}/skiptask/{}', wfId, taskRefName)
self.post(url, None, skipTaskRequest)
def rerunWorkflow(self, wfId, taskRefName, rerunWorkflowRequest):
url = self.makeUrl('{}/rerun', wfId)
return self.post(url, None, rerunWorkflowRequest)
def restartWorkflow(self, wfId, taskRefName, fromTaskRef):
url = self.makeUrl('{}/restart', wfId)
params = {}
params['from'] = fromTaskRef
self.post(url, params, None)
class EventServicesClient(BaseClient):
BASE_RESOURCE = 'event'
def __init__(self, baseURL):
BaseClient.__init__(self, baseURL, self.BASE_RESOURCE)
def getEventHandlerDef(self, event, activeOnly=True):
url = self.makeUrl('{}', event)
params = {}
params['activeOnly'] = activeOnly
return self.get(url, params)
def getEventHandlerDefs(self):
url = self.makeUrl()
return self.get(url)
def createEventHandlerDef(self, ehObj):
url = self.makeUrl()
return self.post(url, None, ehObj)
def updateEventHandlerDef(self, ehObj):
url = self.makeUrl()
return self.put(url, None, ehObj)
def removeEventHandler(self, ehName):
url = self.makeUrl('{}', ehName)
self.delete(url, {})
def getEventHandlerQueues(self):
url = self.makeUrl('queues')
return self.get(url)
def getEventHandlerQueuesProviders(self):
url = self.makeUrl('queues/providers')
return self.get(url)
class WFClientMgr:
def __init__(self, server_url='http://localhost:8080/api/'):
self.workflowClient = WorkflowClient(server_url)
self.taskClient = TaskClient(server_url)
self.metadataClient = MetadataClient(server_url)
def main():
if len(sys.argv) < 3:
print("Usage - python conductor server_url command parameters...")
return None
server_url = sys.argv[1]
command = sys.argv[2]
wfcMgr = WFClientMgr(server_url)
wfc = wfcMgr.workflowClient
if command == 'start':
        if len(sys.argv) < 5:
            print('python conductor server_url start workflow_name input_json [version] [correlationId]')
            return None
        wfName = sys.argv[3]
        wfInput = json.loads(sys.argv[4])
        version = int(sys.argv[5]) if len(sys.argv) > 5 else None
        correlationId = sys.argv[6] if len(sys.argv) > 6 else None
        workflowId = wfc.startWorkflow(wfName, wfInput, version, correlationId)
print(workflowId)
return workflowId
elif command == 'get':
if len(sys.argv) < 4:
print('python conductor server_url get workflow_id')
return None
wfId = sys.argv[3]
wfjson = wfc.getWorkflow(wfId)
print(json.dumps(wfjson, indent=True, separators=(',', ': ')))
return wfjson
elif command == 'terminate':
if len(sys.argv) < 4:
print('python conductor server_url terminate workflow_id')
return None
wfId = sys.argv[3]
wfc.terminateWorkflow(wfId)
print('OK')
return wfId
if __name__ == '__main__':
main()
|
the-stack_106_20240
|
"""
Matrix multiplication is a binary operation that produces a product matrix
from two matrices. To multiply two matrices, the number of columns of the first
matrix must equal the number of rows of the second matrix.
This program finds the product of two given matrices.
"""
Row_1 = int(input("Enter the number of rows for first matrix : "))
Col_1 = int(input("Enter the number of columns for first matrix : "))
matrix_1 = []
print("Enter the entries rowwise:")
for i in range(Row_1):
matrix_1.append([int(x) for x in input().split(" ")])
Row_2 = int(input("Enter the number of rows for second matrix : "))
Col_2 = int(input("Enter the number of columns for second matrix : "))
matrix_2 = []
print("Enter the entries rowwise:")
for i in range(Row_2):
matrix_2.append([int(x) for x in input().split(" ")])
result = [[0 for i in range(Col_2)] for j in range(Row_1)]
# Multiplying both matrices and storing in result
for i in range(Row_1):
for j in range(Col_2):
for k in range(Col_1):
result[i][j] += matrix_1[i][k] * matrix_2[k][j]
print("The result of the matrix multiplication is")
for i in range(Row_1):
for j in range(Col_2):
print(result[i][j], end=" ")
print()
"""
Sample I/O :
Enter the number of rows for first matrix : 3
Enter the number of columns for first matrix : 3
Enter the entries rowwise:
1 2 3
4 5 6
7 8 9
Enter the number of rows for second matrix : 3
Enter the number of columns for second matrix : 4
Enter the entries rowwise:
1 2 3 4
5 6 7 8
9 10 11 12
The result of the matrix multiplication is
38 44 50 56
83 98 113 128
128 152 176 200
Time complexity : O(n^3)
Space complexity : O(n^2)
"""
|
the-stack_106_20242
|
"""Primitive dict ops."""
from mypyc.ir.ops import ERR_FALSE, ERR_MAGIC, ERR_NEVER, ERR_NEG_INT
from mypyc.ir.rtypes import (
dict_rprimitive, object_rprimitive, bool_rprimitive, int_rprimitive,
list_rprimitive, dict_next_rtuple_single, dict_next_rtuple_pair, c_pyssize_t_rprimitive,
c_int_rprimitive
)
from mypyc.primitives.registry import (
name_ref_op, method_op,
simple_emit, name_emit, c_custom_op, c_method_op, c_function_op, c_binary_op
)
# Get the 'dict' type object.
name_ref_op('builtins.dict',
result_type=object_rprimitive,
error_kind=ERR_NEVER,
emit=name_emit('&PyDict_Type', target_type="PyObject *"),
is_borrowed=True)
# dict[key]
dict_get_item_op = c_method_op(
name='__getitem__',
arg_types=[dict_rprimitive, object_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetItem',
error_kind=ERR_MAGIC)
# dict[key] = value
dict_set_item_op = c_method_op(
name='__setitem__',
arg_types=[dict_rprimitive, object_rprimitive, object_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_SetItem',
error_kind=ERR_NEG_INT)
# key in dict
c_binary_op(
name='in',
arg_types=[object_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='PyDict_Contains',
error_kind=ERR_NEG_INT,
truncated_type=bool_rprimitive,
ordering=[1, 0])
# dict1.update(dict2)
dict_update_op = c_method_op(
name='update',
arg_types=[dict_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_Update',
error_kind=ERR_NEG_INT,
priority=2)
# Operation used for **value in dict displays.
# This is mostly like dict.update(obj), but has customized error handling.
dict_update_in_display_op = c_custom_op(
arg_types=[dict_rprimitive, dict_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_UpdateInDisplay',
error_kind=ERR_NEG_INT)
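# For illustration: a display such as {**base, 'k': v} is expected to merge `base`
# through this op, so errors can be reported in terms of the display rather than
# a plain .update() call (sketch inferred from the comment above).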
# dict.update(obj)
c_method_op(
name='update',
arg_types=[dict_rprimitive, object_rprimitive],
return_type=c_int_rprimitive,
c_function_name='CPyDict_UpdateFromAny',
error_kind=ERR_NEG_INT)
# dict.get(key, default)
c_method_op(
name='get',
arg_types=[dict_rprimitive, object_rprimitive, object_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_Get',
error_kind=ERR_MAGIC)
# dict.get(key)
method_op(
name='get',
arg_types=[dict_rprimitive, object_rprimitive],
result_type=object_rprimitive,
error_kind=ERR_MAGIC,
emit=simple_emit('{dest} = CPyDict_Get({args[0]}, {args[1]}, Py_None);'))
# Construct an empty dictionary.
dict_new_op = c_custom_op(
arg_types=[],
return_type=dict_rprimitive,
c_function_name='PyDict_New',
error_kind=ERR_MAGIC)
# Construct a dictionary from keys and values.
# Positional argument is the number of key-value pairs
# Variable arguments are (key1, value1, ..., keyN, valueN).
dict_build_op = c_custom_op(
arg_types=[c_pyssize_t_rprimitive],
return_type=dict_rprimitive,
c_function_name='CPyDict_Build',
error_kind=ERR_MAGIC,
var_arg_type=object_rprimitive)
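# For illustration: a literal such as {'a': 1, 'b': 2} is expected to lower to the
# equivalent of CPyDict_Build(2, 'a', 1, 'b', 2) -- the pair count first, then the
# keys and values interleaved as variable arguments (sketch inferred from the
# comment above).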
# Construct a dictionary from another dictionary.
c_function_op(
name='builtins.dict',
arg_types=[dict_rprimitive],
return_type=dict_rprimitive,
c_function_name='PyDict_Copy',
error_kind=ERR_MAGIC,
priority=2)
# Generic one-argument dict constructor: dict(obj)
c_function_op(
name='builtins.dict',
arg_types=[object_rprimitive],
return_type=dict_rprimitive,
c_function_name='CPyDict_FromAny',
error_kind=ERR_MAGIC)
# dict.keys()
c_method_op(
name='keys',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_KeysView',
error_kind=ERR_MAGIC)
# dict.values()
c_method_op(
name='values',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_ValuesView',
error_kind=ERR_MAGIC)
# dict.items()
c_method_op(
name='items',
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_ItemsView',
error_kind=ERR_MAGIC)
# list(dict.keys())
dict_keys_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Keys',
error_kind=ERR_MAGIC)
# list(dict.values())
dict_values_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Values',
error_kind=ERR_MAGIC)
# list(dict.items())
dict_items_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=list_rprimitive,
c_function_name='CPyDict_Items',
error_kind=ERR_MAGIC)
# PyDict_Next() fast iteration
dict_key_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetKeysIter',
error_kind=ERR_MAGIC)
dict_value_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetValuesIter',
error_kind=ERR_MAGIC)
dict_item_iter_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=object_rprimitive,
c_function_name='CPyDict_GetItemsIter',
error_kind=ERR_MAGIC)
dict_next_key_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_single,
c_function_name='CPyDict_NextKey',
error_kind=ERR_NEVER)
dict_next_value_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_single,
c_function_name='CPyDict_NextValue',
error_kind=ERR_NEVER)
dict_next_item_op = c_custom_op(
arg_types=[object_rprimitive, int_rprimitive],
return_type=dict_next_rtuple_pair,
c_function_name='CPyDict_NextItem',
error_kind=ERR_NEVER)
# check that len(dict) == const during iteration
dict_check_size_op = c_custom_op(
arg_types=[dict_rprimitive, int_rprimitive],
return_type=bool_rprimitive,
c_function_name='CPyDict_CheckSize',
error_kind=ERR_FALSE)
dict_size_op = c_custom_op(
arg_types=[dict_rprimitive],
return_type=c_pyssize_t_rprimitive,
c_function_name='PyDict_Size',
error_kind=ERR_NEVER)
|
the-stack_106_20244
|
# -*- coding: utf-8 -*-
'''
Load and inspect the data
'''
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Set random seeds
import random
np.random.seed(1234)
random.seed(1234)
# Load the data
train = pd.read_csv('./data/train.tsv', sep='\t')
test = pd.read_csv('./data/test.tsv', sep='\t')
submission = pd.read_csv('./data/sample_submit.csv')
# Inspect the data
print(train.head())
print(train.dtypes)
'''
Feature engineering
'''
# Import libraries
from sklearn.preprocessing import LabelEncoder
# Get the object-type (categorical) columns
categories = train.columns[train.dtypes == 'object']
print(categories)
# Label-encode the 'class' column
le = LabelEncoder()
le = le.fit(train['class'])
train['class'] = le.transform(train['class'])
'''
Model building and evaluation
'''
# Import libraries
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from statistics import mean
# Split the data into 5 folds
folds = 5
kf = KFold(n_splits=folds)
# Set the hyperparameters
params = {
    # Multiclass classification problem
'objective': 'multiclass',
    # There are 3 classes
'num_class': 3
}
# Specify the features and the target
X_train = train.drop(['class', 'id'], axis=1)
Y_train = train['class']
# Store the model and predictions created for each fold
models = []
scores = []
oof = np.zeros(len(X_train))
for train_index, val_index in kf.split(X_train):
x_train = X_train.iloc[train_index]
x_valid = X_train.iloc[val_index]
y_train = Y_train.iloc[train_index]
y_valid = Y_train.iloc[val_index]
lgb_train = lgb.Dataset(x_train, y_train)
lgb_eval = lgb.Dataset(x_valid, y_valid, reference=lgb_train)
model = lgb.train(params,
lgb_train,
valid_sets=lgb_eval,
                      num_boost_round=100,  # number of boosting rounds
                      early_stopping_rounds=20,  # early stopping criterion
verbose_eval=10)
y_pred = model.predict(x_valid, num_iteration=model.best_iteration)
    y_pred_max = np.argmax(y_pred, axis=1)  # take the class with the highest probability
score = accuracy_score(y_valid, y_pred_max)
print(score)
models.append(model)
scores.append(score)
oof[val_index] = y_pred_max
    # Build the confusion matrix
cm = confusion_matrix(y_valid, y_pred_max)
    # Visualize the confusion matrix with a heatmap
sns.heatmap(cm, annot=True, cmap='Blues')
plt.show()
# Compute the mean accuracy score
print(mean(scores))
"""
予測精度:
0.9466666666666667
"""
|
the-stack_106_20246
|
import pickle
import matplotlib.pyplot as plt
from binomial_model import *
from scipy.special import loggamma
from invasion_threshold import *
def poisson(xvec, xmean):
return np.exp(xvec*np.log(xmean)-xmean-loggamma(xvec+1))
# parameters
mu = 0.05
f = lambda m: 1
K = 1
tmin = 1
T = np.inf
mmax = 40
kmax = 20
mmean = 10
kmean = 5
mvec = np.arange(mmax+1)
kvec = np.arange(kmax+1)
pm = poisson(mvec,mmean)
pk = poisson(kvec,kmean)
alpha_list = [0.5,1.,1.5]
integrand = exponential_integrand
beta_list = np.linspace(0.15,0.00001,500)
result = dict()
for alpha in alpha_list:
result[alpha] = dict()
result[alpha]['beta'] = []
result[alpha]['I'] = []
beta_c = invasion_threshold(pm, pk, mu, f,
K=K,alpha=alpha,tmin=tmin,T=T,
integrand=integrand)
result[alpha]['beta_c'] = beta_c
print(alpha, beta_c)
for alpha in alpha_list:
Ik = 0.9*np.ones(kvec.shape)
print(f"----------------")
print(f"alpha {alpha}")
print(f"----------------")
for beta in beta_list:
thetami = get_thetami_mat(mmax,beta,K=K,alpha=alpha,tmin=tmin,T=T)
Ilast = None
Inow = np.sum(Ik*pk)
while Ilast is None or (np.abs(Inow - Ilast)/Inow > 10**(-8)
and Inow > 10**(-4)):
Ik = evolution(Ik, pk, kvec, pm, mvec, thetami, mu)
Ilast = Inow
Inow = np.sum(Ik*pk)
print(f"beta {beta}, I : {Inow}")
if Inow <= 10**(-4):
break
result[alpha]['I'].append(Inow)
result[alpha]['beta'].append(beta)
plt.plot(result[alpha]['beta'],result[alpha]['I'],label=f'alpha = {alpha}')
plt.legend()
plt.show()
with open('./dat/figure2_bifurcation.pk','wb') as filename:
pickle.dump(result,filename)
|
the-stack_106_20249
|
'''
Tree View
=========
.. image:: images/treeview.png
:align: right
.. versionadded:: 1.0.4
:class:`TreeView` is a widget used to represent a tree structure. It is
currently very basic, supporting a minimal feature set.
Introduction
------------
A :class:`TreeView` is populated with :class:`TreeViewNode` instances, but you
cannot use a :class:`TreeViewNode` directly. You must combine it with another
widget, such as :class:`~kivy.uix.label.Label`,
:class:`~kivy.uix.button.Button` or even your own widget. The TreeView
always creates a default root node, based on :class:`TreeViewLabel`.
:class:`TreeViewNode` is a class object containing needed properties for
serving as a tree node. Extend :class:`TreeViewNode` to create custom node
types for use with a :class:`TreeView`.
For constructing your own subclass, follow the pattern of TreeViewLabel which
combines a Label and a TreeViewNode, producing a :class:`TreeViewLabel` for
direct use in a TreeView instance.
To use the TreeViewLabel class, you could create two nodes directly attached
to root::
tv = TreeView()
tv.add_node(TreeViewLabel(text='My first item'))
tv.add_node(TreeViewLabel(text='My second item'))
Or, create two nodes attached to a first::
tv = TreeView()
n1 = tv.add_node(TreeViewLabel(text='Item 1'))
tv.add_node(TreeViewLabel(text='SubItem 1'), n1)
tv.add_node(TreeViewLabel(text='SubItem 2'), n1)
If you have a large tree structure, perhaps you would need a utility function
to populate the tree view::
def populate_tree_view(tree_view, parent, node):
if parent is None:
tree_node = tree_view.add_node(TreeViewLabel(text=node['node_id'],
is_open=True))
else:
tree_node = tree_view.add_node(TreeViewLabel(text=node['node_id'],
is_open=True), parent)
for child_node in node['children']:
populate_tree_view(tree_view, tree_node, child_node)
tree = {'node_id': '1',
'children': [{'node_id': '1.1',
'children': [{'node_id': '1.1.1',
'children': [{'node_id': '1.1.1.1',
'children': []}]},
{'node_id': '1.1.2',
'children': []},
{'node_id': '1.1.3',
'children': []}]},
{'node_id': '1.2',
'children': []}]}
class TreeWidget(FloatLayout):
def __init__(self, **kwargs):
super(TreeWidget, self).__init__(**kwargs)
tv = TreeView(root_options=dict(text='Tree One'),
hide_root=False,
indent_level=4)
populate_tree_view(tv, None, tree)
self.add_widget(tv)
The root widget in the tree view is opened by default and has text set as
'Root'. If you want to change that, you can use the
:attr:`TreeView.root_options`
property. This will pass options to the root widget::
tv = TreeView(root_options=dict(text='My root label'))
Creating Your Own Node Widget
-----------------------------
For a button node type, combine a :class:`~kivy.uix.button.Button` and a
:class:`TreeViewNode` as follows::
class TreeViewButton(Button, TreeViewNode):
pass
You must know that, for a given node, only the
:attr:`~kivy.uix.widget.Widget.size_hint_x` will be honored. The allocated
width for the node will depend on the current width of the TreeView and the
level of the node. For example, if a node is at level 4, the width
allocated will be:
treeview.width - treeview.indent_start - treeview.indent_level * node.level
You might have some trouble with that. It is the developer's responsibility to
correctly adapt the graphical representation of the nodes, if needed.
'''
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.widget import Widget
from kivy.properties import BooleanProperty, ListProperty, ObjectProperty, \
AliasProperty, NumericProperty, ReferenceListProperty
class TreeViewException(Exception):
'''Exception for errors in the :class:`TreeView`.
'''
pass
class TreeViewNode(object):
'''TreeViewNode class, used to build a node class for a TreeView object.
'''
def __init__(self, **kwargs):
if self.__class__ is TreeViewNode:
raise TreeViewException('You cannot use directly TreeViewNode.')
super(TreeViewNode, self).__init__(**kwargs)
is_leaf = BooleanProperty(True)
'''Boolean to indicate whether this node is a leaf or not. Used to adjust
the graphical representation.
:attr:`is_leaf` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to True. It is automatically set to False when a child is added.
'''
is_open = BooleanProperty(False)
'''Boolean to indicate whether this node is opened or not, in case there
are child nodes. This is used to adjust the graphical representation.
.. warning::
This property is automatically set by the :class:`TreeView`. You can
read but not write it.
:attr:`is_open` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
is_loaded = BooleanProperty(False)
'''Boolean to indicate whether this node is already loaded or not. This
property is used only if the :class:`TreeView` uses asynchronous loading.
:attr:`is_loaded` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
is_selected = BooleanProperty(False)
'''Boolean to indicate whether this node is selected or not. This is used
    to adjust the graphical representation.
.. warning::
This property is automatically set by the :class:`TreeView`. You can
read but not write it.
:attr:`is_selected` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
no_selection = BooleanProperty(False)
'''Boolean used to indicate whether selection of the node is allowed or
not.
:attr:`no_selection` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
nodes = ListProperty([])
'''List of nodes. The nodes list is different than the children list. A
node in the nodes list represents a node on the tree. An item in the
children list represents the widget associated with the node.
.. warning::
This property is automatically set by the :class:`TreeView`. You can
read but not write it.
:attr:`nodes` is a :class:`~kivy.properties.ListProperty` and defaults to
[].
'''
parent_node = ObjectProperty(None, allownone=True)
'''Parent node. This attribute is needed because the :attr:`parent` can be
None when the node is not displayed.
.. versionadded:: 1.0.7
:attr:`parent_node` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
level = NumericProperty(-1)
'''Level of the node.
:attr:`level` is a :class:`~kivy.properties.NumericProperty` and defaults
to -1.
'''
color_selected = ListProperty([.3, .3, .3, 1.])
'''Background color of the node when the node is selected.
:attr:`color_selected` is a :class:`~kivy.properties.ListProperty` and
    defaults to [.3, .3, .3, 1.].
'''
odd = BooleanProperty(False)
'''
This property is set by the TreeView widget automatically and is read-only.
:attr:`odd` is a :class:`~kivy.properties.BooleanProperty` and defaults to
False.
'''
odd_color = ListProperty([1., 1., 1., .0])
'''Background color of odd nodes when the node is not selected.
:attr:`odd_color` is a :class:`~kivy.properties.ListProperty` and defaults
to [1., 1., 1., 0.].
'''
even_color = ListProperty([0.5, 0.5, 0.5, 0.1])
'''Background color of even nodes when the node is not selected.
    :attr:`even_color` is a :class:`~kivy.properties.ListProperty` and defaults
to [.5, .5, .5, .1].
'''
class TreeViewLabel(Label, TreeViewNode):
'''Combines a :class:`~kivy.uix.label.Label` and a :class:`TreeViewNode` to
create a :class:`TreeViewLabel` that can be used as a text node in the
tree.
See module documentation for more information.
'''
class TreeView(Widget):
'''TreeView class. See module documentation for more information.
:Events:
`on_node_expand`: (node, )
Fired when a node is being expanded
`on_node_collapse`: (node, )
Fired when a node is being collapsed
'''
__events__ = ('on_node_expand', 'on_node_collapse')
def __init__(self, **kwargs):
self._trigger_layout = Clock.create_trigger(self._do_layout, -1)
super(TreeView, self).__init__(**kwargs)
tvlabel = TreeViewLabel(text='Root', is_open=True, level=0)
for key, value in self.root_options.items():
setattr(tvlabel, key, value)
self._root = self.add_node(tvlabel, None)
trigger = self._trigger_layout
fbind = self.fbind
fbind('pos', trigger)
fbind('size', trigger)
fbind('indent_level', trigger)
fbind('indent_start', trigger)
trigger()
def add_node(self, node, parent=None):
'''Add a new node to the tree.
:Parameters:
`node`: instance of a :class:`TreeViewNode`
Node to add into the tree
`parent`: instance of a :class:`TreeViewNode`, defaults to None
Parent node to attach the new node. If `None`, it is added to
the :attr:`root` node.
:returns:
the node `node`.
'''
# check if the widget is "ok" for a node
if not isinstance(node, TreeViewNode):
raise TreeViewException(
'The node must be a subclass of TreeViewNode')
# create node
if parent is None and self._root:
parent = self._root
if parent:
parent.is_leaf = False
parent.nodes.append(node)
node.parent_node = parent
node.level = parent.level + 1
node.fbind('size', self._trigger_layout)
self._trigger_layout()
return node
def remove_node(self, node):
'''Removes a node from the tree.
.. versionadded:: 1.0.7
:Parameters:
`node`: instance of a :class:`TreeViewNode`
Node to remove from the tree. If `node` is :attr:`root`, it is
not removed.
'''
# check if the widget is "ok" for a node
if not isinstance(node, TreeViewNode):
raise TreeViewException(
'The node must be a subclass of TreeViewNode')
parent = node.parent_node
if parent is not None:
if node == self._selected_node:
node.is_selected = False
self._selected_node = None
nodes = parent.nodes
if node in nodes:
nodes.remove(node)
parent.is_leaf = not bool(len(nodes))
node.parent_node = None
node.funbind('size', self._trigger_layout)
self._trigger_layout()
def on_node_expand(self, node):
pass
def on_node_collapse(self, node):
pass
def select_node(self, node):
'''Select a node in the tree.
'''
if node.no_selection:
return
if self._selected_node:
self._selected_node.is_selected = False
node.is_selected = True
self._selected_node = node
def toggle_node(self, node):
'''Toggle the state of the node (open/collapsed).
'''
node.is_open = not node.is_open
if node.is_open:
if self.load_func and not node.is_loaded:
self._do_node_load(node)
self.dispatch('on_node_expand', node)
else:
self.dispatch('on_node_collapse', node)
self._trigger_layout()
def get_node_at_pos(self, pos):
'''Get the node at the position (x, y).
'''
x, y = pos
for node in self.iterate_open_nodes(self.root):
if self.x <= x <= self.right and \
node.y <= y <= node.top:
return node
def iterate_open_nodes(self, node=None):
        '''Generator to iterate over all the expanded nodes starting from
        `node` and down. If `node` is `None`, the generator starts with
:attr:`root`.
To get all the open nodes::
treeview = TreeView()
# ... add nodes ...
for node in treeview.iterate_open_nodes():
print(node)
'''
if not node:
node = self.root
if self.hide_root and node is self.root:
pass
else:
yield node
if not node.is_open:
return
f = self.iterate_open_nodes
for cnode in node.nodes:
for ynode in f(cnode):
yield ynode
def iterate_all_nodes(self, node=None):
'''Generator to iterate over all nodes from `node` and down whether
        expanded or not. If `node` is `None`, the generator starts with
:attr:`root`.
'''
if not node:
node = self.root
yield node
f = self.iterate_all_nodes
for cnode in node.nodes:
for ynode in f(cnode):
yield ynode
#
# Private
#
def on_load_func(self, instance, value):
if value:
Clock.schedule_once(self._do_initial_load)
def _do_initial_load(self, *largs):
if not self.load_func:
return
self._do_node_load(None)
def _do_node_load(self, node):
gen = self.load_func(self, node)
if node:
node.is_loaded = True
if not gen:
return
for cnode in gen:
self.add_node(cnode, node)
def on_root_options(self, instance, value):
if not self.root:
return
for key, value in value.items():
setattr(self.root, key, value)
def _do_layout(self, *largs):
self.clear_widgets()
        # display only the nodes that are is_open
self._do_open_node(self.root)
# now do layout
self._do_layout_node(self.root, 0, self.top)
# now iterate for calculating minimum size
min_width = min_height = 0
count = 0
for node in self.iterate_open_nodes(self.root):
node.odd = False if count % 2 else True
count += 1
min_width = max(min_width, node.right - self.x)
min_height += node.height
self.minimum_size = (min_width, min_height)
def _do_open_node(self, node):
if self.hide_root and node is self.root:
height = 0
else:
self.add_widget(node)
height = node.height
if not node.is_open:
return height
for cnode in node.nodes:
height += self._do_open_node(cnode)
return height
def _do_layout_node(self, node, level, y):
if self.hide_root and node is self.root:
level -= 1
else:
node.x = self.x + self.indent_start + level * self.indent_level
node.top = y
if node.size_hint_x:
node.width = (self.width - (node.x - self.x)) \
* node.size_hint_x
y -= node.height
if not node.is_open:
return y
for cnode in node.nodes:
y = self._do_layout_node(cnode, level + 1, y)
return y
def on_touch_down(self, touch):
node = self.get_node_at_pos(touch.pos)
if not node:
return
if node.disabled:
return
# toggle node or selection ?
if node.x - self.indent_start <= touch.x < node.x:
self.toggle_node(node)
elif node.x <= touch.x:
self.select_node(node)
node.dispatch('on_touch_down', touch)
return True
#
# Private properties
#
_root = ObjectProperty(None)
_selected_node = ObjectProperty(None, allownone=True)
#
# Properties
#
minimum_width = NumericProperty(0)
'''Minimum width needed to contain all children.
.. versionadded:: 1.0.9
:attr:`minimum_width` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_height = NumericProperty(0)
'''Minimum height needed to contain all children.
.. versionadded:: 1.0.9
:attr:`minimum_height` is a :class:`kivy.properties.NumericProperty` and
defaults to 0.
'''
minimum_size = ReferenceListProperty(minimum_width, minimum_height)
'''Minimum size needed to contain all children.
.. versionadded:: 1.0.9
:attr:`minimum_size` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`minimum_width`, :attr:`minimum_height`) properties.
'''
indent_level = NumericProperty('16dp')
'''Width used for the indentation of each level except the first level.
Computation of indent for each level of the tree is::
indent = indent_start + level * indent_level
:attr:`indent_level` is a :class:`~kivy.properties.NumericProperty` and
defaults to 16.
'''
indent_start = NumericProperty('24dp')
'''Indentation width of the level 0 / root node. This is mostly the initial
size to accommodate a tree icon (collapsed / expanded). See
:attr:`indent_level` for more information about the computation of level
indentation.
:attr:`indent_start` is a :class:`~kivy.properties.NumericProperty` and
defaults to 24.
'''
hide_root = BooleanProperty(False)
'''Use this property to show/hide the initial root node. If True, the root
    node will not be displayed.
:attr:`hide_root` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def get_selected_node(self):
return self._selected_node
selected_node = AliasProperty(get_selected_node, None,
bind=('_selected_node', ))
'''Node selected by :meth:`TreeView.select_node` or by touch.
:attr:`selected_node` is a :class:`~kivy.properties.AliasProperty` and
defaults to None. It is read-only.
'''
def get_root(self):
return self._root
root = AliasProperty(get_root, None, bind=('_root', ))
'''Root node.
By default, the root node widget is a :class:`TreeViewLabel` with text
'Root'. If you want to change the default options passed to the widget
creation, use the :attr:`root_options` property::
treeview = TreeView(root_options={
'text': 'Root directory',
'font_size': 15})
:attr:`root_options` will change the properties of the
:class:`TreeViewLabel` instance. However, you cannot change the class used
for root node yet.
:attr:`root` is an :class:`~kivy.properties.AliasProperty` and defaults to
None. It is read-only. However, the content of the widget can be changed.
'''
root_options = ObjectProperty({})
'''Default root options to pass for root widget. See :attr:`root` property
for more information about the usage of root_options.
:attr:`root_options` is an :class:`~kivy.properties.ObjectProperty` and
defaults to {}.
'''
load_func = ObjectProperty(None)
'''Callback to use for asynchronous loading. If set, asynchronous loading
will be automatically done. The callback must act as a Python generator
function, using yield to send data back to the treeview.
The callback should be in the format::
def callback(treeview, node):
for name in ('Item 1', 'Item 2'):
yield TreeViewLabel(text=name)
:attr:`load_func` is a :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
if __name__ == '__main__':
from kivy.app import App
class TestApp(App):
def build(self):
tv = TreeView(hide_root=True)
add = tv.add_node
root = add(TreeViewLabel(text='Level 1, entry 1', is_open=True))
for x in range(5):
add(TreeViewLabel(text='Element %d' % x), root)
root2 = add(TreeViewLabel(text='Level 1, entry 2', is_open=False))
for x in range(24):
add(TreeViewLabel(text='Element %d' % x), root2)
for x in range(5):
add(TreeViewLabel(text='Element %d' % x), root)
root2 = add(TreeViewLabel(text='Element childs 2', is_open=False),
root)
for x in range(24):
add(TreeViewLabel(text='Element %d' % x), root2)
return tv
TestApp().run()
|
the-stack_106_20250
|
# -*- coding: utf-8 -*-
from django.conf import settings
from sys import version_info
from yats.api import *
def update_permissions_after_migration(app,**kwargs):
"""
    Update app permissions just after every migration.
    This is based on the django_extensions update_permissions management command.
"""
from django.db.models import get_app, get_models
from django.contrib.auth.management import create_permissions
create_permissions(get_app(app), get_models(), 2 if settings.DEBUG else 0)
version = '@version@'
if 'version' in version:
VERSION = ('a', 'b', 'c', '', 0)
else:
VERSION = version.split('.')
VERSION.append('')
VERSION.append(0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s %s' % (version, VERSION[3], VERSION[4])
return version
def get_python_version():
version = '%s.%s' % (version_info[0], version_info[1])
if version_info[2]:
version = '%s.%s' % (version, version_info[2])
if version_info[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if version_info[3] != 'final':
version = '%s %s %s' % (version, version_info[3], version_info[4])
return version
def access_to_settings(request):
return {'SETTINGS': settings}
|
the-stack_106_20251
|
"""
Demo of HMR.
Note that HMR requires the bounding box of the person in the image. The best performance is obtained when max length of the person in the image is roughly 150px.
When only the image path is supplied, it assumes that the image is centered on a person whose length is roughly 150px.
Alternatively, you can supply output of the openpose to figure out the bbox and the right scale factor.
Sample usage:
# On images on a tightly cropped image around the person
python -m demo --img_path data/im1963.jpg
python -m demo --img_path data/coco1.png
# On images, with openpose output
python -m demo --img_path data/random.jpg --json_path data/random_keypoints.json
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import flags
import numpy as np
import skimage.io as io
import tensorflow as tf
from src.util import renderer as vis_util
from src.util import image as img_util
from src.util import openpose as op_util
import src.config
from src.RunModel import RunModel
flags.DEFINE_string('img_path', 'data/im1963.jpg', 'Image to run')
flags.DEFINE_string(
'json_path', None,
'If specified, uses the openpose output to crop the image.')
def visualize(img, proc_param, joints, verts, cam):
"""
Renders the result in original image coordinate frame.
"""
cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
proc_param, verts, cam, joints, img_size=img.shape[:2])
# Render results
skel_img = vis_util.draw_skeleton(img, joints_orig)
rend_img_overlay = renderer(
vert_shifted, cam=cam_for_render, img=img, do_alpha=True)
rend_img = renderer(
vert_shifted, cam=cam_for_render, img_size=img.shape[:2])
rend_img_vp1 = renderer.rotated(
vert_shifted, 60, cam=cam_for_render, img_size=img.shape[:2])
rend_img_vp2 = renderer.rotated(
vert_shifted, -60, cam=cam_for_render, img_size=img.shape[:2])
import matplotlib.pyplot as plt
# plt.ion()
plt.figure(1)
plt.clf()
plt.subplot(231)
plt.imshow(img)
plt.title('input')
plt.axis('off')
plt.subplot(232)
plt.imshow(skel_img)
plt.title('joint projection')
plt.axis('off')
plt.subplot(233)
plt.imshow(rend_img_overlay)
plt.title('3D Mesh overlay')
plt.axis('off')
plt.subplot(234)
plt.imshow(rend_img)
plt.title('3D mesh')
plt.axis('off')
plt.subplot(235)
plt.imshow(rend_img_vp1)
plt.title('diff vp')
plt.axis('off')
plt.subplot(236)
plt.imshow(rend_img_vp2)
plt.title('diff vp')
plt.axis('off')
plt.draw()
plt.show()
def visualize_all(num_persons, img, proc_params, joints, verts, cam):
"""
Renders the result in original image coordinate frame.
"""
skel_img = np.copy(img)
rend_img_overlay = np.copy(img)
rend_img = np.zeros(shape=img.shape)
# rend_img_vp1 = np.zeros(shape=img.shape)
# rend_img_vp2 = np.zeros(shape=img.shape)
for idx in range(num_persons):
cam_for_render, vert_shifted, joints_orig = vis_util.get_original(
proc_params[idx], verts[idx], cam[idx], joints[idx], img_size=img.shape[:2])
# Render results
skel_img = vis_util.draw_skeleton(skel_img, joints_orig)
rend_img_overlay = renderer(
vert_shifted, cam=cam_for_render, img=rend_img_overlay, do_alpha=True, color_id=idx)
rend_img_overlay = rend_img_overlay[:, :, :3]
rend_img = renderer(
vert_shifted, cam=cam_for_render, img=rend_img, img_size=img.shape[:2], color_id=idx)
# rend_img_vp1 = renderer.rotated(
# vert_shifted, 60, cam=cam_for_render, img=rend_img_vp1, img_size=img.shape[:2])
# rend_img_vp2 = renderer.rotated(
# vert_shifted, -60, cam=cam_for_render, img=rend_img_vp2, img_size=img.shape[:2])
import matplotlib.pyplot as plt
# plt.ion()
plt.figure(1)
plt.clf()
plt.subplot(221)
plt.imshow(img)
plt.title('Image')
plt.axis('off')
plt.subplot(222)
plt.imshow(skel_img)
plt.title('Joints 2D Projection')
plt.axis('off')
plt.subplot(223)
plt.imshow(rend_img_overlay)
plt.title('3D Mesh Overlay')
plt.axis('off')
plt.subplot(224)
plt.imshow(rend_img)
plt.title('3D Mesh')
plt.axis('off')
# plt.subplot(235)
# plt.imshow(rend_img_vp1)
# plt.title('diff vp')
# plt.axis('off')
# plt.subplot(236)
# plt.imshow(rend_img_vp2)
# plt.title('diff vp')
# plt.axis('off')
plt.draw()
plt.show()
def preprocess_image(img_path, person_bbox=None):
img = io.imread(img_path)
if img.shape[2] == 4:
img = img[:, :, :3]
if person_bbox is None:
if np.max(img.shape[:2]) != config.img_size:
print('Resizing so the max image size is %d..' % config.img_size)
scale = (float(config.img_size) / np.max(img.shape[:2]))
else:
scale = 1.
center = np.round(np.array(img.shape[:2]) / 2).astype(int)
# image center in (x,y)
center = center[::-1]
else:
x1, y1, x2, y2 = person_bbox
center = np.array([(x1 + x2) // 2, (y1 + y2) // 2])
person_height = np.linalg.norm(y2 - y1)
scale = 150. / person_height
crop, proc_param = img_util.scale_and_crop(img, scale, center,
config.img_size)
# Normalize image to [-1, 1]
crop = 2 * ((crop / 255.) - 0.5)
return crop, proc_param, img
def main(img_path):
sess = tf.compat.v1.Session()
# meva_sample_1: person_bboxes = [[171, 102, 225, 244], [63, 71, 104, 199]]
# meva sample 2: person_bboxes = [[95, 132, 429, 551], [0, 2, 245, 485], [319, 43, 539, 427]]
# meva_sample 3: person_bboxes = [[155, 224, 381, 508], [19, 112, 238, 499], [305, 158, 508, 404]]
person_bboxes = [[319, 43, 539, 427], [0, 2, 245, 485], [95, 132, 429, 551]]
num_persons = len(person_bboxes)
# Demo only processes one image at a time
config.batch_size = num_persons
model = RunModel(config, sess=sess)
input_array = np.zeros(shape=[num_persons, config.img_size, config.img_size, 3])
proc_params = []
for person_idx, person_bbox in enumerate(person_bboxes):
input_img, proc_param, img = preprocess_image(img_path, person_bbox)
proc_params.append(proc_param)
# Add batch dimension: 1 x D x D x 3
input_array[person_idx] = input_img
#input_img = np.expand_dims(input_img, 0)
# Theta is the 85D vector holding [camera, pose, shape]
# where camera is 3D [s, tx, ty]
# pose is 72D vector holding the rotation of 24 joints of SMPL in axis angle format
# shape is 10D shape coefficients of SMPL
joints, verts, cams, joints3d, theta = model.predict(
input_array, get_theta=True)
visualize_all(num_persons, img, proc_params, joints, verts, cams)
if __name__ == '__main__':
config = flags.FLAGS
config(sys.argv)
# Using pre-trained model, change this to use your own.
config.load_path = src.config.PRETRAINED_MODEL
# Global renderer needs to be declared
renderer = vis_util.SMPLRenderer(face_path=config.smpl_face_path)
main(config.img_path)
|
the-stack_106_20252
|
import re
import dns.resolver
import tldextract
from app.plugin.data.dns_provider import DNS_PROVIDER
__plugin__ = "DNS Scanner"
SEQUENCE = 1
RESOLVER_NAMESERVERS = ["223.5.5.5", "1.1.1.1", "114.114.114.114"]
RESOLVER_TIMEOUT = 2
RESOLVER_LIFETIME = 8
def run(url):
scan_result = {"name": __plugin__, "sequence": SEQUENCE, "result": []}
error_result = {"name": __plugin__, "sequence": SEQUENCE, "result": []}
error_result["result"] = [
{"name": "Error", "result": [{"name": f"{__plugin__} can't scan this website"}]}
]
result_map = {
"providers": {"name": "DNS providers", "sequence": 0, "result": []},
"ns_server": {"name": "NS servers", "sequence": 1, "result": []},
}
providers = []
try:
registered_domain = tldextract.extract(url.netloc).registered_domain
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = RESOLVER_NAMESERVERS
resolver.timeout = RESOLVER_TIMEOUT
resolver.lifetime = RESOLVER_LIFETIME
answers = resolver.query(registered_domain, rdtype=dns.rdatatype.NS)
for answer in answers:
result_map["ns_server"]["result"].append(answer.to_text())
except dns.resolver.Timeout:
error_result["result"][0]["result"] = f"{__plugin__} scan timeout"
return error_result
except:
return error_result
for ns_server in result_map["ns_server"]["result"]:
for dns_provider in DNS_PROVIDER:
if re.search(f"\\.{dns_provider}", ns_server):
providers.append(DNS_PROVIDER[dns_provider])
result_map["providers"]["result"] = list(set(providers)) if providers else ["Unknown"]
for result in result_map.values():
result["result"] = [{"name": item} for item in result["result"]]
scan_result["result"] = sorted(
[item for item in result_map.values()], key=lambda x: x.get("sequence", 0)
)
return scan_result
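# Illustrative usage sketch (assumption: callers pass a parsed URL object, since
# run() only reads url.netloc; note this performs live DNS queries):
#
#   from urllib.parse import urlsplit
#   print(run(urlsplit('https://www.example.com/'))['result'])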
|
the-stack_106_20254
|
import sys
import time
import rospy
from sensor_msgs.msg import Image as msg_Image
from sensor_msgs.msg import PointCloud2 as msg_PointCloud2
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Imu as msg_Imu
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import inspect
import ctypes
import struct
import tf
try:
from theora_image_transport.msg import Packet as msg_theora
except Exception:
pass
def pc2_to_xyzrgb(point):
# Thanks to Panos for his code used in this function.
x, y, z = point[:3]
rgb = point[3]
# cast float32 to int so that bitwise operations are possible
s = struct.pack('>f', rgb)
i = struct.unpack('>l', s)[0]
# you can get back the float value by the inverse operations
pack = ctypes.c_uint32(i).value
r = (pack & 0x00FF0000) >> 16
g = (pack & 0x0000FF00) >> 8
b = (pack & 0x000000FF)
return x, y, z, r, g, b
class CWaitForMessage:
def __init__(self, params={}):
self.result = None
self.break_timeout = False
self.timeout = params.get('timeout_secs', -1) * 1e-3
self.seq = params.get('seq', -1)
self.time = params.get('time', None)
self.node_name = params.get('node_name', 'rs2_listener')
self.bridge = CvBridge()
self.listener = None
self.prev_msg_time = 0
self.fout = None
self.themes = {'depthStream': {'topic': '/camera/depth/image_rect_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
'colorStream': {'topic': '/camera/color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
'pointscloud': {'topic': '/camera/depth/color/points', 'callback': self.pointscloudCallback, 'msg_type': msg_PointCloud2},
'alignedDepthInfra1': {'topic': '/camera/aligned_depth_to_infra1/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
'alignedDepthColor': {'topic': '/camera/aligned_depth_to_color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
'static_tf': {'topic': '/camera/color/image_raw', 'callback': self.imageColorCallback, 'msg_type': msg_Image},
'accelStream': {'topic': '/camera/accel/sample', 'callback': self.imuCallback, 'msg_type': msg_Imu},
}
self.func_data = dict()
def imuCallback(self, theme_name):
def _imuCallback(data):
if self.listener is None:
self.listener = tf.TransformListener()
self.prev_time = time.time()
self.func_data[theme_name].setdefault('value', [])
self.func_data[theme_name].setdefault('ros_value', [])
try:
frame_id = data.header.frame_id
value = data.linear_acceleration
(trans,rot) = self.listener.lookupTransform('/camera_link', frame_id, rospy.Time(0))
quat = tf.transformations.quaternion_matrix(rot)
point = np.matrix([value.x, value.y, value.z, 1], dtype='float32')
point.resize((4, 1))
rotated = quat*point
rotated.resize(1,4)
rotated = np.array(rotated)[0][:3]
except Exception as e:
print(e)
return
self.func_data[theme_name]['value'].append(value)
self.func_data[theme_name]['ros_value'].append(rotated)
return _imuCallback
def imageColorCallback(self, theme_name):
def _imageColorCallback(data):
self.prev_time = time.time()
self.func_data[theme_name].setdefault('avg', [])
self.func_data[theme_name].setdefault('ok_percent', [])
self.func_data[theme_name].setdefault('num_channels', [])
self.func_data[theme_name].setdefault('shape', [])
self.func_data[theme_name].setdefault('reported_size', [])
try:
cv_image = self.bridge.imgmsg_to_cv2(data, data.encoding)
except CvBridgeError as e:
print(e)
return
channels = cv_image.shape[2] if len(cv_image.shape) > 2 else 1
pyimg = np.asarray(cv_image)
ok_number = (pyimg != 0).sum()
self.func_data[theme_name]['avg'].append(pyimg.sum() / ok_number)
self.func_data[theme_name]['ok_percent'].append(float(ok_number) / (pyimg.shape[0] * pyimg.shape[1]) / channels)
self.func_data[theme_name]['num_channels'].append(channels)
self.func_data[theme_name]['shape'].append(cv_image.shape)
self.func_data[theme_name]['reported_size'].append((data.width, data.height, data.step))
return _imageColorCallback
def imageDepthCallback(self, data):
pass
def pointscloudCallback(self, theme_name):
def _pointscloudCallback(data):
self.prev_time = time.time()
print ('Got pointcloud: %d, %d' % (data.width, data.height))
self.func_data[theme_name].setdefault('frame_counter', 0)
self.func_data[theme_name].setdefault('avg', [])
self.func_data[theme_name].setdefault('size', [])
self.func_data[theme_name].setdefault('width', [])
self.func_data[theme_name].setdefault('height', [])
# until parsing pointcloud is done in real time, I'll use only the first frame.
self.func_data[theme_name]['frame_counter'] += 1
if self.func_data[theme_name]['frame_counter'] == 1:
# Known issue - 1st pointcloud published has invalid texture. Skip 1st frame.
return
try:
points = np.array([pc2_to_xyzrgb(pp) for pp in pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z", "rgb")) if pp[0] > 0])
except Exception as e:
print(e)
return
self.func_data[theme_name]['avg'].append(points.mean(0))
self.func_data[theme_name]['size'].append(len(points))
self.func_data[theme_name]['width'].append(data.width)
self.func_data[theme_name]['height'].append(data.height)
return _pointscloudCallback
def wait_for_message(self, params, msg_type=msg_Image):
topic = params['topic']
print ('connect to ROS with name: %s' % self.node_name)
rospy.init_node(self.node_name, anonymous=True)
out_filename = params.get('filename', None)
if (out_filename):
self.fout = open(out_filename, 'w')
if msg_type is msg_Imu:
col_w = 20
print ('Writing to file: %s' % out_filename)
columns = ['frame_number', 'frame_time(sec)', 'accel.x', 'accel.y', 'accel.z', 'gyro.x', 'gyro.y', 'gyro.z']
line = ('{:<%d}'*len(columns) % (col_w, col_w, col_w, col_w, col_w, col_w, col_w, col_w)).format(*columns) + '\n'
sys.stdout.write(line)
self.fout.write(line)
rospy.loginfo('Subscribing on topic: %s' % topic)
self.sub = rospy.Subscriber(topic, msg_type, self.callback)
self.prev_time = time.time()
break_timeout = False
while not any([rospy.core.is_shutdown(), break_timeout, self.result]):
rospy.rostime.wallsleep(0.5)
if self.timeout > 0 and time.time() - self.prev_time > self.timeout:
break_timeout = True
self.sub.unregister()
return self.result
@staticmethod
def unregister_all(registers):
for test_name in registers:
rospy.loginfo('Un-Subscribing test %s' % test_name)
registers[test_name]['sub'].unregister()
def wait_for_messages(self, themes):
# tests_params = {<name>: {'callback', 'topic', 'msg_type', 'internal_params'}}
self.func_data = dict([[theme_name, {}] for theme_name in themes])
print ('connect to ROS with name: %s' % self.node_name)
rospy.init_node(self.node_name, anonymous=True)
for theme_name in themes:
theme = self.themes[theme_name]
rospy.loginfo('Subscribing %s on topic: %s' % (theme_name, theme['topic']))
self.func_data[theme_name]['sub'] = rospy.Subscriber(theme['topic'], theme['msg_type'], theme['callback'](theme_name))
self.prev_time = time.time()
break_timeout = False
while not any([rospy.core.is_shutdown(), break_timeout]):
rospy.rostime.wallsleep(0.5)
if self.timeout > 0 and time.time() - self.prev_time > self.timeout:
break_timeout = True
self.unregister_all(self.func_data)
return self.func_data
def callback(self, data):
msg_time = data.header.stamp.secs + 1e-9 * data.header.stamp.nsecs
if (self.prev_msg_time > msg_time):
rospy.loginfo('Out of order: %.9f > %.9f' % (self.prev_msg_time, msg_time))
if type(data) == msg_Imu:
col_w = 20
frame_number = data.header.seq
accel = data.linear_acceleration
gyro = data.angular_velocity
line = ('\n{:<%d}{:<%d.6f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}{:<%d.4f}' % (col_w, col_w, col_w, col_w, col_w, col_w, col_w, col_w)).format(frame_number, msg_time, accel.x, accel.y, accel.z, gyro.x, gyro.y, gyro.z)
sys.stdout.write(line)
if self.fout:
self.fout.write(line)
self.prev_msg_time = msg_time
self.prev_msg_data = data
self.prev_time = time.time()
if any([self.seq < 0 and self.time is None,
self.seq > 0 and data.header.seq >= self.seq,
self.time and data.header.stamp.secs == self.time['secs'] and data.header.stamp.nsecs == self.time['nsecs']]):
self.result = data
self.sub.unregister()
def main():
if len(sys.argv) < 2 or '--help' in sys.argv or '/?' in sys.argv:
print ('USAGE:')
print ('------')
print ('rs2_listener.py <topic | theme> [Options]')
print ('example: rs2_listener.py /camera/color/image_raw --time 1532423022.044515610 --timeout 3')
print ('example: rs2_listener.py pointscloud')
print ('')
print ('Application subscribes on <topic>, wait for the first message matching [Options].')
print ('When found, prints the timestamp.')
print
print ('[Options:]')
print ('-s <sequential number>')
print ('--time <secs.nsecs>')
print ('--timeout <secs>')
print ('--filename <filename> : write output to file')
exit(-1)
# wanted_topic = '/device_0/sensor_0/Depth_0/image/data'
# wanted_seq = 58250
wanted_topic = sys.argv[1]
msg_params = {}
if 'points' in wanted_topic:
msg_type = msg_PointCloud2
elif ('imu' in wanted_topic) or ('gyro' in wanted_topic) or ('accel' in wanted_topic):
msg_type = msg_Imu
elif 'theora' in wanted_topic:
try:
msg_type = msg_theora
except NameError as e:
print ('theora_image_transport is not installed. \nType "sudo apt-get install ros-kinetic-theora-image-transport" to enable registering on messages of type theora.')
raise
else:
msg_type = msg_Image
for idx in range(2, len(sys.argv)):
if sys.argv[idx] == '-s':
msg_params['seq'] = int(sys.argv[idx + 1])
if sys.argv[idx] == '--time':
msg_params['time'] = dict(zip(['secs', 'nsecs'], [int(part) for part in sys.argv[idx + 1].split('.')]))
if sys.argv[idx] == '--timeout':
msg_params['timeout_secs'] = int(sys.argv[idx + 1])
if sys.argv[idx] == '--filename':
msg_params['filename'] = sys.argv[idx+1]
msg_retriever = CWaitForMessage(msg_params)
if '/' in wanted_topic:
msg_params.setdefault('topic', wanted_topic)
res = msg_retriever.wait_for_message(msg_params, msg_type)
rospy.loginfo('Got message: %s' % res.header)
else:
themes = [wanted_topic]
res = msg_retriever.wait_for_messages(themes)
print (res)
if __name__ == '__main__':
main()
|
the-stack_106_20258
|
"""
Extending the Button Context Menu
+++++++++++++++++++++++++++++++++
This example enables you to insert your own menu entry into the common
right click menu that you get while hovering over a value field,
color, string, etc.
To make the example work, you have to first select an object
then right click on a user interface element (maybe a color in the
material properties) and choose *Execute Custom Action*.
Executing the operator will then print all values.
"""
import bpy
from bpy.types import Menu
def dump(obj, text):
for attr in dir(obj):
print("%r.%s = %s" % (obj, attr, getattr(obj, attr)))
class WM_OT_button_context_test(bpy.types.Operator):
"""Right click entry test"""
bl_idname = "wm.button_context_test"
bl_label = "Run Context Test"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
value = getattr(context, "button_pointer", None)
if value is not None:
dump(value, "button_pointer")
value = getattr(context, "button_prop", None)
if value is not None:
dump(value, "button_prop")
value = getattr(context, "button_operator", None)
if value is not None:
dump(value, "button_operator")
return {'FINISHED'}
# This class has to be named exactly like this to insert an entry into the right-click menu
class WM_MT_button_context(Menu):
bl_label = "Unused"
def draw(self, context):
pass
def menu_func(self, context):
layout = self.layout
layout.separator()
layout.operator(WM_OT_button_context_test.bl_idname)
classes = (
WM_OT_button_context_test,
WM_MT_button_context,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.WM_MT_button_context.append(menu_func)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
bpy.types.WM_MT_button_context.remove(menu_func)
if __name__ == "__main__":
register()
|
the-stack_106_20260
|
import json
STATUSES = {
200: "OK",
400: "Bad requests",
409: "Nick already exists",
410: "Wrong nick",
403: "Forbidden",
418: "Unclassified error"
}
class ProtocolException(Exception):
def __init__(self, message):
super().__init__(message, None)
def form_service(attr={}):
service_message = {"type": "service"}
service_message.update(attr)
return service_message
def update_service(service, attr):
service.update(attr)
return service
# Form a status message. ext_inf must be a str with error info.
# message_id must be the id of the delivered message.
# If message_id is not passed to the function, the result will not
# contain a "messageId" field.
def new_status(code=200, ext_inf=None, message_id=None):
status = form_service({"status": code})
if code != 200:
status["error_code"] = code
if ext_inf != None:
status["error_info"] = ext_inf
else:
status["error_info"] = STATUSES[code]
if message_id != None:
status["messageId"] = message_id
return status
# form message dict from string
def form_message(attr={}):
message = {"type": "message"}
message.update(attr)
return message
# the "message" arg must be dict (e.g. {"type":"message", "message": "bla-bla"})
# or a string that contains the message text (e.g. "bla-bla")
def new_message(message):
if type(message) == str:
message = {"message": message}
return form_message(message)
def dump(message):
return json.dumps(message)
def load(json_protocol):
try:
parsed_protocol = json.loads(json_protocol)
_type = parsed_protocol["type"]
return _type == "message", parsed_protocol
except json.JSONDecodeError:
raise ProtocolException("Non-JSON message detected")
except KeyError:
raise ProtocolException('Missing "type" field')
def check_error(parsed_protocol):
if parsed_protocol["status"] // 100 == 2:
return None
return parsed_protocol["error_info"]
|
the-stack_106_20261
|
import onmt
import torch
import argparse
import math
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-src', required=True,
help='Source sequence to decode (one line per sequence)')
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                    be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', default=100,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the source
token that had the highest attention weight. If phrase_table
is provided, it will lookup the identified source token and
give the corresponding target token. If it is not provided
(or the identified source token does not exist in the
table) then it will copy the source token""")
# parser.add_argument('-phrase_table',
# help="""Path to source-target dictionary to replace UNK
# tokens. See README.md for the format of this file.""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def reportScore(name, scoreTotal, wordsTotal):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, scoreTotal / wordsTotal,
name, math.exp(-scoreTotal/wordsTotal)))
def addone(f):
for line in f:
yield line
yield None
def main():
opt = parser.parse_args()
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
translator = onmt.Translator(opt)
outF = open(opt.output, 'w')
predScoreTotal, predWordsTotal, goldScoreTotal, goldWordsTotal = 0, 0, 0, 0
srcBatch, tgtBatch = [], []
count = 0
tgtF = open(opt.tgt) if opt.tgt else None
for line in addone(open(opt.src)):
if line is not None:
srcTokens = line.split()
srcBatch += [srcTokens]
if tgtF:
tgtTokens = tgtF.readline().split() if tgtF else None
tgtBatch += [tgtTokens]
if len(srcBatch) < opt.batch_size:
continue
else:
# at the end of file, check last batch
if len(srcBatch) == 0:
break
predBatch, predScore, goldScore = translator.translate(srcBatch, tgtBatch)
predScoreTotal += sum(score[0] for score in predScore)
predWordsTotal += sum(len(x[0]) for x in predBatch)
if tgtF is not None:
goldScoreTotal += sum(goldScore)
goldWordsTotal += sum(len(x) for x in tgtBatch)
for b in range(len(predBatch)):
count += 1
outF.write(" ".join(predBatch[b][0]) + '\n')
outF.flush()
if opt.verbose:
srcSent = ' '.join(srcBatch[b])
if translator.tgt_dict.lower:
srcSent = srcSent.lower()
print('SENT %d: %s' % (count, srcSent))
print('PRED %d: %s' % (count, " ".join(predBatch[b][0])))
print("PRED SCORE: %.4f" % predScore[b][0])
if tgtF is not None:
tgtSent = ' '.join(tgtBatch[b])
if translator.tgt_dict.lower:
tgtSent = tgtSent.lower()
print('GOLD %d: %s ' % (count, tgtSent))
print("GOLD SCORE: %.4f" % goldScore[b])
if opt.n_best > 1:
print('\nBEST HYP:')
for n in range(opt.n_best):
print("[%.4f] %s" % (predScore[b][n], " ".join(predBatch[b][n])))
print('')
srcBatch, tgtBatch = [], []
reportScore('PRED', predScoreTotal, predWordsTotal)
if tgtF:
reportScore('GOLD', goldScoreTotal, goldWordsTotal)
if tgtF:
tgtF.close()
if __name__ == "__main__":
main()
|
the-stack_106_20262
|
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Marijn van Vliet <[email protected]>
# Jona Sassenhagen <[email protected]>
# Teon Brooks <[email protected]>
#
# License: Simplified BSD
import logging
from collections import defaultdict
from itertools import combinations
import os.path as op
import numpy as np
from ..transforms import _polar_to_cartesian, _cartesian_to_sphere
from ..bem import fit_sphere_to_headshape
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..utils import _clean_names, warn
from ..externals.six.moves import map
class Layout(object):
"""Sensor layouts
Layouts are typically loaded from a file using read_layout. Only use this
class directly if you're constructing a new layout.
Parameters
----------
box : tuple of length 4
The box dimension (x_min, x_max, y_min, y_max).
pos : array, shape=(n_channels, 4)
The positions of the channels in 2d (x, y, width, height).
names : list
The channel names.
ids : list
The channel ids.
kind : str
The type of Layout (e.g. 'Vectorview-all').
"""
def __init__(self, box, pos, names, ids, kind):
self.box = box
self.pos = pos
self.names = names
self.ids = ids
self.kind = kind
def save(self, fname):
"""Save Layout to disk
Parameters
----------
fname : str
The file name (e.g. 'my_layout.lout').
See Also
--------
read_layout
"""
x = self.pos[:, 0]
y = self.pos[:, 1]
width = self.pos[:, 2]
height = self.pos[:, 3]
if fname.endswith('.lout'):
out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
elif fname.endswith('.lay'):
out_str = ''
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
for ii in range(x.shape[0]):
out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
x[ii], y[ii], width[ii], height[ii], self.names[ii]))
f = open(fname, 'w')
f.write(out_str)
f.close()
def __repr__(self):
return '<Layout | %s - Channels: %s ...>' % (self.kind,
', '.join(self.names[:3]))
def plot(self, show=True):
"""Plot the sensor positions.
Parameters
----------
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
from ..viz.topomap import plot_layout
return plot_layout(self, show=show)
def _read_lout(fname):
"""Aux function"""
with open(fname) as f:
box_line = f.readline() # first line contains box dimension
box = tuple(map(float, box_line.split()))
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def _read_lay(fname):
"""Aux function"""
with open(fname) as f:
box = None
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def read_layout(kind, path=None, scale=True):
"""Read layout from a file
Parameters
----------
kind : str
The name of the .lout file (e.g. kind='Vectorview-all' for
'Vectorview-all.lout').
path : str | None
The path of the folder containing the Layout file. Defaults to the
mne/channels/data/layouts folder inside your mne-python installation.
scale : bool
        Apply useful scaling for out-of-the-box plotting using layout.pos.
Defaults to True.
Returns
-------
layout : instance of Layout
The layout.
See Also
--------
Layout.save
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'layouts')
if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
kind += '.lout'
elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
kind += '.lay'
if kind.endswith('.lout'):
fname = op.join(path, kind)
kind = kind[:-5]
box, pos, names, ids = _read_lout(fname)
elif kind.endswith('.lay'):
fname = op.join(path, kind)
kind = kind[:-4]
box, pos, names, ids = _read_lay(fname)
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
if scale:
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
"""Create .lout file from EEG electrode digitization
Parameters
----------
info : instance of Info
Measurement info (e.g., raw.info).
radius : float
Viewport radius as a fraction of main figure height. Defaults to 0.5.
width : float | None
Width of sensor axes as a fraction of main figure height. By default,
this will be the maximum width possible without axes overlapping.
height : float | None
Height of sensor axes as a fraction of main figure height. By default,
        this will be the maximum height possible without axes overlapping.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout
The generated Layout.
See Also
--------
make_grid_layout, generate_2d_layout
"""
if not (0 <= radius <= 0.5):
raise ValueError('The radius parameter should be between 0 and 0.5.')
if width is not None and not (0 <= width <= 1.0):
raise ValueError('The width parameter should be between 0 and 1.')
if height is not None and not (0 <= height <= 1.0):
raise ValueError('The height parameter should be between 0 and 1.')
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
loc2d = _auto_topomap_coords(info, picks)
names = [info['chs'][i]['ch_name'] for i in picks]
# Scale [x, y] to [-0.5, 0.5]
loc2d_min = np.min(loc2d, axis=0)
loc2d_max = np.max(loc2d, axis=0)
loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
# If no width or height specified, calculate the maximum value possible
# without axes overlapping.
if width is None or height is None:
width, height = _box_size(loc2d, width, height, padding=0.1)
# Scale to viewport radius
loc2d *= 2 * radius
# Some subplot centers will be at the figure edge. Shrink everything so it
# fits in the figure.
scaling = min(1 / (1. + width), 1 / (1. + height))
loc2d *= scaling
width *= scaling
height *= scaling
# Shift to center
loc2d += 0.5
n_channels = loc2d.shape[0]
pos = np.c_[loc2d[:, 0] - 0.5 * width,
loc2d[:, 1] - 0.5 * height,
width * np.ones(n_channels),
height * np.ones(n_channels)]
box = (0, 1, 0, 1)
ids = 1 + np.arange(n_channels)
layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
return layout
def make_grid_layout(info, picks=None, n_col=None):
""" Generate .lout file for custom data, i.e., ICA sources
Parameters
----------
info : instance of Info | None
Measurement info (e.g., raw.info). If None, default names will be
employed.
picks : array-like of int | None
        The indices of the channels to be included. If None, all misc channels
will be included.
n_col : int | None
Number of columns to generate. If None, a square grid will be produced.
Returns
-------
layout : Layout
The generated layout.
See Also
--------
make_eeg_layout, generate_2d_layout
"""
if picks is None:
picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
names = [info['chs'][k]['ch_name'] for k in picks]
if not names:
raise ValueError('No misc data channels found.')
ids = list(range(len(picks)))
size = len(picks)
if n_col is None:
# prepare square-like layout
n_row = n_col = np.sqrt(size) # try square
if n_col % 1:
# try n * (n-1) rectangle
n_col, n_row = int(n_col + 1), int(n_row)
if n_col * n_row < size: # jump to the next full square
n_row += 1
else:
n_row = int(np.ceil(size / float(n_col)))
# setup position grid
x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
np.linspace(-0.5, 0.5, n_row))
x, y = x.ravel()[:size], y.ravel()[:size]
width, height = _box_size(np.c_[x, y], padding=0.1)
# Some axes will be at the figure edge. Shrink everything so it fits in the
# figure. Add 0.01 border around everything
border_x, border_y = (0.01, 0.01)
x_scaling = 1 / (1. + width + border_x)
y_scaling = 1 / (1. + height + border_y)
x = x * x_scaling
y = y * y_scaling
width *= x_scaling
height *= y_scaling
# Shift to center
x += 0.5
y += 0.5
# calculate pos
pos = np.c_[x - 0.5 * width, y - 0.5 * height,
width * np.ones(size), height * np.ones(size)]
box = (0, 1, 0, 1)
layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
return layout
def find_layout(info, ch_type=None, exclude='bads'):
"""Choose a layout based on the channels in the info 'chs' field
Parameters
----------
info : instance of Info
The measurement info.
ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
The channel type for selecting single channel layouts.
Defaults to None. Note, this argument will only be considered for
        VectorView type layout. Use `meg` to force using the full layout
        in situations where the info only contains one sensor type.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout instance | None
None if layout not found.
"""
our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`'])
if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
raise ValueError('Invalid channel type (%s) requested '
'`ch_type` must be %s' % (ch_type, our_types))
chs = info['chs']
coil_types = set([ch['coil_type'] for ch in chs])
channel_types = set([ch['kind'] for ch in chs])
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_vv_meg = has_vv_mag and has_vv_grad
has_vv_only_mag = has_vv_mag and not has_vv_grad
has_vv_only_grad = has_vv_grad and not has_vv_mag
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
n_kit_grads = sum(ch['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
if ch_type == "meg" and not has_any_meg:
raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
if ch_type == "eeg" and not has_eeg_coils:
raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
if ((has_vv_meg and ch_type is None) or
(any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
layout_name = 'Vectorview-all'
elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
layout_name = 'Vectorview-mag'
elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
if info['ch_names'][0].endswith('X'):
layout_name = 'Vectorview-grad_norm'
else:
layout_name = 'Vectorview-grad'
elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
(has_eeg_coils_and_meg and ch_type == 'eeg')):
if not isinstance(info, (dict, Info)):
raise RuntimeError('Cannot make EEG layout, no measurement info '
'was passed to `find_layout`')
return make_eeg_layout(info, exclude=exclude)
elif has_4D_mag:
layout_name = 'magnesWH3600'
elif has_CTF_grad:
layout_name = 'CTF-275'
elif n_kit_grads > 0:
layout_name = _find_kit_layout(info, n_kit_grads)
else:
return None
layout = read_layout(layout_name)
if not is_old_vv:
layout.names = _clean_names(layout.names, remove_whitespace=True)
if has_CTF_grad:
layout.names = _clean_names(layout.names, before_dash=True)
return layout
def _find_kit_layout(info, n_grads):
"""Determine the KIT layout
Parameters
----------
info : Info
Info object.
n_grads : int
Number of KIT-gradiometers in the info.
Returns
-------
kit_layout : str
One of 'KIT-AD', 'KIT-157' or 'KIT-UMD'.
"""
if info['kit_system_id'] is not None:
# avoid circular import
from ..io.kit.constants import KIT_LAYOUT
if info['kit_system_id'] in KIT_LAYOUT:
kit_layout = KIT_LAYOUT[info['kit_system_id']]
if kit_layout is not None:
return kit_layout
raise NotImplementedError("The layout for the KIT system with ID %i "
"is missing. Please contact the developers "
"about adding it." % info['kit_system_id'])
elif n_grads > 157:
return 'KIT-AD'
# channels which are on the left hemisphere for NY and right for UMD
test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25',
'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30',
'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61',
'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90',
'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112',
'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124',
'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153',
'MEG 154', 'MEG 155', 'MEG 156')
x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs]
if np.all(x):
return 'KIT-157' # KIT-NY
elif np.all(np.invert(x)):
raise NotImplementedError("Guessing sensor layout for legacy UMD "
"files is not implemented. Please convert "
"your files using MNE-Python 0.13 or "
"higher.")
else:
raise RuntimeError("KIT system could not be determined for data")
def _box_size(points, width=None, height=None, padding=0.0):
""" Given a series of points, calculate an appropriate box size.
Parameters
----------
points : array, shape (n_points, 2)
The centers of the axes as a list of (x, y) coordinate pairs. Normally
these are points in the range [0, 1] centered at 0.5.
width : float | None
An optional box width to enforce. When set, only the box height will be
calculated by the function.
height : float | None
An optional box height to enforce. When set, only the box width will be
calculated by the function.
padding : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
Returns
-------
width : float
Width of the box
height : float
Height of the box
"""
from scipy.spatial.distance import pdist
def xdiff(a, b):
return np.abs(a[0] - b[0])
def ydiff(a, b):
return np.abs(a[1] - b[1])
points = np.asarray(points)
all_combinations = list(combinations(points, 2))
if width is None and height is None:
if len(points) <= 1:
# Trivial case first
width = 1.0
height = 1.0
else:
# Find the closest two points A and B.
a, b = all_combinations[np.argmin(pdist(points))]
# The closest points define either the max width or max height.
w, h = xdiff(a, b), ydiff(a, b)
if w > h:
width = w
else:
height = h
# At this point, either width or height is known, or both are known.
if height is None:
# Find all axes that could potentially overlap horizontally.
hdist = pdist(points, xdiff)
candidates = [all_combinations[i] for i, d in enumerate(hdist)
if d < width]
if len(candidates) == 0:
# No axes overlap, take all the height you want.
height = 1.0
else:
            # Find an appropriate height so that none of the found axes
            # will overlap.
height = np.min([ydiff(*c) for c in candidates])
elif width is None:
# Find all axes that could potentially overlap vertically.
vdist = pdist(points, ydiff)
candidates = [all_combinations[i] for i, d in enumerate(vdist)
if d < height]
if len(candidates) == 0:
# No axes overlap, take all the width you want.
width = 1.0
else:
            # Find an appropriate width so that none of the found axes
            # will overlap.
width = np.min([xdiff(*c) for c in candidates])
# Add a bit of padding between boxes
width *= 1 - padding
height *= 1 - padding
return width, height
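# A tiny worked example of _box_size (synthetic points, padding left at 0.0):
#     >>> _box_size(np.array([[0.25, 0.5], [0.75, 0.5]]))
#     (0.5, 1.0)
# The closest pair is 0.5 apart horizontally, so the width is capped at 0.5;
# with that width no pair overlaps horizontally, so the height falls back to 1.0.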
def _find_topomap_coords(info, picks, layout=None):
"""Try to guess the E/MEG layout and return appropriate topomap coordinates
Parameters
----------
info : instance of Info
Measurement info.
picks : list of int
Channel indices to generate topomap coords for.
layout : None | instance of Layout
        Enforce using a specific layout. With None, a new map is generated
        based on the channels given in the picks parameter.
Returns
-------
coords : array, shape = (n_chs, 2)
2 dimensional coordinates for each sensor for a topomap plot.
"""
if len(picks) == 0:
raise ValueError("Need more than 0 channels.")
if layout is not None:
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(info, picks)
return pos
def _auto_topomap_coords(info, picks, ignore_overlap=False):
"""Make a 2 dimensional sensor map from sensor positions in an info dict.
The default is to use the electrode locations. The fallback option is to
attempt using digitization points of kind FIFFV_POINT_EEG. This only works
with EEG and requires an equal number of digitization points and sensors.
Parameters
----------
info : instance of Info
The measurement info.
picks : list of int
The channel indices to generate topomap coords for.
Returns
-------
locs : array, shape = (n_sensors, 2)
An array of positions of the 2 dimensional map.
"""
from scipy.spatial.distance import pdist, squareform
chs = [info['chs'][i] for i in picks]
# Use channel locations if available
locs3d = np.array([ch['loc'][:3] for ch in chs])
    # If electrode locations are not available, use digitization points
if len(locs3d) == 0 or np.allclose(locs3d, 0):
        logging.warning('Did not find any electrode locations in the info, '
'will attempt to use digitization points instead. '
'However, if digitization points do not correspond to '
'the EEG electrodes, this will lead to bad results. '
'Please verify that the sensor locations in the plot '
'are accurate.')
# MEG/EOG/ECG sensors don't have digitization points; all requested
# channels must be EEG
for ch in chs:
if ch['kind'] != FIFF.FIFFV_EEG_CH:
raise ValueError("Cannot determine location of MEG/EOG/ECG "
"channels using digitization points.")
eeg_ch_names = [ch['ch_name'] for ch in info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH]
# Get EEG digitization points
if info['dig'] is None or len(info['dig']) == 0:
raise RuntimeError('No digitization points found.')
locs3d = np.array([point['r'] for point in info['dig']
if point['kind'] == FIFF.FIFFV_POINT_EEG])
if len(locs3d) == 0:
raise RuntimeError('Did not find any digitization points of '
'kind FIFFV_POINT_EEG (%d) in the info.'
% FIFF.FIFFV_POINT_EEG)
if len(locs3d) != len(eeg_ch_names):
raise ValueError("Number of EEG digitization points (%d) "
"doesn't match the number of EEG channels "
"(%d)" % (len(locs3d), len(eeg_ch_names)))
# Center digitization points on head origin
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')
locs3d -= origin_head
# Match the digitization points with the requested
# channels.
eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
# Duplicate points cause all kinds of trouble during visualization
dist = pdist(locs3d)
if np.min(dist) < 1e-10 and not ignore_overlap:
problematic_electrodes = [
chs[elec_i]['ch_name']
for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0]
]
raise ValueError('The following electrodes have overlapping positions:'
'\n ' + str(problematic_electrodes) + '\nThis '
'causes problems during visualization.')
x, y, z = locs3d.T
az, el, r = _cartesian_to_sphere(x, y, z)
locs2d = np.c_[_polar_to_cartesian(az, np.pi / 2 - el)]
return locs2d
def _topo_to_sphere(pos, eegs):
"""Helper function for transforming xy-coordinates to sphere.
Parameters
----------
pos : array-like, shape (n_channels, 2)
        xy-coordinates to transform.
eegs : list of int
Indices of eeg channels that are included when calculating the sphere.
Returns
-------
coords : array, shape (n_channels, 3)
xyz-coordinates.
"""
xs, ys = np.array(pos).T
sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2)))
xs /= sqs # Shape to a sphere and normalize
ys /= sqs
xs += 0.5 - np.mean(xs[eegs]) # Center the points
ys += 0.5 - np.mean(ys[eegs])
xs = xs * 2. - 1. # Values ranging from -1 to 1
ys = ys * 2. - 1.
rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.)
alphas = np.arccos(rs)
zs = np.sin(alphas)
return np.column_stack([xs, ys, zs])
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads',
raise_error=True):
"""Find the picks for pairing grad channels
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
raise_error : bool
Whether to raise an error when no pairs are found. If False, raises a
warning.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info['chs'][i]
name = ch['ch_name']
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
if raise_error:
raise ValueError("No 'grad' channel pairs found.")
else:
warn("No 'grad' channel pairs found.")
return list()
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info['ch_names']
picks = [ch_names.index(c['ch_name']) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = (_find_topomap_coords(info, picks, layout)
.reshape(shape).mean(axis=1))
return picks, coords
else:
return picks
# This function is used to pair grad channels when no Info object is available,
# e.g. for Projection objects that do not carry measurement info.
def _pair_grad_sensors_from_ch_names(ch_names):
"""Find the indexes for pairing grad channels
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
indexes : list of int
Indexes of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
def _merge_grad_data(data):
"""Merge data from channel pairs using the RMS
Parameters
----------
data : array, shape = (n_channels, n_times)
Data for channels, ordered in pairs.
Returns
-------
data : array, shape = (n_channels / 2, n_times)
The root mean square for each pair.
"""
data = data.reshape((len(data) // 2, 2, -1))
data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
return data
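# Sketch of the pair merge (synthetic numbers, not real gradiometer data):
#     data = np.array([[3., 3.], [4., 4.]])   # one channel pair, two time samples
#     _merge_grad_data(data)                  # -> [[~3.536, ~3.536]]
# each output sample is sqrt((3**2 + 4**2) / 2) for the pair.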
def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
ch_indices=None, name='ecog', bg_image=None):
"""Generate a custom 2D layout from xy points.
Generates a 2-D layout for plotting with plot_topo methods and
functions. XY points will be normalized between 0 and 1, where
normalization extremes will be either the min/max of xy, or
the width/height of bg_image.
Parameters
----------
xy : ndarray (N x 2)
The xy coordinates of sensor locations.
w : float
The width of each sensor's axis (between 0 and 1)
h : float
The height of each sensor's axis (between 0 and 1)
pad : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
ch_names : list
The names of each channel. Must be a list of strings, with one
string per channel.
ch_indices : list
Index of each channel - must be a collection of unique integers,
one index per channel.
name : string
The name of this layout type.
bg_image : str | ndarray
The image over which sensor axes will be plotted. Either a path to an
image file, or an array that can be plotted with plt.imshow. If
provided, xy points will be normalized by the width/height of this
image. If not, xy points will be normalized by their own min/max.
Returns
-------
layout : Layout
A Layout object that can be plotted with plot_topo
functions and methods.
See Also
--------
make_eeg_layout, make_grid_layout
Notes
-----
.. versionadded:: 0.9.0
"""
from scipy.ndimage import imread
if ch_indices is None:
ch_indices = np.arange(xy.shape[0])
if ch_names is None:
ch_names = ['{0}'.format(i) for i in ch_indices]
if len(ch_names) != len(ch_indices):
raise ValueError('# ch names and indices must be equal')
if len(ch_names) != len(xy):
raise ValueError('# ch names and xy vals must be equal')
x, y = xy.copy().astype(float).T
# Normalize xy to 0-1
if bg_image is not None:
# Normalize by image dimensions
if isinstance(bg_image, str):
img = imread(bg_image)
else:
img = bg_image
x /= img.shape[1]
y /= img.shape[0]
else:
# Normalize x and y by their maxes
for i_dim in [x, y]:
i_dim -= i_dim.min(0)
i_dim /= (i_dim.max(0) - i_dim.min(0))
# Create box and pos variable
box = _box_size(np.vstack([x, y]).T, padding=pad)
box = (0, 0, box[0], box[1])
w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
loc_params = np.vstack([x, y, w, h]).T
layout = Layout(box, loc_params, ch_names, ch_indices, name)
return layout
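# Hypothetical usage sketch of generate_2d_layout (random positions, made-up name):
#     xy = np.random.rand(16, 2)                        # 16 fake sensor locations
#     lt = generate_2d_layout(xy, w=.07, h=.05, name='ecog')
#     lt.plot()                                         # quick visual check of the boxes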
|
the-stack_106_20265
|
#!/usr/bin/env python3
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
Extra supported commands are:
* gen, to generate the classes required for Telethon to run or docs
* pypi, to generate sdist, bdist_wheel, and push to PyPi
"""
import itertools
import json
import os
import re
import shutil
import sys
from pathlib import Path
from subprocess import run
from setuptools import find_packages, setup
# Needed since we're importing local files
sys.path.insert(0, os.path.dirname(__file__))
class TempWorkDir:
"""Switches the working directory to be the one on which this file lives,
while within the 'with' block.
"""
def __init__(self, new=None):
self.original = None
self.new = new or str(Path(__file__).parent.resolve())
def __enter__(self):
# os.chdir does not work with Path in Python 3.5.x
self.original = str(Path(".").resolve())
os.makedirs(self.new, exist_ok=True)
os.chdir(self.new)
return self
def __exit__(self, *args):
os.chdir(self.original)
GENERATOR_DIR = Path("telethon_generator")
LIBRARY_DIR = Path("telethon")
ERRORS_IN = GENERATOR_DIR / "data/errors.csv"
ERRORS_OUT = LIBRARY_DIR / "errors/_generated.py"
METHODS_IN = GENERATOR_DIR / "data/methods.csv"
# Which raw API methods are covered by *friendly* methods in the client?
FRIENDLY_IN = GENERATOR_DIR / "data/friendly.csv"
TLOBJECT_IN_TLS = [Path(x) for x in sorted(GENERATOR_DIR.glob("data/*.tl"))]
TLOBJECT_OUT = LIBRARY_DIR / "_tl"
TLOBJECT_MOD = "telethon._tl"
DOCS_IN_RES = GENERATOR_DIR / "data/html"
DOCS_OUT = Path("docs")
def generate(which, action="gen"):
from telethon_generator.generators import (
clean_tlobjects,
generate_docs,
generate_errors,
generate_tlobjects,
)
from telethon_generator.parsers import (
find_layer,
parse_errors,
parse_methods,
parse_tl,
)
layer = next(filter(None, map(find_layer, TLOBJECT_IN_TLS)))
errors = list(parse_errors(ERRORS_IN))
methods = list(
parse_methods(METHODS_IN, FRIENDLY_IN, {e.str_code: e for e in errors})
)
tlobjects = list(
itertools.chain(*(parse_tl(file, layer, methods) for file in TLOBJECT_IN_TLS))
)
if not which:
which.extend(("tl", "errors"))
clean = action == "clean"
action = "Cleaning" if clean else "Generating"
if "all" in which:
which.remove("all")
for x in ("tl", "errors", "docs"):
if x not in which:
which.append(x)
if "tl" in which:
which.remove("tl")
print(action, "TLObjects...")
if clean:
clean_tlobjects(TLOBJECT_OUT)
else:
generate_tlobjects(tlobjects, layer, TLOBJECT_MOD, TLOBJECT_OUT)
if "errors" in which:
which.remove("errors")
print(action, "RPCErrors...")
if clean:
if ERRORS_OUT.is_file():
ERRORS_OUT.unlink()
else:
with ERRORS_OUT.open("w") as file:
generate_errors(errors, file)
if "docs" in which:
which.remove("docs")
print(action, "documentation...")
if clean:
if DOCS_OUT.is_dir():
shutil.rmtree(str(DOCS_OUT))
else:
in_path = DOCS_IN_RES.resolve()
with TempWorkDir(DOCS_OUT):
generate_docs(tlobjects, methods, layer, in_path)
if "json" in which:
which.remove("json")
print(action, "JSON schema...")
json_files = [x.with_suffix(".json") for x in TLOBJECT_IN_TLS]
if clean:
for file in json_files:
if file.is_file():
file.unlink()
else:
def gen_json(fin, fout):
meths = []
constructors = []
for tl in parse_tl(fin, layer):
if tl.is_function:
meths.append(tl.to_dict())
else:
constructors.append(tl.to_dict())
what = {"constructors": constructors, "methods": meths}
with open(fout, "w") as f:
json.dump(what, f, indent=2)
for fs in zip(TLOBJECT_IN_TLS, json_files):
gen_json(*fs)
if which:
print(
"The following items were not understood:",
which,
'\n Consider using only "tl", "errors" and/or "docs".'
'\n Using only "clean" will clean them. "all" to act on all.'
'\n For instance "gen tl errors".',
)
def main(argv):
if len(argv) >= 2 and argv[1] in ("gen", "clean"):
generate(argv[2:], argv[1])
elif len(argv) >= 2 and argv[1] == "pypi":
# (Re)generate the code to make sure we don't push without it
generate(["tl", "errors"])
# Try importing the telethon module to assert it has no errors
try:
            import telethon  # noqa: F401  -- smoke-test the package, as the comment above intends
except BaseException:
print("Packaging for PyPi aborted, importing the module failed.")
return
remove_dirs = ["__pycache__", "build", "dist", "Telethon.egg-info"]
for root, _dirs, _files in os.walk(LIBRARY_DIR, topdown=False):
# setuptools is including __pycache__ for some reason (#1605)
if root.endswith("/__pycache__"):
remove_dirs.append(root)
for x in remove_dirs:
shutil.rmtree(x, ignore_errors=True)
run("python3 setup.py sdist", shell=True)
run("python3 setup.py bdist_wheel", shell=True)
run("twine upload dist/*", shell=True)
for x in ("build", "dist", "Telethon.egg-info"):
shutil.rmtree(x, ignore_errors=True)
else:
# e.g. install from GitHub
if GENERATOR_DIR.is_dir():
generate(["tl", "errors"])
# Get the long description from the README file
with open("README.rst", "r", encoding="utf-8") as f:
long_description = f.read()
with open("telethon/version.py", "r", encoding="utf-8") as f:
version = re.search(
r"^__version__\s*=\s*'(.*)'.*$", f.read(), flags=re.MULTILINE
).group(1)
setup(
name="Telethon",
version=version,
description="Full-featured Telegram client library for Python 3",
long_description=long_description,
url="https://github.com/LonamiWebs/Telethon",
download_url="https://github.com/LonamiWebs/Telethon/releases",
author="Lonami Exo",
author_email="[email protected]",
license="MIT",
# See https://stackoverflow.com/a/40300957/4759433
# -> https://www.python.org/dev/peps/pep-0345/#requires-python
# -> http://setuptools.readthedocs.io/en/latest/setuptools.html
python_requires=">=3.7",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Communications :: Chat",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords="telegram api chat client library messaging mtproto",
packages=find_packages(exclude=["telethon_*", "tests*"]),
install_requires=["pyaes", "rsa"],
extras_require={"cryptg": ["cryptg"]},
)
if __name__ == "__main__":
with TempWorkDir():
main(sys.argv)
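# Typical invocations (a sketch; they require a repository checkout that still
# contains the telethon_generator/ sources):
#   python3 setup.py gen tl errors   # regenerate TLObjects and RPC error classes
#   python3 setup.py clean all       # remove generated code and docs
#   python3 setup.py pypi            # regenerate, build sdist/wheel and upload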
|
the-stack_106_20268
|
"""HTML sanitizer for Gruyere, a web application with holes.
Copyright 2010 Google Inc. All rights reserved.
This code is licensed under the http://creativecommons.org/licenses/by-nd/3.0/us
Creative Commons Attribution-No Derivative Works 3.0 United States license.
DO NOT COPY THIS CODE!
This application is a small self-contained web application with numerous
security holes. It is provided for use with the Web Application Exploits and
Defenses codelab. You may modify the code for your own use while doing the
codelab but you may not distribute the modified code. Brief excerpts of this
code may be used for educational or instructional purposes provided this
notice is kept intact. By using Gruyere you agree to the Terms of Service
http://code.google.com/terms.html
"""
__author__ = 'Bruce Leban'
# system modules
import re
def SanitizeHtml(s):
"""Makes html safe for embedding in a document.
Filters the html to exclude all but a small subset of html by
removing script tags/attributes.
Args:
s: some html to sanitize.
Returns:
The html with all unsafe html removed.
"""
processed = ''
while s:
start = s.find('<')
if start >= 0:
end = s.find('>', start)
if end >= 0:
before = s[:start]
tag = s[start:end+1]
after = s[end+1:]
else:
before = s[:start]
tag = s[start:]
after = ''
else:
before = s
tag = ''
after = ''
processed += before + _SanitizeTag(tag)
s = after
return processed
TAG_RE = re.compile(r'<(.*?)(\s|>)') # matches the start of an html tag
def _SanitizeTag(t):
"""Sanitizes a single html tag.
This does both a 'whitelist' for
the allowed tags and a 'blacklist' for the disallowed attributes.
Args:
t: a tag to sanitize.
Returns:
a safe tag.
"""
allowed_tags = [
'a', 'b', 'big', 'br', 'center', 'code', 'em', 'h1', 'h2', 'h3',
'h4', 'h5', 'h6', 'hr', 'i', 'img', 'li', 'ol', 'p', 's', 'small',
'span', 'strong', 'table', 'td', 'tr', 'u', 'ul',
]
disallowed_attributes = [
'onblur', 'onchange', 'onclick', 'ondblclick', 'onfocus',
'onkeydown', 'onkeypress', 'onkeyup', 'onload', 'onmousedown',
'onmousemove', 'onmouseout', 'onmouseup', 'onreset',
'onselect', 'onsubmit', 'onunload'
]
# Extract the tag name and make sure it's allowed.
if t.startswith('</'):
return t
m = TAG_RE.match(t)
if m is None:
return t
tag_name = m.group(1)
if tag_name not in allowed_tags:
t = t[:m.start(1)] + 'blocked' + t[m.end(1):]
# This is a bit heavy handed but we want to be sure we don't
# allow any to get through.
for a in disallowed_attributes:
t = t.replace(a, 'blocked')
return t
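# Rough illustration (educational code, intentionally weak -- do not reuse):
#     SanitizeHtml('<script>alert(1)</script><b onclick="x()">hi</b>')
# rewrites the disallowed <script> opening tag to <blocked> and the onclick
# attribute to "blocked", while the allowed <b> tag itself is kept.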
|
the-stack_106_20269
|
import os
import subprocess
import sys
import numpy as np
import pycnal
import pycnal_toolbox
from remap_bdry import remap_bdry
from remap_bdry_uv import remap_bdry_uv
year = int(sys.argv[1])
lst_year = [year]
data_dir = '/Volumes/R1/Data/SODA_2.1.6/'
dst_dir='./'
lst_file = []
for year in lst_year:
    year = str(year)
lst = subprocess.getoutput('ls ' + data_dir + 'SODA_2.1.6_' + year + '*')
lst = lst.split()
lst_file = lst_file + lst
print('Build OBC file from the following file list:')
print(lst_file)
print(' ')
src_grd_file = data_dir + 'SODA_grid.cdf'
src_grd = pycnal_toolbox.BGrid_SODA.get_nc_BGrid_SODA('/Volumes/R1/DATA/SODA_2.1.6/SODA_grid.cdf', name='SODA_2.1.6_YELLOW', xrange=(225, 275), yrange=(190, 240))
dst_grd = pycnal.grid.get_ROMS_grid('YELLOW')
for file in lst_file:
zeta = remap_bdry(file, 'ssh', src_grd, dst_grd, dst_dir=dst_dir)
dst_grd = pycnal.grid.get_ROMS_grid('YELLOW', zeta=zeta)
remap_bdry(file, 'temp', src_grd, dst_grd, dst_dir=dst_dir)
remap_bdry(file, 'salt', src_grd, dst_grd, dst_dir=dst_dir)
remap_bdry_uv(file, src_grd, dst_grd, dst_dir=dst_dir)
# merge file
bdry_file = dst_dir + file.rsplit('/')[-1][:-4] + '_bdry_' + dst_grd.name + '.nc'
out_file = dst_dir + file.rsplit('/')[-1][:-4] + '_ssh_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-O', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-4] + '_temp_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-4] + '_salt_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-4] + '_u_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
out_file = dst_dir + file.rsplit('/')[-1][:-4] + '_v_bdry_' + dst_grd.name + '.nc'
command = ('ncks', '-a', '-A', out_file, bdry_file)
subprocess.check_call(command)
os.remove(out_file)
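# Example invocation (a sketch; the SODA data and grid paths above must exist,
# and the script name is a placeholder):
#     python make_bdry_files.py 2005
# remaps ssh/temp/salt/uv boundaries for every SODA_2.1.6_2005* file found.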
|
the-stack_106_20270
|
"""Support for the Fibaro devices."""
from __future__ import annotations
from collections import defaultdict
import logging
from fiblary3.client.v4.client import Client as FibaroClient, StateHandler
import voluptuous as vol
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_ICON,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
CONF_WHITE_VALUE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_ENERGY_KWH = "current_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
CONF_COLOR = "color"
CONF_DEVICE_CONFIG = "device_config"
CONF_DIMMING = "dimming"
CONF_GATEWAYS = "gateways"
CONF_PLUGINS = "plugins"
CONF_RESET_COLOR = "reset_color"
DOMAIN = "fibaro"
FIBARO_CONTROLLERS = "fibaro_controllers"
FIBARO_DEVICES = "fibaro_devices"
PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"light",
"scene",
"sensor",
"lock",
"switch",
]
FIBARO_TYPEMAP = {
"com.fibaro.multilevelSensor": "sensor",
"com.fibaro.binarySwitch": "switch",
"com.fibaro.multilevelSwitch": "switch",
"com.fibaro.FGD212": "light",
"com.fibaro.FGR": "cover",
"com.fibaro.doorSensor": "binary_sensor",
"com.fibaro.doorWindowSensor": "binary_sensor",
"com.fibaro.FGMS001": "binary_sensor",
"com.fibaro.heatDetector": "binary_sensor",
"com.fibaro.lifeDangerSensor": "binary_sensor",
"com.fibaro.smokeSensor": "binary_sensor",
"com.fibaro.remoteSwitch": "switch",
"com.fibaro.sensor": "sensor",
"com.fibaro.colorController": "light",
"com.fibaro.securitySensor": "binary_sensor",
"com.fibaro.hvac": "climate",
"com.fibaro.setpoint": "climate",
"com.fibaro.FGT001": "climate",
"com.fibaro.thermostatDanfoss": "climate",
"com.fibaro.doorLock": "lock",
}
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema(
{
vol.Optional(CONF_DIMMING): cv.boolean,
vol.Optional(CONF_COLOR): cv.boolean,
vol.Optional(CONF_WHITE_VALUE): cv.boolean,
vol.Optional(CONF_RESET_COLOR): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_ICON): cv.string,
}
)
FIBARO_ID_LIST_SCHEMA = vol.Schema([cv.string])
GATEWAY_CONFIG = vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_PLUGINS, default=False): cv.boolean,
vol.Optional(CONF_EXCLUDE, default=[]): FIBARO_ID_LIST_SCHEMA,
vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema(
{cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
),
},
extra=vol.ALLOW_EXTRA,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_GATEWAYS): vol.All(cv.ensure_list, [GATEWAY_CONFIG])}
)
},
extra=vol.ALLOW_EXTRA,
)
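# Illustrative configuration.yaml entry accepted by the schema above
# (host, credentials and the secret name are placeholders):
#
# fibaro:
#   gateways:
#     - url: http://192.168.1.10/api/
#       username: admin
#       password: !secret fibaro_password
#       plugins: false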
class FibaroController:
"""Initiate Fibaro Controller Class."""
def __init__(self, config):
"""Initialize the Fibaro controller."""
self._client = FibaroClient(
config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD]
)
self._scene_map = None
# Whether to import devices from plugins
self._import_plugins = config[CONF_PLUGINS]
self._device_config = config[CONF_DEVICE_CONFIG]
self._room_map = None # Mapping roomId to room object
self._device_map = None # Mapping deviceId to device object
self.fibaro_devices = None # List of devices by type
self._callbacks = {} # Update value callbacks by deviceId
self._state_handler = None # Fiblary's StateHandler object
self._excluded_devices = config[CONF_EXCLUDE]
self.hub_serial = None # Unique serial number of the hub
def connect(self):
"""Start the communication with the Fibaro controller."""
try:
login = self._client.login.get()
info = self._client.info.get()
self.hub_serial = slugify(info.serialNumber)
except AssertionError:
_LOGGER.error("Can't connect to Fibaro HC. Please check URL")
return False
if login is None or login.status is False:
_LOGGER.error(
"Invalid login for Fibaro HC. Please check username and password"
)
return False
self._room_map = {room.id: room for room in self._client.rooms.list()}
self._read_devices()
self._read_scenes()
return True
def enable_state_handler(self):
"""Start StateHandler thread for monitoring updates."""
self._state_handler = StateHandler(self._client, self._on_state_change)
def disable_state_handler(self):
"""Stop StateHandler thread used for monitoring updates."""
self._state_handler.stop()
self._state_handler = None
def _on_state_change(self, state):
"""Handle change report received from the HomeCenter."""
callback_set = set()
for change in state.get("changes", []):
try:
dev_id = change.pop("id")
if dev_id not in self._device_map:
continue
device = self._device_map[dev_id]
for property_name, value in change.items():
if property_name == "log":
if value and value != "transfer OK":
_LOGGER.debug("LOG %s: %s", device.friendly_name, value)
continue
if property_name == "logTemp":
continue
if property_name in device.properties:
device.properties[property_name] = value
_LOGGER.debug(
"<- %s.%s = %s", device.ha_id, property_name, str(value)
)
else:
_LOGGER.warning("%s.%s not found", device.ha_id, property_name)
if dev_id in self._callbacks:
callback_set.add(dev_id)
except (ValueError, KeyError):
pass
for item in callback_set:
self._callbacks[item]()
def register(self, device_id, callback):
"""Register device with a callback for updates."""
self._callbacks[device_id] = callback
def get_children(self, device_id):
"""Get a list of child devices."""
return [
device
for device in self._device_map.values()
if device.parentId == device_id
]
def get_children2(self, device_id, endpoint_id):
"""Get a list of child devices for the same endpoint."""
return [
device
for device in self._device_map.values()
if device.parentId == device_id
and (
"endPointId" not in device.properties
or device.properties.endPointId == endpoint_id
)
]
def get_siblings(self, device):
"""Get the siblings of a device."""
if "endPointId" in device.properties:
return self.get_children2(
self._device_map[device.id].parentId,
self._device_map[device.id].properties.endPointId,
)
return self.get_children(self._device_map[device.id].parentId)
@staticmethod
def _map_device_to_type(device):
"""Map device to HA device type."""
# Use our lookup table to identify device type
device_type = None
if "type" in device:
device_type = FIBARO_TYPEMAP.get(device.type)
if device_type is None and "baseType" in device:
device_type = FIBARO_TYPEMAP.get(device.baseType)
# We can also identify device type by its capabilities
if device_type is None:
if "setBrightness" in device.actions:
device_type = "light"
elif "turnOn" in device.actions:
device_type = "switch"
elif "open" in device.actions:
device_type = "cover"
elif "secure" in device.actions:
device_type = "lock"
elif "value" in device.properties:
if device.properties.value in ("true", "false"):
device_type = "binary_sensor"
else:
device_type = "sensor"
# Switches that control lights should show up as lights
if device_type == "switch" and device.properties.get("isLight", False):
device_type = "light"
return device_type
def _read_scenes(self):
scenes = self._client.scenes.list()
self._scene_map = {}
for device in scenes:
if "name" not in device or "id" not in device:
continue
device.fibaro_controller = self
if "roomID" not in device or device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = f"{room_name} {device.name}"
device.ha_id = (
f"scene_{slugify(room_name)}_{slugify(device.name)}_{device.id}"
)
device.unique_id_str = f"{self.hub_serial}.scene.{device.id}"
self._scene_map[device.id] = device
self.fibaro_devices["scene"].append(device)
_LOGGER.debug("%s scene -> %s", device.ha_id, device)
def _read_devices(self):
"""Read and process the device list."""
devices = self._client.devices.list()
self._device_map = {}
self.fibaro_devices = defaultdict(list)
last_climate_parent = None
last_endpoint = None
for device in devices:
try:
if "name" not in device or "id" not in device:
continue
device.fibaro_controller = self
if "roomID" not in device or device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = f"{room_name} {device.name}"
device.ha_id = (
f"{slugify(room_name)}_{slugify(device.name)}_{device.id}"
)
if (
device.enabled
and (
"isPlugin" not in device
or (not device.isPlugin or self._import_plugins)
)
and device.ha_id not in self._excluded_devices
):
device.mapped_type = self._map_device_to_type(device)
device.device_config = self._device_config.get(device.ha_id, {})
else:
device.mapped_type = None
if (dtype := device.mapped_type) is None:
continue
device.unique_id_str = f"{self.hub_serial}.{device.id}"
self._device_map[device.id] = device
_LOGGER.debug(
"%s (%s, %s) -> %s %s",
device.ha_id,
device.type,
device.baseType,
dtype,
str(device),
)
if dtype != "climate":
self.fibaro_devices[dtype].append(device)
continue
# We group climate devices into groups with the same
# endPointID belonging to the same parent device.
if "endPointId" in device.properties:
_LOGGER.debug(
"climate device: %s, endPointId: %s",
device.ha_id,
device.properties.endPointId,
)
else:
_LOGGER.debug("climate device: %s, no endPointId", device.ha_id)
# If a sibling of this device has been added, skip this one
# otherwise add the first visible device in the group
# which is a hack, but solves a problem with FGT having
# hidden compatibility devices before the real device
if last_climate_parent != device.parentId or (
"endPointId" in device.properties
and last_endpoint != device.properties.endPointId
):
_LOGGER.debug("Handle separately")
self.fibaro_devices[dtype].append(device)
last_climate_parent = device.parentId
if "endPointId" in device.properties:
last_endpoint = device.properties.endPointId
else:
last_endpoint = 0
else:
_LOGGER.debug("not handling separately")
except (KeyError, ValueError):
pass
def setup(hass, base_config):
"""Set up the Fibaro Component."""
if DOMAIN not in base_config:
# AIS new config_flow way
hass.data[DOMAIN] = {}
hass.data[DOMAIN][CONF_GATEWAYS] = {}
hass.data[FIBARO_CONTROLLERS] = {}
return True
# old configuration.yaml way
gateways = base_config[DOMAIN][CONF_GATEWAYS]
hass.data[FIBARO_CONTROLLERS] = {}
def stop_fibaro(event):
"""Stop Fibaro Thread."""
_LOGGER.info("Shutting down Fibaro connection")
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.disable_state_handler()
hass.data[FIBARO_DEVICES] = {}
for platform in PLATFORMS:
hass.data[FIBARO_DEVICES][platform] = []
for gateway in gateways:
controller = FibaroController(gateway)
if controller.connect():
hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
for platform in PLATFORMS:
hass.data[FIBARO_DEVICES][platform].extend(
controller.fibaro_devices[platform]
)
if hass.data[FIBARO_CONTROLLERS]:
for platform in PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, base_config)
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.enable_state_handler()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
return True
return False
class FibaroDevice(Entity):
"""Representation of a Fibaro device entity."""
def __init__(self, fibaro_device):
"""Initialize the device."""
self.fibaro_device = fibaro_device
self.controller = fibaro_device.fibaro_controller
self._name = fibaro_device.friendly_name
self.ha_id = fibaro_device.ha_id
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.controller.register(self.fibaro_device.id, self._update_callback)
def _update_callback(self):
"""Update the state."""
self.schedule_update_ha_state(True)
@property
def device_info(self):
sw_version = 1
manufacturer = "Fibaro"
if "properties" in self.fibaro_device:
if "zwaveVersion" in self.fibaro_device.properties:
sw_version = self.fibaro_device.properties.zwaveVersion
if "zwaveCompany" in self.fibaro_device.properties:
manufacturer = self.fibaro_device.properties.zwaveCompany
return {
"identifiers": {(DOMAIN, self.ha_id)},
"name": self._name,
"manufacturer": manufacturer,
"model": self.fibaro_device.type,
"sw_version": sw_version,
"via_device": None,
}
@property
def level(self):
"""Get the level of Fibaro device."""
if "value" in self.fibaro_device.properties:
return self.fibaro_device.properties.value
return None
@property
def level2(self):
"""Get the tilt level of Fibaro device."""
if "value2" in self.fibaro_device.properties:
return self.fibaro_device.properties.value2
return None
def dont_know_message(self, action):
"""Make a warning in case we don't know how to perform an action."""
_LOGGER.warning(
"Not sure how to setValue: %s (available actions: %s)",
str(self.ha_id),
str(self.fibaro_device.actions),
)
def set_level(self, level):
"""Set the level of Fibaro device."""
self.action("setValue", level)
if "value" in self.fibaro_device.properties:
self.fibaro_device.properties.value = level
if "brightness" in self.fibaro_device.properties:
self.fibaro_device.properties.brightness = level
def set_level2(self, level):
"""Set the level2 of Fibaro device."""
self.action("setValue2", level)
if "value2" in self.fibaro_device.properties:
self.fibaro_device.properties.value2 = level
def call_turn_on(self):
"""Turn on the Fibaro device."""
self.action("turnOn")
def call_turn_off(self):
"""Turn off the Fibaro device."""
self.action("turnOff")
def call_set_color(self, red, green, blue, white):
"""Set the color of Fibaro device."""
red = int(max(0, min(255, red)))
green = int(max(0, min(255, green)))
blue = int(max(0, min(255, blue)))
white = int(max(0, min(255, white)))
color_str = f"{red},{green},{blue},{white}"
self.fibaro_device.properties.color = color_str
self.action("setColor", str(red), str(green), str(blue), str(white))
def action(self, cmd, *args):
"""Perform an action on the Fibaro HC."""
if cmd in self.fibaro_device.actions:
getattr(self.fibaro_device, cmd)(*args)
_LOGGER.debug("-> %s.%s%s called", str(self.ha_id), str(cmd), str(args))
else:
self.dont_know_message(cmd)
@property
def current_power_w(self):
"""Return the current power usage in W."""
if "power" in self.fibaro_device.properties and (
power := self.fibaro_device.properties.power
):
return convert(power, float, 0.0)
return None
@property
def current_binary_state(self):
"""Return the current binary state."""
if self.fibaro_device.properties.value == "false":
return False
if (
self.fibaro_device.properties.value == "true"
or int(self.fibaro_device.properties.value) > 0
):
return True
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self.fibaro_device.unique_id_str
@property
def name(self) -> str | None:
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Get polling requirement from fibaro device."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
attr = {"fibaro_id": self.fibaro_device.id}
try:
if "battery" in self.fibaro_device.interfaces:
attr[ATTR_BATTERY_LEVEL] = int(
self.fibaro_device.properties.batteryLevel
)
if "fibaroAlarmArm" in self.fibaro_device.interfaces:
attr[ATTR_ARMED] = bool(self.fibaro_device.properties.armed)
if "power" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_POWER_W] = convert(
self.fibaro_device.properties.power, float, 0.0
)
if "energy" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(
self.fibaro_device.properties.energy, float, 0.0
)
except (ValueError, KeyError):
pass
return attr
# AIS
async def async_setup_entry(hass, config_entry):
"""Set up config entry."""
import threading
# discover_devices is a sync function.
t = threading.Thread(target=discover_devices, args=(hass, config_entry))
t.start()
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
# TODO await hass.config_entries.async_forward_entry_unload(config_entry, "xxx")
return True
def discover_devices(hass, config_entry):
"""
Run periodically to discover new devices.
Currently it's only run at startup.
"""
# ------------
gateway = {
CONF_URL: config_entry.data[CONF_URL],
CONF_USERNAME: config_entry.data[CONF_USERNAME],
CONF_PASSWORD: config_entry.data[CONF_PASSWORD],
CONF_PLUGINS: False,
CONF_DEVICE_CONFIG: {},
CONF_EXCLUDE: [],
}
def stop_fibaro(event):
"""Stop Fibaro Thread."""
_LOGGER.info("Shutting down Fibaro connection")
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.disable_state_handler()
hass.data[FIBARO_DEVICES] = {}
    for component in PLATFORMS:
hass.data[FIBARO_DEVICES][component] = []
controller = FibaroController(gateway)
if controller.connect():
hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
        for component in PLATFORMS:
hass.data[FIBARO_DEVICES][component].extend(
controller.fibaro_devices[component]
)
if hass.data[FIBARO_CONTROLLERS]:
        for component in PLATFORMS:
# discovery.load_platform(hass, component, DOMAIN, {}, config_entry)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.enable_state_handler()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
return True
return False
|
the-stack_106_20271
|
"""
Reimplementation of the particle energy histogram for the electron species, directly on top of h5py.
The resulting speedup compared to the openPMD-viewer-based `particle_energy_histogram` is a factor ~4.
To view all groups datasets and corresponding attributes in an .h5 file, use `h5ls -rv filename.h5`.
"""
import pathlib
from dataclasses import dataclass, field
from typing import Any, Dict
import h5py
import numexpr as ne
import numpy as np
import pint
import signac
from fast_histogram import histogram1d
import job_util
import util
ureg = pint.UnitRegistry()
m_e = ureg.electron_mass
e = ureg.elementary_charge
c = ureg.speed_of_light
e_pC = (1 * e).to("pC").magnitude
mc2 = (1 * m_e * c ** 2).to("MeV").magnitude
mc = (1 * m_e * c).to("kilogram * meter / second").magnitude
@dataclass
class LastH5File:
job: Any = field(repr=False)
iteration: int = field(init=False, repr=False)
fpath: pathlib.Path = field(init=False)
h5_path: pathlib.Path = field(init=False, repr=False)
fname: str = field(init=False, repr=False)
electrons: str = field(init=False, repr=False)
mom: Dict[str, str] = field(init=False, repr=False)
w: str = field(init=False, repr=False)
def __post_init__(self):
self.h5_path = job_util.is_h5_path(self.job)
self.fname = self.last_fname()
self.iteration = job_util.extract_iteration_number(self.fname)
self.fpath = self.h5_path / self.fname
self.file_obj = h5py.File(self.fpath, "r")
self.electrons = f"/data/{self.iteration}/particles/electrons"
self.w = f"{self.electrons}/weighting"
self.mom = dict()
for xyz in "x", "y", "z":
self.mom[xyz] = f"{self.electrons}/momentum/{xyz}"
def __enter__(self):
return self.file_obj
def __exit__(self, type, value, traceback):
self.file_obj.close()
def last_fname(self):
fnames = job_util.get_diags_fnames(self.job)
return tuple(fnames)[-1]
def energy_histogram(
normalized_particle_momenta,
weights,
*,
bins=499,
erange=(1, 500),
normalized=False,
cone_aperture=None,
):
ux = normalized_particle_momenta["x"]
uy = normalized_particle_momenta["y"]
uz = normalized_particle_momenta["z"]
if cone_aperture is not None:
# angle in radians
theta = cone_aperture / 2
particle_in_cone = (ux ** 2 + uy ** 2) * np.cos(theta) ** 2 - uz ** 2 * np.sin(
theta
) ** 2 <= 0.0
ux = ux[particle_in_cone]
uy = uy[particle_in_cone]
uz = uz[particle_in_cone]
weights = weights[particle_in_cone]
expr = ne.evaluate("sqrt(1+ux**2+uy**2+uz**2)")
hist = histogram1d(mc2 * expr, bins=bins, range=erange, weights=e_pC * weights)
if normalized:
hist = util.normalize_to_interval(0, 1, hist)
return hist
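# Quick self-contained sanity check (synthetic momenta, not simulation output):
#     u = {"x": np.zeros(3), "y": np.zeros(3), "z": np.full(3, 100.0)}  # p_z = 100 m_e c
#     energy_histogram(u, weights=np.ones(3))
# puts all three macroparticles into a single ~1 MeV-wide bin near 51 MeV,
# since E = mc^2 * sqrt(1 + u_z^2) ~= 0.511 MeV * 100.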
def job_energy_histogram(
job,
*,
bins=499,
erange=(1, 500),
normalized=False,
cone_aperture=None,
):
uxyz = dict()
h5f = LastH5File(job)
with h5f as f:
w = np.array(f[h5f.w])
for xyz in "x", "y", "z":
# normalize momenta by mc
uxyz[xyz] = np.array(f[h5f.mom[xyz]]) / mc
hist = energy_histogram(
normalized_particle_momenta=uxyz,
weights=w,
bins=bins,
erange=erange,
normalized=normalized,
cone_aperture=cone_aperture,
)
return hist
def main():
"""Main entry point."""
proj = signac.get_project(search=False)
for job in proj: # ~14s
hist = job_energy_histogram(job)
if __name__ == "__main__":
main()
|
the-stack_106_20272
|
"""
@author: David Lei
@since: 28/08/2016
@modified:
Worst: O(n^2)
Best: O(n)
to find 3rd smallest element in array, a = [5, 4, 1, 2, 9, 8, 6]
split array into 2 around a pivot, eg: make pivot 6
less than 6 = [5, 4, 1, 2]
greater than 6 = [9, 8]
len(less than) = 4 and 3 <= 4, so recurse on the less-than side
pivot = 2
less than = [1]
greater than = [4, 5]
len(less than) = 1, len(greater than) = 2
the pivot 2 plus the single smaller element cover the 2 smallest values,
so the 3rd smallest is the 1st smallest of [4, 5], which is 4
# given an unsorted array, find the kth smallest element
# same as sorting and taking the item at index k - 1
# but can be done without completely sorting the entire array
"""
import random
def randomized_partition(array, start, end):
random_index = random.randint(start, end)
array[random_index], array[end] = array[end], array[random_index]
# picked pivot 'randomly', put it at the end index
# now partition
pivot = array[end] # randomly chosen pivot
wall = start - 1
for i in range(start, end):
if array[i] <= pivot:
wall += 1 # wall is <= pivot
array[i], array[wall] = array[wall], array[i] # swap elements
array[wall + 1], array[end] = array[end], array[wall + 1] # put pivot in right place
return wall + 1 # index of pivot
def randomized_quick_select(array, start, end, i):
"""
returns ith smallest element in an array[start...end]
:param array: array of distinct elements
:param start: start index to look at
:param end: end index to look at
    :param i: the i-th smallest element to find (1-based)
"""
if start == end: # base case
return array[start]
pivot_index = randomized_partition(array, start, end)
k = pivot_index - start + 1
if i == k:
return array[pivot_index]
elif i < k:
return randomized_quick_select(array, start, pivot_index-1, i) # recurse on lower half
else:
return randomized_quick_select(array, pivot_index+1, end, i-k) # recurse on upper half
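# Illustrative convenience wrapper (a sketch, not part of the original code):
# copies the input so the caller's list is not reordered by the in-place partitioning.
def kth_smallest(array, k):
    working_copy = list(array)
    return randomized_quick_select(working_copy, 0, len(working_copy) - 1, k)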
# Another implementation for practice ------
def quick_select_partition_2(array, start, end, pivot_index): # Inplace.
array[end], array[pivot_index] = array[pivot_index], array[end]
pivot_element = array[end]
wall = start # Things left to the wall are smaller, right are bigger.
for i in range(start, end): # Exclusive of end as the pivot is now there.
if array[i] < pivot_element:
array[i], array[wall] = array[wall], array[i]
wall += 1
array[end], array[wall] = array[wall], array[end]
    return wall  # Returns the pivot's final index; it is an absolute index >= start, not necessarily 0.
def quick_select_2(array, start, end, k):
if start == end: # Only 1 element, return it.
return array[start]
pivot_index = (start + end) // 2
pivot_index = quick_select_partition_2(array, start, end, pivot_index) # Returns true position of pivot.
    k_smallest_element_index = k - 1
    if k_smallest_element_index == pivot_index:  # The kth smallest element sits at index k - 1 once the pivot is in place.
return array[k_smallest_element_index]
elif k_smallest_element_index < pivot_index: # kth smallest element is in the bottom half.
return quick_select_2(array, start, pivot_index - 1, k)
else: # kth smallest element is in the top half.
return quick_select_2(array, pivot_index + 1, end, k)
if __name__ == "__main__":
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 | th smallest element.
a = [1, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 14, 15, 20]
# ith smallest element is in range [1, len(a)].
ith_smallest_element = 9 # len(a) - 1
print(randomized_quick_select(a[::-1], 0, len(a) - 1, ith_smallest_element))
print(quick_select_2(a[::-1], 0, len(a) - 1, ith_smallest_element))
|
the-stack_106_20274
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from nnabla.utils.nnp_graph import NnpNetworkPass
from .base import ImageNetBase
class VGG(ImageNetBase):
"""
VGG architectures for 11, 13, 16 layers.
Args:
num_layers (int): Number of layers chosen from 11, 13, 16.
    The following is a list of strings that can be specified to the ``use_up_to`` option in the ``__call__`` method;
* ``'classifier'`` (default): The output of the final affine layer for classification.
* ``'pool'``: The output of the final global average pooling.
* ``'lastconv'``: The input of the final global average pooling without ReLU activation.
* ``'lastconv+relu'``: Network up to ``'lastconv'`` followed by ReLU activation.
* ``'lastfeature'``: Network up to one layer before ``'classifier'``, but without activation.
References:
* `Simonyan and Zisserman, Very Deep Convolutional Networks for Large-Scale Image Recognition.
<https://arxiv.org/pdf/1409.1556>`_
"""
def __init__(self, num_layers=11):
# Check validity of num_layers
set_num_layers = set((11, 13, 16))
assert num_layers in set_num_layers, "num_layers must be chosen from {}".format(
set_num_layers)
self.num_layers = num_layers
# Load nnp
self._load_nnp('VGG-{}.nnp'.format(num_layers),
'VGG-{0}/VGG-{0}.nnp'.format(num_layers))
self._KEY_VARIABLE = {
'classifier': 'VGG{}/Affine_3'.format(num_layers),
'pool': 'VGG{}/MaxPooling_5'.format(num_layers),
'lastconv': 'VGG16/Convolution_13' if num_layers == 16 else 'VGG{}/Convolution_12'.format(num_layers),
'lastconv+relu': 'VGG16/ReLU_13' if num_layers == 16 else 'VGG{}/ReLU_12'.format(num_layers),
'lastfeature': 'VGG{}/Affine_2'.format(num_layers),
}
def _input_shape(self):
return (3, 224, 224)
def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False,
force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):
assert use_from is None, 'This should not be set because it is for forward compatibility.'
input_var = self.get_input_var(input_var)
callback = NnpNetworkPass(verbose)
callback.remove_and_rewire('ImageAugmentationX')
callback.set_variable('TrainingInput', input_var)
callback.set_batch_normalization_batch_stat_all(training)
self.use_up_to(use_up_to, callback)
if not training:
callback.remove_and_rewire(
'VGG{}/Dropout_1'.format(self.num_layers))
callback.remove_and_rewire(
'VGG{}/Dropout_2'.format(self.num_layers))
callback.fix_parameters()
batch_size = input_var.shape[0]
net = self.nnp.get_network(
'Training', batch_size=batch_size, callback=callback)
if returns_net:
return net
return list(net.outputs.values())[0]
class VGG11(VGG):
"""VGG11
An alias of :obj:`VGG` `(11)`.
"""
def __init__(self):
super(VGG11, self).__init__(11)
class VGG13(VGG):
"""VGG13
An alias of :obj:`VGG` `(13)`.
"""
def __init__(self):
super(VGG13, self).__init__(13)
class VGG16(VGG):
"""VGG16
An alias of :obj:`VGG` `(16)`.
"""
def __init__(self):
super(VGG16, self).__init__(16)
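# Illustrative usage sketch (an assumption, not part of the original module):
# builds the VGG-16 graph up to the final pooling output for a single image.
# Requires nnabla and the pretrained .nnp file to be obtainable by ImageNetBase.
if __name__ == "__main__":
    import nnabla as nn
    x = nn.Variable((1, 3, 224, 224))
    vgg16 = VGG16()
    pool = vgg16(x, use_up_to='pool')
    print(pool.shape)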
|
the-stack_106_20275
|
import utime
import ustruct
def color565(r, g, b):
    """Pack 8-bit R, G, B components into a single 16-bit RGB565 value."""
    return (r & 0xf8) << 8 | (g & 0xfc) << 3 | b >> 3
class DummyPin:
"""A fake gpio pin for when you want to skip pins."""
OUT = 0
IN = 0
PULL_UP = 0
PULL_DOWN = 0
OPEN_DRAIN = 0
ALT = 0
ALT_OPEN_DRAIN = 0
LOW_POWER = 0
MED_POWER = 0
    HIGH_POWER = 0
IRQ_FALLING = 0
IRQ_RISING = 0
IRQ_LOW_LEVEL = 0
IRQ_HIGH_LEVEL = 0
def __call__(self, *args, **kwargs):
return False
init = __call__
value = __call__
out_value = __call__
toggle = __call__
high = __call__
low = __call__
on = __call__
off = __call__
mode = __call__
pull = __call__
drive = __call__
irq = __call__
class Display:
_PAGE_SET = None
_COLUMN_SET = None
_RAM_WRITE = None
_RAM_READ = None
_INIT = ()
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
def __init__(self, width, height):
self.width = width
self.height = height
self.init()
def init(self):
"""Run the initialization commands."""
for command, data in self._INIT:
self._write(command, data)
def _block(self, x0, y0, x1, y1, data=None):
"""Read or write a block of data."""
self._write(self._COLUMN_SET, self._encode_pos(x0, x1))
self._write(self._PAGE_SET, self._encode_pos(y0, y1))
if data is None:
size = ustruct.calcsize(self._DECODE_PIXEL)
return self._read(self._RAM_READ,
(x1 - x0 + 1) * (y1 - y0 + 1) * size)
self._write(self._RAM_WRITE, data)
def _encode_pos(self, a, b):
"""Encode a postion into bytes."""
return ustruct.pack(self._ENCODE_POS, a, b)
def _encode_pixel(self, color):
"""Encode a pixel color into bytes."""
return ustruct.pack(self._ENCODE_PIXEL, color)
def _decode_pixel(self, data):
"""Decode bytes into a pixel color."""
return color565(*ustruct.unpack(self._DECODE_PIXEL, data))
def pixel(self, x, y, color=None):
"""Read or write a pixel."""
if color is None:
return self._decode_pixel(self._block(x, y, x, y))
if not 0 <= x < self.width or not 0 <= y < self.height:
return
self._block(x, y, x, y, self._encode_pixel(color))
def fill_rectangle(self, x, y, width, height, color):
"""Draw a filled rectangle."""
x = min(self.width - 1, max(0, x))
y = min(self.height - 1, max(0, y))
w = min(self.width - x, max(1, width))
h = min(self.height - y, max(1, height))
self._block(x, y, x + w - 1, y + h - 1, b'')
chunks, rest = divmod(w * h, 512)
pixel = self._encode_pixel(color)
if chunks:
data = pixel * 512
for count in range(chunks):
self._write(None, data)
if rest:
self._write(None, pixel * rest)
def fill(self, color=0):
"""Fill whole screen."""
self.fill_rectangle(0, 0, self.width, self.height, color)
def hline(self, x, y, width, color):
"""Draw a horizontal line."""
self.fill_rectangle(x, y, width, 1, color)
def vline(self, x, y, height, color):
"""Draw a vertical line."""
self.fill_rectangle(x, y, 1, height, color)
def blit_buffer(self, buffer, x, y, width, height):
"""Copy pixels from a buffer."""
if (not 0 <= x < self.width or
not 0 <= y < self.height or
not 0 < x + width <= self.width or
not 0 < y + height <= self.height):
raise ValueError("out of bounds")
self._block(x, y, x + width - 1, y + height - 1, buffer)
class DisplaySPI(Display):
def __init__(self, spi, dc, cs=None, rst=None, width=1, height=1):
self.spi = spi
self.cs = cs
self.dc = dc
self.rst = rst
if self.rst is None:
self.rst = DummyPin()
if self.cs is None:
self.cs = DummyPin()
self.cs.init(self.cs.OUT, value=1)
self.dc.init(self.dc.OUT, value=0)
self.rst.init(self.rst.OUT, value=1)
self.reset()
super().__init__(width, height)
def reset(self):
self.rst(0)
utime.sleep_ms(50)
self.rst(1)
utime.sleep_ms(50)
def _write(self, command=None, data=None):
if command is not None:
self.dc(0)
self.cs(0)
self.spi.write(bytearray([command]))
self.cs(1)
if data is not None:
self.dc(1)
self.cs(0)
self.spi.write(data)
self.cs(1)
def _read(self, command=None, count=0):
self.dc(0)
self.cs(0)
if command is not None:
self.spi.write(bytearray([command]))
if count:
data = self.spi.read(count)
self.cs(1)
return data
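# Illustrative sketch of a concrete driver built on DisplaySPI (an assumption,
# not part of the original module). The command bytes below are placeholders;
# a real driver must take its command set and init sequence from the
# controller's datasheet.
class _ExampleDriver(DisplaySPI):
    _COLUMN_SET = 0x2a  # placeholder column-address command
    _PAGE_SET = 0x2b    # placeholder page-address command
    _RAM_WRITE = 0x2c   # placeholder memory-write command
    _RAM_READ = 0x2e    # placeholder memory-read command
    _INIT = ()          # a real driver would list (command, data) pairs here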
|
the-stack_106_20277
|
import datetime
import dateutil.parser
from django.core.cache import cache
from django.test import TestCase
from apps.physicaldevice.models import Device
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.helpers import StreamDataBuilderHelper
from apps.streamdata.models import StreamData
from apps.utils.test_util import TestMixin
from ..actions.action import *
from ..cache_utils import cached_serialized_filter_for_slug, get_current_cached_filter_state_for_slug
from ..models import *
from ..process import FilterHelper
from ..processing.trigger import evaluate_cached_transition
from ..serializers import *
class StreamFilterHelperTestCase(TestMixin, TestCase):
"""
    Fixture includes:
"""
def setUp(self):
self.usersTestSetup()
self.orgTestSetup()
self.deviceTemplateTestSetup()
self.v1 = StreamVariable.objects.create_variable(
name='Var A', project=self.p1, created_by=self.u2, lid=1,
)
self.v2 = StreamVariable.objects.create_variable(
name='Var B', project=self.p2, created_by=self.u3, lid=2,
)
self.pd1 = Device.objects.create_device(project=self.p1, label='d1', template=self.dt1, created_by=self.u2)
self.pd2 = Device.objects.create_device(project=self.p2, label='d2', template=self.dt1, created_by=self.u3)
self.s1 = StreamId.objects.create_stream(
project=self.p1, variable=self.v1, device=self.pd1, created_by=self.u2
)
self.s2 = StreamId.objects.create_stream(
project=self.p2, variable=self.v2, device=self.pd1, created_by=self.u3
)
if cache:
cache.clear()
def tearDown(self):
StreamFilterAction.objects.all().delete()
StreamFilterTrigger.objects.all().delete()
StateTransition.objects.all().delete()
StreamFilter.objects.all().delete()
        State.objects.all().delete()
StreamId.objects.all().delete()
StreamVariable.objects.all().delete()
StreamData.objects.all().delete()
Device.objects.all().delete()
self.deviceTemplateTestTearDown()
self.orgTestTearDown()
self.userTestTearDown()
if cache:
cache.clear()
def _dummy_basic_filter(self, with_actions=False):
"""
Filter with two states:
state1 -> state2 if value >= 10
state2 -> state1 if value < 10
"""
f = StreamFilter.objects.create_filter_from_streamid(
name='Filter 1', input_stream=self.s1, created_by=self.u2
)
a1 = a2 = None
state1 = State.objects.create(label="state1", filter=f, created_by=self.u2)
state2 = State.objects.create(label="state2", filter=f, created_by=self.u2)
if with_actions:
extra_payload = {
"notification_recipient": "admin",
"notification_level": "warn",
"custom_note": "dummy"
}
a1 = StreamFilterAction.objects.create(
type='eml', created_by=self.u2, extra_payload=extra_payload, on='exit', state=state1
)
a2 = StreamFilterAction.objects.create(
type='eml', created_by=self.u2, extra_payload=extra_payload, on='exit', state=state2
)
transition1 = StateTransition.objects.create(
src=state1, dst=state2, filter=f, created_by=self.u2
)
t1 = StreamFilterTrigger.objects.create(
operator='ge', created_by=self.u2, filter=f, threshold=10, transition=transition1
)
transition2 = StateTransition.objects.create(
src=state2, dst=state1, filter=f, created_by=self.u2
)
t2 = StreamFilterTrigger.objects.create(
operator='lt', created_by=self.u2, filter=f, threshold=10, transition=transition2
)
return {
'filter': f,
'states': [state1, state2],
'transitions': [transition1, transition2]
}
def _dummy_data(self, stream_slug, data):
count = 1
data_entries = []
data_helper = StreamDataBuilderHelper()
for item in data:
stream_data = data_helper.build_data_obj(
stream_slug=stream_slug,
streamer_local_id=count,
timestamp=item[0],
int_value=item[1]
)
            data_entries.append(stream_data)
            count += 1
return data_entries
def testTransitionShouldExecute(self):
filter_info = self._dummy_basic_filter()
t0 = dateutil.parser.parse('2016-09-28T10:00:00Z')
data_helper = StreamDataBuilderHelper()
stream_data = data_helper.build_data_obj(
stream_slug=self.s1.slug,
streamer_local_id=1,
timestamp=t0,
int_value=11
)
filter_helper = FilterHelper(False)
serializer = StateTransitionReadOnlySerializer(filter_info['transitions'][0])
transition1_data = serializer.data
serializer = StateReadOnlySerializer(filter_info['transitions'][0].src)
src = serializer.data
serializer = StateReadOnlySerializer(filter_info['transitions'][0].dst)
dst = serializer.data
# No current state, but condition not met
stream_data.value = 9
res = filter_helper._transition_should_execute(
src, dst, None, transition1_data, stream_data
)
self.assertFalse(res)
# No current state, and condition met
stream_data.value = 11
res = filter_helper._transition_should_execute(
src, dst, None, transition1_data, stream_data
)
self.assertTrue(res)
# Different src state. Never transition
res = filter_helper._transition_should_execute(
src, dst, 'state2', transition1_data, stream_data
)
self.assertFalse(res)
# Correct src state, does not meet condition
stream_data.value = 9
res = filter_helper._transition_should_execute(
src, dst, 'state1', transition1_data, stream_data
)
self.assertFalse(res)
# Correct src state, meet condition
stream_data.value = 11
res = filter_helper._transition_should_execute(
src, dst, 'state1', transition1_data, stream_data
)
self.assertTrue(res)
# No src, but condition not met
stream_data.value = 9
res = filter_helper._transition_should_execute(
None, dst, 'state1', transition1_data, stream_data
)
self.assertFalse(res)
# No src, and from another state. Condition met
stream_data.value = 11
res = filter_helper._transition_should_execute(
None, dst, 'state1', transition1_data, stream_data
)
self.assertTrue(res)
# No src, condition met from existing state
stream_data.value = 11
res = filter_helper._transition_should_execute(
None, dst, 'state2', transition1_data, stream_data
)
self.assertFalse(res)
# No src, condition met from existing state
stream_data.value = 11
res = filter_helper._transition_should_execute(
None, dst, '', transition1_data, stream_data
)
self.assertTrue(res)
def testBasic1Flow(self):
filter_info = self._dummy_basic_filter()
serializer = StateTransitionReadOnlySerializer(filter_info['transitions'][0])
transition1_data = serializer.data
serializer = StateTransitionReadOnlySerializer(filter_info['transitions'][1])
transition2_data = serializer.data
self.assertFalse(evaluate_cached_transition(transition1_data, 9))
self.assertTrue(evaluate_cached_transition(transition2_data, 9))
self.assertTrue(evaluate_cached_transition(transition1_data, 11))
self.assertFalse(evaluate_cached_transition(transition2_data, 11))
# 10 <= value < 20
t3 = StreamFilterTrigger.objects.create(
operator='lt', created_by=self.u2, filter=filter_info['filter'], threshold=20, transition=filter_info['transitions'][0]
)
serializer = StateTransitionReadOnlySerializer(filter_info['transitions'][0])
transition1_data = serializer.data
self.assertFalse(evaluate_cached_transition(transition1_data, 9))
self.assertTrue(evaluate_cached_transition(transition1_data, 10))
self.assertTrue(evaluate_cached_transition(transition1_data, 12))
self.assertFalse(evaluate_cached_transition(transition1_data, 20))
def testBasic2Flow(self):
f = StreamFilter.objects.create_filter_from_streamid(
name='Filter 1', input_stream=self.s1, created_by=self.u2
)
extra_payload = {
"notification_recipient": "[org:admin]",
"custom_note": "dummy"
}
state1 = State.objects.create(label="state1", filter=f, created_by=self.u2)
state2 = State.objects.create(label="state2", filter=f, created_by=self.u2)
a1 = StreamFilterAction.objects.create(
type='eml', created_by=self.u2, extra_payload=extra_payload, on='exit', state=state1
)
a2 = StreamFilterAction.objects.create(
type='eml', created_by=self.u2, extra_payload=extra_payload, on='entry', state=state1
)
transition1 = StateTransition.objects.create(
src=state1, dst=state2, filter=f, created_by=self.u2
)
t1 = StreamFilterTrigger.objects.create(
operator='ge', created_by=self.u2, filter=f, threshold=10, transition=transition1
)
transition2 = StateTransition.objects.create(
src=state2, dst=state1, filter=f, created_by=self.u2
)
t2 = StreamFilterTrigger.objects.create(
operator='lt', created_by=self.u2, filter=f, threshold=10, transition=transition2
)
cached_filter = cached_serialized_filter_for_slug(self.s1.slug)
t0 = dateutil.parser.parse('2016-09-28T10:00:00Z')
data = [
(t0, 1),
(t0 + datetime.timedelta(seconds=50), 5),
(t0 + datetime.timedelta(seconds=100), 8),
(t0 + datetime.timedelta(seconds=150), 11),
(t0 + datetime.timedelta(seconds=200), 15),
(t0 + datetime.timedelta(seconds=250), 9),
(t0 + datetime.timedelta(seconds=300), 8),
(t0 + datetime.timedelta(seconds=350), 12),
(t0 + datetime.timedelta(seconds=400), 9),
]
data_entries = self._dummy_data(self.s1.slug, data)
filter_helper = FilterHelper(False)
cached_filter = filter_helper.process_filter(data_entries[2], cached_filter)
self.assertEqual(get_current_cached_filter_state_for_slug(self.s1.slug), 'state1')
cached_filter = filter_helper.process_filter(data_entries[3], cached_filter)
self.assertEqual(get_current_cached_filter_state_for_slug(self.s1.slug), 'state2')
cached_filter = filter_helper.process_filter(data_entries[4], cached_filter)
self.assertEqual(get_current_cached_filter_state_for_slug(self.s1.slug), 'state2')
cached_filter = filter_helper.process_filter(data_entries[5], cached_filter)
self.assertEqual(get_current_cached_filter_state_for_slug(self.s1.slug), 'state1')
|
the-stack_106_20279
|
#!/usr/bin/env python
"""
Tool for packaging Python apps for Android
==========================================
This module defines the entry point for command line and programmatic use.
"""
from __future__ import print_function
from os import environ
from pythonforandroid import __version__
from pythonforandroid.pythonpackage import get_dep_names_of_package
from pythonforandroid.recommendations import (
RECOMMENDED_NDK_API, RECOMMENDED_TARGET_API)
from pythonforandroid.util import BuildInterruptingException
from pythonforandroid.entrypoints import main
def check_python_dependencies():
# Check if the Python requirements are installed. This appears
# before the imports because otherwise they're imported elsewhere.
# Using the ok check instead of failing immediately so that all
# errors are printed at once
from distutils.version import LooseVersion
from importlib import import_module
import sys
ok = True
modules = [('colorama', '0.3.3'), 'appdirs', ('sh', '1.10'), 'jinja2',
'six']
for module in modules:
if isinstance(module, tuple):
module, version = module
else:
version = None
try:
import_module(module)
except ImportError:
if version is None:
print('ERROR: The {} Python module could not be found, please '
'install it.'.format(module))
ok = False
else:
print('ERROR: The {} Python module could not be found, '
'please install version {} or higher'.format(
module, version))
ok = False
else:
if version is None:
continue
try:
cur_ver = sys.modules[module].__version__
except AttributeError: # this is sometimes not available
continue
if LooseVersion(cur_ver) < LooseVersion(version):
print('ERROR: {} version is {}, but python-for-android needs '
'at least {}.'.format(module, cur_ver, version))
ok = False
if not ok:
print('python-for-android is exiting due to the errors logged above')
exit(1)
check_python_dependencies()
import sys
from sys import platform
from os.path import (join, dirname, realpath, exists, expanduser, basename)
import os
import glob
import shutil
import re
import shlex
from functools import wraps
import argparse
import sh
import imp
from appdirs import user_data_dir
import logging
from distutils.version import LooseVersion
from pythonforandroid.recipe import Recipe
from pythonforandroid.logger import (logger, info, warning, setup_color,
Out_Style, Out_Fore,
info_notify, info_main, shprint)
from pythonforandroid.util import current_directory
from pythonforandroid.bootstrap import Bootstrap
from pythonforandroid.distribution import Distribution, pretty_log_dists
from pythonforandroid.graph import get_recipe_order_and_bootstrap
from pythonforandroid.build import Context, build_recipes
user_dir = dirname(realpath(os.path.curdir))
toolchain_dir = dirname(__file__)
sys.path.insert(0, join(toolchain_dir, "tools", "external"))
APK_SUFFIX = '.apk'
def add_boolean_option(parser, names, no_names=None,
default=True, dest=None, description=None):
group = parser.add_argument_group(description=description)
if not isinstance(names, (list, tuple)):
names = [names]
if dest is None:
dest = names[0].strip("-").replace("-", "_")
def add_dashes(x):
return x if x.startswith("-") else "--"+x
opts = [add_dashes(x) for x in names]
group.add_argument(
*opts, help=("(this is the default)" if default else None),
dest=dest, action='store_true')
if no_names is None:
def add_no(x):
x = x.lstrip("-")
return ("no_"+x) if "_" in x else ("no-"+x)
no_names = [add_no(x) for x in names]
opts = [add_dashes(x) for x in no_names]
group.add_argument(
*opts, help=(None if default else "(this is the default)"),
dest=dest, action='store_false')
parser.set_defaults(**{dest: default})
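# Illustrative usage note (a sketch): add_boolean_option(parser, ["force-build"],
# default=False, ...) creates both a --force-build and a --no-force-build flag,
# storing the result in args.force_build.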
def require_prebuilt_dist(func):
"""Decorator for ToolchainCL methods. If present, the method will
automatically make sure a dist has been built before continuing
or, if no dists are present or can be obtained, will raise an
error.
"""
@wraps(func)
def wrapper_func(self, args):
ctx = self.ctx
ctx.set_archs(self._archs)
ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir,
user_ndk_dir=self.ndk_dir,
user_android_api=self.android_api,
user_ndk_api=self.ndk_api)
dist = self._dist
if dist.needs_build:
if dist.folder_exists(): # possible if the dist is being replaced
dist.delete()
info_notify('No dist exists that meets your requirements, '
'so one will be built.')
build_dist_from_args(ctx, dist, args)
func(self, args)
return wrapper_func
def dist_from_args(ctx, args):
"""Parses out any distribution-related arguments, and uses them to
obtain a Distribution class instance for the build.
"""
return Distribution.get_distribution(
ctx,
name=args.dist_name,
recipes=split_argument_list(args.requirements),
arch_name=args.arch,
ndk_api=args.ndk_api,
force_build=args.force_build,
require_perfect_match=args.require_perfect_match,
allow_replace_dist=args.allow_replace_dist)
def build_dist_from_args(ctx, dist, args):
"""Parses out any bootstrap related arguments, and uses them to build
a dist."""
bs = Bootstrap.get_bootstrap(args.bootstrap, ctx)
blacklist = getattr(args, "blacklist_requirements", "").split(",")
if len(blacklist) == 1 and blacklist[0] == "":
blacklist = []
build_order, python_modules, bs = (
get_recipe_order_and_bootstrap(
ctx, dist.recipes, bs,
blacklist=blacklist
))
assert set(build_order).intersection(set(python_modules)) == set()
ctx.recipe_build_order = build_order
ctx.python_modules = python_modules
info('The selected bootstrap is {}'.format(bs.name))
info_main('# Creating dist with {} bootstrap'.format(bs.name))
bs.distribution = dist
info_notify('Dist will have name {} and requirements ({})'.format(
dist.name, ', '.join(dist.recipes)))
info('Dist contains the following requirements as recipes: {}'.format(
ctx.recipe_build_order))
info('Dist will also contain modules ({}) installed from pip'.format(
', '.join(ctx.python_modules)))
ctx.distribution = dist
ctx.prepare_bootstrap(bs)
if dist.needs_build:
ctx.prepare_dist()
build_recipes(build_order, python_modules, ctx,
getattr(args, "private", None),
ignore_project_setup_py=getattr(
args, "ignore_setup_py", False
),
)
ctx.bootstrap.run_distribute()
info_main('# Your distribution was created successfully, exiting.')
info('Dist can be found at (for now) {}'
.format(join(ctx.dist_dir, ctx.distribution.dist_dir)))
def split_argument_list(l):
if not len(l):
return []
return re.split(r'[ ,]+', l)
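# Usage note: split_argument_list('python3,kivy numpy') returns
# ['python3', 'kivy', 'numpy'], since both commas and spaces act as separators.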
class NoAbbrevParser(argparse.ArgumentParser):
"""We want to disable argument abbreviation so as not to interfere
with passing through arguments to build.py, but in python2 argparse
doesn't have this option.
    This subclass alternative follows the suggestion at
https://bugs.python.org/issue14910.
"""
def _get_option_tuples(self, option_string):
return []
class ToolchainCL(object):
def __init__(self):
argv = sys.argv
self.warn_on_carriage_return_args(argv)
# Buildozer used to pass these arguments in a now-invalid order
# If that happens, apply this fix
# This fix will be removed once a fixed buildozer is released
if (len(argv) > 2
and argv[1].startswith('--color')
and argv[2].startswith('--storage-dir')):
argv.append(argv.pop(1)) # the --color arg
argv.append(argv.pop(1)) # the --storage-dir arg
parser = NoAbbrevParser(
description='A packaging tool for turning Python scripts and apps '
'into Android APKs')
generic_parser = argparse.ArgumentParser(
add_help=False,
description='Generic arguments applied to all commands')
argparse.ArgumentParser(
add_help=False, description='Arguments for dist building')
generic_parser.add_argument(
'--debug', dest='debug', action='store_true', default=False,
help='Display debug output and all build info')
generic_parser.add_argument(
'--color', dest='color', choices=['always', 'never', 'auto'],
help='Enable or disable color output (default enabled on tty)')
generic_parser.add_argument(
'--sdk-dir', '--sdk_dir', dest='sdk_dir', default='',
help='The filepath where the Android SDK is installed')
generic_parser.add_argument(
'--ndk-dir', '--ndk_dir', dest='ndk_dir', default='',
help='The filepath where the Android NDK is installed')
generic_parser.add_argument(
'--android-api',
'--android_api',
dest='android_api',
default=0,
type=int,
            help=('The Android API level to build against; defaults to {} if '
                  'not specified.').format(RECOMMENDED_TARGET_API))
generic_parser.add_argument(
'--ndk-version', '--ndk_version', dest='ndk_version', default=None,
help=('DEPRECATED: the NDK version is now found automatically or '
'not at all.'))
generic_parser.add_argument(
'--ndk-api', type=int, default=None,
help=('The Android API level to compile against. This should be your '
'*minimal supported* API, not normally the same as your --android-api. '
'Defaults to min(ANDROID_API, {}) if not specified.').format(RECOMMENDED_NDK_API))
generic_parser.add_argument(
'--symlink-java-src', '--symlink_java_src',
action='store_true',
dest='symlink_java_src',
default=False,
help=('If True, symlinks the java src folder during build and dist '
'creation. This is useful for development only, it could also'
' cause weird problems.'))
default_storage_dir = user_data_dir('python-for-android')
if ' ' in default_storage_dir:
default_storage_dir = '~/.python-for-android'
generic_parser.add_argument(
'--storage-dir', dest='storage_dir', default=default_storage_dir,
help=('Primary storage directory for downloads and builds '
'(default: {})'.format(default_storage_dir)))
generic_parser.add_argument(
'--arch', help='The arch to build for.',
default='armeabi-v7a')
# Options for specifying the Distribution
generic_parser.add_argument(
'--dist-name', '--dist_name',
help='The name of the distribution to use or create', default='')
generic_parser.add_argument(
'--requirements',
help=('Dependencies of your app, should be recipe names or '
'Python modules. NOT NECESSARY if you are using '
'Python 3 with --use-setup-py'),
default='')
generic_parser.add_argument(
'--recipe-blacklist',
help=('Blacklist an internal recipe from use. Allows '
'disabling Python 3 core modules to save size'),
dest="recipe_blacklist",
default='')
generic_parser.add_argument(
'--blacklist-requirements',
help=('Blacklist an internal recipe from use. Allows '
'disabling Python 3 core modules to save size'),
dest="blacklist_requirements",
default='')
generic_parser.add_argument(
'--bootstrap',
help='The bootstrap to build with. Leave unset to choose '
'automatically.',
default=None)
generic_parser.add_argument(
'--hook',
help='Filename to a module that contains python-for-android hooks',
default=None)
add_boolean_option(
generic_parser, ["force-build"],
default=False,
description='Whether to force compilation of a new distribution')
add_boolean_option(
generic_parser, ["require-perfect-match"],
default=False,
description=('Whether the dist recipes must perfectly match '
'those requested'))
add_boolean_option(
generic_parser, ["allow-replace-dist"],
default=True,
description='Whether existing dist names can be automatically replaced'
)
generic_parser.add_argument(
'--local-recipes', '--local_recipes',
dest='local_recipes', default='./p4a-recipes',
help='Directory to look for local recipes')
generic_parser.add_argument(
'--java-build-tool',
dest='java_build_tool', default='auto',
choices=['auto', 'ant', 'gradle'],
help=('The java build tool to use when packaging the APK, defaults '
'to automatically selecting an appropriate tool.'))
add_boolean_option(
generic_parser, ['copy-libs'],
default=False,
description='Copy libraries instead of using biglink (Android 4.3+)'
)
self._read_configuration()
subparsers = parser.add_subparsers(dest='subparser_name',
help='The command to run')
def add_parser(subparsers, *args, **kwargs):
"""
argparse in python2 doesn't support the aliases option,
so we just don't provide the aliases there.
"""
if 'aliases' in kwargs and sys.version_info.major < 3:
kwargs.pop('aliases')
return subparsers.add_parser(*args, **kwargs)
add_parser(
subparsers,
'recommendations',
parents=[generic_parser],
help='List recommended p4a dependencies')
parser_recipes = add_parser(
subparsers,
'recipes',
parents=[generic_parser],
help='List the available recipes')
parser_recipes.add_argument(
"--compact",
action="store_true", default=False,
help="Produce a compact list suitable for scripting")
add_parser(
subparsers, 'bootstraps',
help='List the available bootstraps',
parents=[generic_parser])
add_parser(
subparsers, 'clean_all',
aliases=['clean-all'],
help='Delete all builds, dists and caches',
parents=[generic_parser])
add_parser(
subparsers, 'clean_dists',
aliases=['clean-dists'],
help='Delete all dists',
parents=[generic_parser])
add_parser(
subparsers, 'clean_bootstrap_builds',
aliases=['clean-bootstrap-builds'],
help='Delete all bootstrap builds',
parents=[generic_parser])
add_parser(
subparsers, 'clean_builds',
aliases=['clean-builds'],
help='Delete all builds',
parents=[generic_parser])
parser_clean = add_parser(
subparsers, 'clean',
help='Delete build components.',
parents=[generic_parser])
parser_clean.add_argument(
'component', nargs='+',
help=('The build component(s) to delete. You can pass any '
'number of arguments from "all", "builds", "dists", '
'"distributions", "bootstrap_builds", "downloads".'))
parser_clean_recipe_build = add_parser(
subparsers,
'clean_recipe_build', aliases=['clean-recipe-build'],
help=('Delete the build components of the given recipe. '
'By default this will also delete built dists'),
parents=[generic_parser])
parser_clean_recipe_build.add_argument(
'recipe', help='The recipe name')
parser_clean_recipe_build.add_argument(
'--no-clean-dists', default=False,
dest='no_clean_dists',
action='store_true',
help='If passed, do not delete existing dists')
parser_clean_download_cache = add_parser(
subparsers,
'clean_download_cache', aliases=['clean-download-cache'],
help='Delete cached downloads for requirement builds',
parents=[generic_parser])
parser_clean_download_cache.add_argument(
'recipes',
nargs='*',
help='The recipes to clean (space-separated). If no recipe name is'
' provided, the entire cache is cleared.')
parser_export_dist = add_parser(
subparsers,
'export_dist', aliases=['export-dist'],
help='Copy the named dist to the given path',
parents=[generic_parser])
parser_export_dist.add_argument('output_dir',
help='The output dir to copy to')
parser_export_dist.add_argument(
'--symlink',
action='store_true',
help='Symlink the dist instead of copying')
parser_apk = add_parser(
subparsers,
'apk', help='Build an APK',
parents=[generic_parser])
# This is actually an internal argument of the build.py
# (see pythonforandroid/bootstraps/common/build/build.py).
# However, it is also needed before the distribution is finally
# assembled for locating the setup.py / other build systems, which
# is why we also add it here:
parser_apk.add_argument(
'--private', dest='private',
help='the directory with the app source code files' +
' (containing your main.py entrypoint)',
required=False, default=None)
parser_apk.add_argument(
'--release', dest='build_mode', action='store_const',
const='release', default='debug',
            help='Build the APK in Release mode')
parser_apk.add_argument(
'--use-setup-py', dest="use_setup_py",
action='store_true', default=False,
            help="Process the setup.py of a project if present. " +
                 "(Experimental!)")
parser_apk.add_argument(
'--ignore-setup-py', dest="ignore_setup_py",
action='store_true', default=False,
            help="Don't run the setup.py of a project if present. " +
                 "This may be required if the setup.py is not " +
                 "designed to work inside p4a (e.g. by installing " +
                 "dependencies that won't work or aren't desired " +
                 "on Android).")
parser_apk.add_argument(
'--keystore', dest='keystore', action='store', default=None,
help=('Keystore for JAR signing key, will use jarsigner '
'default if not specified (release build only)'))
parser_apk.add_argument(
'--signkey', dest='signkey', action='store', default=None,
            help='Key alias to sign the APK with (release build only)')
parser_apk.add_argument(
'--keystorepw', dest='keystorepw', action='store', default=None,
help='Password for keystore')
parser_apk.add_argument(
'--signkeypw', dest='signkeypw', action='store', default=None,
help='Password for key alias')
add_parser(
subparsers,
'create', help='Compile a set of requirements into a dist',
parents=[generic_parser])
add_parser(
subparsers,
'archs', help='List the available target architectures',
parents=[generic_parser])
add_parser(
subparsers,
'distributions', aliases=['dists'],
help='List the currently available (compiled) dists',
parents=[generic_parser])
add_parser(
subparsers,
'delete_dist', aliases=['delete-dist'], help='Delete a compiled dist',
parents=[generic_parser])
parser_sdk_tools = add_parser(
subparsers,
'sdk_tools', aliases=['sdk-tools'],
            help='Run the given binary from the SDK tools dir',
parents=[generic_parser])
parser_sdk_tools.add_argument(
'tool', help='The binary tool name to run')
add_parser(
subparsers,
'adb', help='Run adb from the given SDK',
parents=[generic_parser])
add_parser(
subparsers,
'logcat', help='Run logcat from the given SDK',
parents=[generic_parser])
add_parser(
subparsers,
'build_status', aliases=['build-status'],
help='Print some debug information about current built components',
parents=[generic_parser])
parser.add_argument('-v', '--version', action='version',
version=__version__)
args, unknown = parser.parse_known_args(sys.argv[1:])
args.unknown_args = unknown
if hasattr(args, "private") and args.private is not None:
# Pass this value on to the internal bootstrap build.py:
args.unknown_args += ["--private", args.private]
if hasattr(args, "ignore_setup_py") and args.ignore_setup_py:
args.use_setup_py = False
self.args = args
if args.subparser_name is None:
parser.print_help()
exit(1)
setup_color(args.color)
if args.debug:
logger.setLevel(logging.DEBUG)
self.ctx = Context()
self.ctx.use_setup_py = getattr(args, "use_setup_py", True)
have_setup_py_or_similar = False
if getattr(args, "private", None) is not None:
project_dir = getattr(args, "private")
if (os.path.exists(os.path.join(project_dir, "setup.py")) or
os.path.exists(os.path.join(project_dir,
"pyproject.toml"))):
have_setup_py_or_similar = True
# Process requirements and put version in environ
if hasattr(args, 'requirements'):
requirements = []
# Add dependencies from setup.py, but only if they are recipes
# (because otherwise, setup.py itself will install them later)
if (have_setup_py_or_similar and
getattr(args, "use_setup_py", False)):
try:
info("Analyzing package dependencies. MAY TAKE A WHILE.")
# Get all the dependencies corresponding to a recipe:
dependencies = [
dep.lower() for dep in
get_dep_names_of_package(
args.private,
keep_version_pins=True,
recursive=True,
verbose=True,
)
]
info("Dependencies obtained: " + str(dependencies))
all_recipes = [
recipe.lower() for recipe in
set(Recipe.list_recipes(self.ctx))
]
dependencies = set(dependencies).intersection(
set(all_recipes)
)
# Add dependencies to argument list:
if len(dependencies) > 0:
if len(args.requirements) > 0:
args.requirements += u","
args.requirements += u",".join(dependencies)
except ValueError:
# Not a python package, apparently.
warning(
"Processing failed, is this project a valid "
"package? Will continue WITHOUT setup.py deps."
)
# Parse --requirements argument list:
for requirement in split_argument_list(args.requirements):
if "==" in requirement:
requirement, version = requirement.split(u"==", 1)
os.environ["VERSION_{}".format(requirement)] = version
info('Recipe {}: version "{}" requested'.format(
requirement, version))
requirements.append(requirement)
args.requirements = u",".join(requirements)
self.warn_on_deprecated_args(args)
self.storage_dir = args.storage_dir
self.ctx.setup_dirs(self.storage_dir)
self.sdk_dir = args.sdk_dir
self.ndk_dir = args.ndk_dir
self.android_api = args.android_api
self.ndk_api = args.ndk_api
self.ctx.symlink_java_src = args.symlink_java_src
self.ctx.java_build_tool = args.java_build_tool
self._archs = split_argument_list(args.arch)
self.ctx.local_recipes = args.local_recipes
self.ctx.copy_libs = args.copy_libs
# Each subparser corresponds to a method
getattr(self, args.subparser_name.replace('-', '_'))(args)
@staticmethod
def warn_on_carriage_return_args(args):
for check_arg in args:
if '\r' in check_arg:
warning("Argument '{}' contains a carriage return (\\r).".format(str(check_arg.replace('\r', ''))))
warning("Invoking this program via scripts which use CRLF instead of LF line endings will have undefined behaviour.")
def warn_on_deprecated_args(self, args):
"""
Print warning messages for any deprecated arguments that were passed.
"""
# Output warning if setup.py is present and neither --ignore-setup-py
# nor --use-setup-py was specified.
if getattr(args, "private", None) is not None and \
(os.path.exists(os.path.join(args.private, "setup.py")) or
os.path.exists(os.path.join(args.private, "pyproject.toml"))
):
if not getattr(args, "use_setup_py", False) and \
not getattr(args, "ignore_setup_py", False):
warning(" **** FUTURE BEHAVIOR CHANGE WARNING ****")
warning("Your project appears to contain a setup.py file.")
warning("Currently, these are ignored by default.")
warning("This will CHANGE in an upcoming version!")
warning("")
warning("To ensure your setup.py is ignored, please specify:")
warning(" --ignore-setup-py")
warning("")
warning("To enable what will some day be the default, specify:")
warning(" --use-setup-py")
# NDK version is now determined automatically
if args.ndk_version is not None:
warning('--ndk-version is deprecated and no longer necessary, '
'the value you passed is ignored')
if 'ANDROIDNDKVER' in environ:
warning('$ANDROIDNDKVER is deprecated and no longer necessary, '
'the value you set is ignored')
def hook(self, name):
if not self.args.hook:
return
if not hasattr(self, "hook_module"):
# first time, try to load the hook module
self.hook_module = imp.load_source("pythonforandroid.hook",
self.args.hook)
if hasattr(self.hook_module, name):
info("Hook: execute {}".format(name))
getattr(self.hook_module, name)(self)
else:
info("Hook: ignore {}".format(name))
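    # Illustrative hook module contents (a sketch): a file passed via --hook may
    # define functions named after the hook points used in this class, e.g.
    #
    #   def before_apk_build(toolchain):
    #       print("dist dir:", toolchain.ctx.dist_dir)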
@property
def default_storage_dir(self):
udd = user_data_dir('python-for-android')
if ' ' in udd:
udd = '~/.python-for-android'
return udd
@staticmethod
def _read_configuration():
# search for a .p4a configuration file in the current directory
if not exists(".p4a"):
return
info("Reading .p4a configuration")
with open(".p4a") as fd:
lines = fd.readlines()
lines = [shlex.split(line)
for line in lines if not line.startswith("#")]
for line in lines:
for arg in line:
sys.argv.append(arg)
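    # Illustrative .p4a file contents (a sketch; any command-line argument can
    # appear here, one or more tokens per line, lines starting with '#' are skipped):
    #
    #   --dist_name my_example_dist
    #   --requirements python3,kivy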
def recipes(self, args):
"""
Prints recipes basic info, e.g.
.. code-block:: bash
python3 3.7.1
depends: ['hostpython3', 'sqlite3', 'openssl', 'libffi']
conflicts: ['python2']
optional depends: ['sqlite3', 'libffi', 'openssl']
"""
ctx = self.ctx
if args.compact:
print(" ".join(set(Recipe.list_recipes(ctx))))
else:
for name in sorted(Recipe.list_recipes(ctx)):
                try:
                    recipe = Recipe.get_recipe(name, ctx)
                except (IOError, ValueError):
                    warning('Recipe "{}" could not be loaded'.format(name))
                    continue
                except SyntaxError:
                    import traceback
                    traceback.print_exc()
                    warning(('Recipe "{}" could not be loaded due to a '
                             'syntax error').format(name))
                    continue
version = str(recipe.version)
print('{Fore.BLUE}{Style.BRIGHT}{recipe.name:<12} '
'{Style.RESET_ALL}{Fore.LIGHTBLUE_EX}'
'{version:<8}{Style.RESET_ALL}'.format(
recipe=recipe, Fore=Out_Fore, Style=Out_Style,
version=version))
print(' {Fore.GREEN}depends: {recipe.depends}'
'{Fore.RESET}'.format(recipe=recipe, Fore=Out_Fore))
if recipe.conflicts:
print(' {Fore.RED}conflicts: {recipe.conflicts}'
'{Fore.RESET}'
.format(recipe=recipe, Fore=Out_Fore))
if recipe.opt_depends:
print(' {Fore.YELLOW}optional depends: '
'{recipe.opt_depends}{Fore.RESET}'
.format(recipe=recipe, Fore=Out_Fore))
def bootstraps(self, _args):
"""List all the bootstraps available to build with."""
for bs in Bootstrap.list_bootstraps():
bs = Bootstrap.get_bootstrap(bs, self.ctx)
print('{Fore.BLUE}{Style.BRIGHT}{bs.name}{Style.RESET_ALL}'
.format(bs=bs, Fore=Out_Fore, Style=Out_Style))
print(' {Fore.GREEN}depends: {bs.recipe_depends}{Fore.RESET}'
.format(bs=bs, Fore=Out_Fore))
def clean(self, args):
components = args.component
component_clean_methods = {
'all': self.clean_all,
'dists': self.clean_dists,
'distributions': self.clean_dists,
'builds': self.clean_builds,
'bootstrap_builds': self.clean_bootstrap_builds,
'downloads': self.clean_download_cache}
for component in components:
if component not in component_clean_methods:
raise BuildInterruptingException((
'Asked to clean "{}" but this argument is not '
'recognised'.format(component)))
component_clean_methods[component](args)
def clean_all(self, args):
"""Delete all build components; the package cache, package builds,
bootstrap builds and distributions."""
self.clean_dists(args)
self.clean_builds(args)
self.clean_download_cache(args)
def clean_dists(self, _args):
"""Delete all compiled distributions in the internal distribution
directory."""
ctx = self.ctx
if exists(ctx.dist_dir):
shutil.rmtree(ctx.dist_dir)
def clean_bootstrap_builds(self, _args):
"""Delete all the bootstrap builds."""
if exists(join(self.ctx.build_dir, 'bootstrap_builds')):
shutil.rmtree(join(self.ctx.build_dir, 'bootstrap_builds'))
# for bs in Bootstrap.list_bootstraps():
# bs = Bootstrap.get_bootstrap(bs, self.ctx)
# if bs.build_dir and exists(bs.build_dir):
# info('Cleaning build for {} bootstrap.'.format(bs.name))
# shutil.rmtree(bs.build_dir)
def clean_builds(self, _args):
"""Delete all build caches for each recipe, python-install, java code
and compiled libs collection.
This does *not* delete the package download cache or the final
distributions. You can also use clean_recipe_build to delete the build
of a specific recipe.
"""
ctx = self.ctx
if exists(ctx.build_dir):
shutil.rmtree(ctx.build_dir)
if exists(ctx.python_installs_dir):
shutil.rmtree(ctx.python_installs_dir)
libs_dir = join(self.ctx.build_dir, 'libs_collections')
if exists(libs_dir):
shutil.rmtree(libs_dir)
def clean_recipe_build(self, args):
"""Deletes the build files of the given recipe.
This is intended for debug purposes. You may experience
strange behaviour or problems with some recipes if their
build has made unexpected state changes. If this happens, run
clean_builds, or attempt to clean other recipes until things
work again.
"""
recipe = Recipe.get_recipe(args.recipe, self.ctx)
info('Cleaning build for {} recipe.'.format(recipe.name))
recipe.clean_build()
if not args.no_clean_dists:
self.clean_dists(args)
def clean_download_cache(self, args):
""" Deletes a download cache for recipes passed as arguments. If no
argument is passed, it'll delete *all* downloaded caches. ::
p4a clean_download_cache kivy,pyjnius
This does *not* delete the build caches or final distributions.
"""
ctx = self.ctx
if hasattr(args, 'recipes') and args.recipes:
for package in args.recipes:
remove_path = join(ctx.packages_path, package)
if exists(remove_path):
shutil.rmtree(remove_path)
info('Download cache removed for: "{}"'.format(package))
else:
warning('No download cache found for "{}", skipping'.format(
package))
else:
if exists(ctx.packages_path):
shutil.rmtree(ctx.packages_path)
info('Download cache removed.')
else:
print('No cache found at "{}"'.format(ctx.packages_path))
@require_prebuilt_dist
def export_dist(self, args):
"""Copies a created dist to an output dir.
This makes it easy to navigate to the dist to investigate it
or call build.py, though you do not in general need to do this
and can use the apk command instead.
"""
ctx = self.ctx
dist = dist_from_args(ctx, args)
if dist.needs_build:
raise BuildInterruptingException(
'You asked to export a dist, but there is no dist '
'with suitable recipes available. For now, you must '
                'create one first with the create argument.')
if args.symlink:
shprint(sh.ln, '-s', dist.dist_dir, args.output_dir)
else:
shprint(sh.cp, '-r', dist.dist_dir, args.output_dir)
@property
def _dist(self):
ctx = self.ctx
dist = dist_from_args(ctx, self.args)
ctx.distribution = dist
return dist
@require_prebuilt_dist
def apk(self, args):
"""Create an APK using the given distribution."""
ctx = self.ctx
dist = self._dist
# Manually fixing these arguments at the string stage is
# unsatisfactory and should probably be changed somehow, but
# we can't leave it until later as the build.py scripts assume
# they are in the current directory.
fix_args = ('--dir', '--private', '--add-jar', '--add-source',
'--whitelist', '--blacklist', '--presplash', '--icon')
unknown_args = args.unknown_args
for i, arg in enumerate(unknown_args):
argx = arg.split('=')
if argx[0] in fix_args:
if len(argx) > 1:
unknown_args[i] = '='.join(
(argx[0], realpath(expanduser(argx[1]))))
elif i + 1 < len(unknown_args):
unknown_args[i+1] = realpath(expanduser(unknown_args[i+1]))
env = os.environ.copy()
if args.build_mode == 'release':
if args.keystore:
env['P4A_RELEASE_KEYSTORE'] = realpath(expanduser(args.keystore))
if args.signkey:
env['P4A_RELEASE_KEYALIAS'] = args.signkey
if args.keystorepw:
env['P4A_RELEASE_KEYSTORE_PASSWD'] = args.keystorepw
if args.signkeypw:
env['P4A_RELEASE_KEYALIAS_PASSWD'] = args.signkeypw
elif args.keystorepw and 'P4A_RELEASE_KEYALIAS_PASSWD' not in env:
env['P4A_RELEASE_KEYALIAS_PASSWD'] = args.keystorepw
build = imp.load_source('build', join(dist.dist_dir, 'build.py'))
with current_directory(dist.dist_dir):
self.hook("before_apk_build")
os.environ["ANDROID_API"] = str(self.ctx.android_api)
build_args = build.parse_args(args.unknown_args)
self.hook("after_apk_build")
self.hook("before_apk_assemble")
build_type = ctx.java_build_tool
if build_type == 'auto':
info('Selecting java build tool:')
build_tools_versions = os.listdir(join(ctx.sdk_dir,
'build-tools'))
build_tools_versions = sorted(build_tools_versions,
key=LooseVersion)
build_tools_version = build_tools_versions[-1]
info(('Detected highest available build tools '
'version to be {}').format(build_tools_version))
if build_tools_version >= '25.0' and exists('gradlew'):
build_type = 'gradle'
info(' Building with gradle, as gradle executable is '
'present')
else:
build_type = 'ant'
if build_tools_version < '25.0':
info((' Building with ant, as the highest '
'build-tools-version is only {}').format(
build_tools_version))
else:
info(' Building with ant, as no gradle executable '
'detected')
if build_type == 'gradle':
# gradle-based build
env["ANDROID_NDK_HOME"] = self.ctx.ndk_dir
env["ANDROID_HOME"] = self.ctx.sdk_dir
gradlew = sh.Command('./gradlew')
if exists('/usr/bin/dos2unix'):
# .../dists/bdisttest_python3/gradlew
# .../build/bootstrap_builds/sdl2-python3/gradlew
# if docker on windows, gradle contains CRLF
output = shprint(
sh.Command('dos2unix'), gradlew._path.decode('utf8'),
_tail=20, _critical=True, _env=env
)
if args.build_mode == "debug":
gradle_task = "assembleDebug"
elif args.build_mode == "release":
gradle_task = "assembleRelease"
else:
raise BuildInterruptingException(
"Unknown build mode {} for apk()".format(args.build_mode))
output = shprint(gradlew, gradle_task, _tail=20,
_critical=True, _env=env)
# gradle output apks somewhere else
# and don't have version in file
apk_dir = join(dist.dist_dir,
"build", "outputs", "apk",
args.build_mode)
apk_glob = "*-{}.apk"
apk_add_version = True
else:
# ant-based build
try:
ant = sh.Command('ant')
except sh.CommandNotFound:
raise BuildInterruptingException(
'Could not find ant binary, please install it '
'and make sure it is in your $PATH.')
output = shprint(ant, args.build_mode, _tail=20,
_critical=True, _env=env)
apk_dir = join(dist.dist_dir, "bin")
apk_glob = "*-*-{}.apk"
apk_add_version = False
self.hook("after_apk_assemble")
info_main('# Copying APK to current directory')
apk_re = re.compile(r'.*Package: (.*\.apk)$')
apk_file = None
for line in reversed(output.splitlines()):
m = apk_re.match(line)
if m:
apk_file = m.groups()[0]
break
if not apk_file:
info_main('# APK filename not found in build output. Guessing...')
if args.build_mode == "release":
suffixes = ("release", "release-unsigned")
else:
suffixes = ("debug", )
for suffix in suffixes:
apks = glob.glob(join(apk_dir, apk_glob.format(suffix)))
if apks:
if len(apks) > 1:
info('More than one built APK found... guessing you '
'just built {}'.format(apks[-1]))
apk_file = apks[-1]
break
else:
raise BuildInterruptingException('Couldn\'t find the built APK')
info_main('# Found APK file: {}'.format(apk_file))
if apk_add_version:
info('# Add version number to APK')
apk_name = basename(apk_file)[:-len(APK_SUFFIX)]
                apk_file_dest = "{}-{}{}".format(
                    apk_name, build_args.version, APK_SUFFIX)
info('# APK renamed to {}'.format(apk_file_dest))
shprint(sh.cp, apk_file, apk_file_dest)
else:
shprint(sh.cp, apk_file, './')
@require_prebuilt_dist
def create(self, args):
"""Create a distribution directory if it doesn't already exist, run
any recipes if necessary, and build the apk.
"""
pass # The decorator does everything
def archs(self, _args):
"""List the target architectures available to be built for."""
print('{Style.BRIGHT}Available target architectures are:'
'{Style.RESET_ALL}'.format(Style=Out_Style))
for arch in self.ctx.archs:
print(' {}'.format(arch.arch))
def dists(self, args):
"""The same as :meth:`distributions`."""
self.distributions(args)
def distributions(self, _args):
"""Lists all distributions currently available (i.e. that have already
been built)."""
ctx = self.ctx
dists = Distribution.get_distributions(ctx)
if dists:
print('{Style.BRIGHT}Distributions currently installed are:'
'{Style.RESET_ALL}'.format(Style=Out_Style, Fore=Out_Fore))
pretty_log_dists(dists, print)
else:
print('{Style.BRIGHT}There are no dists currently built.'
'{Style.RESET_ALL}'.format(Style=Out_Style))
def delete_dist(self, _args):
dist = self._dist
if not dist.folder_exists():
info('No dist exists that matches your specifications, '
'exiting without deleting.')
return
dist.delete()
def sdk_tools(self, args):
"""Runs the android binary from the detected SDK directory, passing
all arguments straight to it. This binary is used to install
e.g. platform-tools for different API level targets. This is
intended as a convenience function if android is not in your
$PATH.
"""
ctx = self.ctx
ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir,
user_ndk_dir=self.ndk_dir,
user_android_api=self.android_api,
user_ndk_api=self.ndk_api)
android = sh.Command(join(ctx.sdk_dir, 'tools', args.tool))
output = android(
*args.unknown_args, _iter=True, _out_bufsize=1, _err_to_out=True)
for line in output:
sys.stdout.write(line)
sys.stdout.flush()
def adb(self, args):
"""Runs the adb binary from the detected SDK directory, passing all
arguments straight to it. This is intended as a convenience
function if adb is not in your $PATH.
"""
self._adb(args.unknown_args)
def logcat(self, args):
"""Runs ``adb logcat`` using the adb binary from the detected SDK
directory. All extra args are passed as arguments to logcat."""
self._adb(['logcat'] + args.unknown_args)
def _adb(self, commands):
"""Call the adb executable from the SDK, passing the given commands as
arguments."""
ctx = self.ctx
ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir,
user_ndk_dir=self.ndk_dir,
user_android_api=self.android_api,
user_ndk_api=self.ndk_api)
if platform in ('win32', 'cygwin'):
adb = sh.Command(join(ctx.sdk_dir, 'platform-tools', 'adb.exe'))
else:
adb = sh.Command(join(ctx.sdk_dir, 'platform-tools', 'adb'))
info_notify('Starting adb...')
output = adb(*commands, _iter=True, _out_bufsize=1, _err_to_out=True)
for line in output:
sys.stdout.write(line)
sys.stdout.flush()
def build_status(self, _args):
"""Print the status of the specified build. """
print('{Style.BRIGHT}Bootstraps whose core components are probably '
'already built:{Style.RESET_ALL}'.format(Style=Out_Style))
bootstrap_dir = join(self.ctx.build_dir, 'bootstrap_builds')
if exists(bootstrap_dir):
for filen in os.listdir(bootstrap_dir):
print(' {Fore.GREEN}{Style.BRIGHT}{filen}{Style.RESET_ALL}'
.format(filen=filen, Fore=Out_Fore, Style=Out_Style))
print('{Style.BRIGHT}Recipes that are probably already built:'
'{Style.RESET_ALL}'.format(Style=Out_Style))
other_builds_dir = join(self.ctx.build_dir, 'other_builds')
if exists(other_builds_dir):
for filen in sorted(os.listdir(other_builds_dir)):
name = filen.split('-')[0]
dependencies = filen.split('-')[1:]
recipe_str = (' {Style.BRIGHT}{Fore.GREEN}{name}'
'{Style.RESET_ALL}'.format(
Style=Out_Style, name=name, Fore=Out_Fore))
if dependencies:
recipe_str += (
' ({Fore.BLUE}with ' + ', '.join(dependencies) +
'{Fore.RESET})').format(Fore=Out_Fore)
recipe_str += '{Style.RESET_ALL}'.format(Style=Out_Style)
print(recipe_str)
if __name__ == "__main__":
main()
|
the-stack_106_20280
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
with open('c10p1.pickle', 'rb') as f:
data = pickle.load(f)
c10p1 = data['c10p1']
def normalize(raw_data):
mean = np.mean(raw_data, axis=0)
data = raw_data - mean
return data
data = normalize(c10p1)
plt.figure(1)
plt.scatter(data[:, 0], data[:, 1], marker='o', c='r')
plt.title("Oja's rule, data cloud")
plt.xlabel('u1')
plt.ylabel('u2')
eta = 1
alpha = 1
t_step = 0.01
max_iter = 500
w0 = w = np.random.rand(2)
print(f'W0:{w}')
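# Oja's rule: dw/dt = eta * (v*u - alpha * v**2 * w) with v = u . w, integrated
# below with a simple Euler step of size t_step while cycling through the data points.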
for step in np.arange(0, max_iter, 1):
plt.figure(1)
u = data[np.remainder(step, data.shape[0]), :]
v = u @ w
plt.plot(v, marker='^', c='b')
w = w+t_step*eta*(v*u-alpha*v*v*w)
plt.plot(w[0], w[1], marker='*', c='g')
print('Question 7:')
c = np.dot(data.T, data) / data.shape[0]
ev, e = np.linalg.eig(c)
print(ev)
print(e)
print('Question 8:')
w1 = w0
offset = [2, 2]
data1 = data + np.tile(offset, (data.shape[0], 1))
plt.figure(2)
plt.scatter(data1[:, 0], data1[:, 1], marker='o', c='r')
plt.title("Oja's rule, data1 cloud")
plt.xlabel('u1')
plt.ylabel('u2')
for step in np.arange(0, max_iter, 1):
plt.figure(2)
u = data1[np.remainder(step, data1.shape[0]), :]
v = u @ w1
plt.plot(v, marker='^', c='b')
w1 = w1+t_step*eta*(v*u-alpha*v*v*w1)
plt.plot(w1[0], w1[1], marker='*', c='g')
print('Question 9:')
w2 = w0
plt.figure(3)
plt.scatter(data[:, 0], data[:, 1], marker='o', c='r')
plt.title('Hebb rule, data cloud')
plt.xlabel('u1')
plt.ylabel('u2')
for step in np.arange(0, max_iter, 1):
plt.figure(3)
u = data[np.remainder(step, data.shape[0]), :]
v = u @ w2
plt.plot(v, marker='^', c='b')
w2 = w2+t_step*eta*(v*u)
plt.plot(w2[0], w2[1], marker='*', c='g')
plt.show()
|
the-stack_106_20281
|
import module_crud
from time import sleep
caminho = './projeto_crud_python/arquive/arquive.txt'
conteudo = '\ntexto'
opc1 = 'ler arquivo'
opc2 = 'escrever'
opc3 = 'apagar '
inicia = True
while inicia :
print('\n'*50)
print('| ','-'*15,' |')
print(f'1- {opc1}\n2- {opc2}\n3- {opc3}\n4- sair ->[')
print('| ','-'*15,' |')
resp = input('?_ ')
if resp == '1' :
print('\n'*50)
print(f'--{opc1.upper()}--')
print('-'*20,'\n')
print(module_crud.le(caminho))
print('\n','-'*20,'\n')
voltar = True
while voltar:
resp2 = input('voltar ?_s or n_ ')
if(resp2 == 's' or resp2 == '1'):
voltar = False
if(resp2 == 'n' or resp2 == '2'):
print('saindo...')
sleep(3.0)
print('---------')
voltar = False
inicia = False
else:
print('valor invalido')
elif resp == '2' :
print('\n'*50)
print(f'--{opc2.upper()}--')
print('-'*15)
resp_2 = True
while resp_2:
resplinnha = str(input('1- na ultima linha\n2- na proxima linha \n?_ '))
if(resplinnha == '1' or resplinnha == '2'):
print('-'*15,'\n')
conteudo = str(input('insira o conteudo :\n\n'))
print('\n')
print('-'*15)
print('salvando...')
print('-'*15)
if(resplinnha == '1'):
module_crud.adiciona(conteudo,caminho,True)
if(resplinnha == '2'):
module_crud.adiciona(conteudo,caminho,False)
sleep(3.0)
                print('salvo com sucesso!')
quest = True
while quest:
resp2 = input('voltar ?_s or n_ ')
if(resp2 == 's' or resp2 == '1'):
resp_2 = False
quest = False
elif(resp2 == 'n' or resp2 == '2'):
inicia = False
resp_2 = False
quest = False
else:
print('valor invalido')
else:
print('valor invalido')
elif resp == '3' :
print('\n'*50)
print(f'--{opc3.upper()}--')
module_crud.apaga(caminho)
print('-'*15)
print('arquivo apagado !')
print('-'*15)
voltar = True
while voltar:
resp2 = input('voltar ?_s or n_ ')
if(resp2 == 's' or resp2 == '1'):
voltar = False
if(resp2 == 'n' or resp2 == '2'):
print('saindo...')
sleep(3.0)
print('---------')
voltar = False
inicia = False
else:
print('valor invalido')
elif resp == '4' :
print('saindo...')
sleep(3.0)
inicia = False
else :
print('seletor não encontrado')
|
the-stack_106_20283
|
import os
import sys
import pytest
# add scripts to the path
sys.path.append(
os.path.split(
os.path.dirname(
os.path.abspath(__file__)
)
)[0]
)
import pymsteams
def test_env_webhook_url():
"""
Test that we have the webhook set as an environment variable.
This is testing our test environment, not the code.
"""
webhook_url = os.getenv("MS_TEAMS_WEBHOOK", None)
assert webhook_url
assert webhook_url.find("https") == 0
def test_send_message():
"""
This sends a simple text message with a title and link button.
"""
teams_message = pymsteams.connectorcard(os.getenv("MS_TEAMS_WEBHOOK"))
teams_message.text("This is a simple text message.")
teams_message.title("Simple Message Title")
teams_message.addLinkButton("Go to the Repo", "https://github.com/rveachkc/pymsteams")
teams_message.send()
assert isinstance(teams_message.last_http_status.status_code, int)
def test_send_sectioned_message():
"""
This sends a message with sections.
"""
# start the message
teams_message = pymsteams.connectorcard(os.getenv("MS_TEAMS_WEBHOOK"))
teams_message.text("This is the main title.")
teams_message.title("Sectioned Message Title")
# section 1
section_1 = pymsteams.cardsection()
section_1.title("Section 1 title")
section_1.activityTitle("my activity title")
section_1.activitySubtitle("my activity subtitle")
section_1.activityImage("https://raw.githubusercontent.com/rveachkc/pymsteams/develop/test/desk_toys_1.jpg")
section_1.activityText("This is my activity Text. You should see an activity image, activity title, activity subtitle, and this text (of course).")
section_1.addFact("Fact", "this is fine")
section_1.addFact("Fact", "this is also fine")
section_1.text("This is my section 1 text. This section has an activity above and two facts below.")
teams_message.addSection(section_1)
# section 2
section_2 = pymsteams.cardsection()
section_2.text("This is section 2. You should see an image. This section does not have facts or a title.")
section_2.addImage("https://raw.githubusercontent.com/rveachkc/pymsteams/develop/test/desk_toys_2.jpg", ititle="Pew Pew Pew")
teams_message.addSection(section_2)
# send
teams_message.send()
assert isinstance(teams_message.last_http_status.status_code, int)
def test_send_potential_action():
"""
This sends a message with a potential action
"""
myTeamsMessage = pymsteams.connectorcard(os.getenv("MS_TEAMS_WEBHOOK"))
myTeamsMessage.text("This message should have four potential actions.")
myTeamsMessage.title("Action Message Title")
myTeamsPotentialAction1 = pymsteams.potentialaction(_name = "Add a comment")
myTeamsPotentialAction1.addInput("TextInput","comment","Add a comment",False)
myTeamsPotentialAction1.addAction("HttpPost","Add Comment","https://jsonplaceholder.typicode.com/posts")
myTeamsPotentialAction2 = pymsteams.potentialaction(_name = "Get Users")
myTeamsPotentialAction2.addInput("DateInput","dueDate","Enter due date")
myTeamsPotentialAction2.addAction("HttpPost","save","https://jsonplaceholder.typicode.com/posts")
myTeamsPotentialAction3 = pymsteams.potentialaction(_name = "Change Status")
myTeamsPotentialAction3.choices.addChoices("In progress","0")
myTeamsPotentialAction3.choices.addChoices("Active","1")
myTeamsPotentialAction3.addInput("MultichoiceInput","list","Select a status",False)
myTeamsPotentialAction3.addAction("HttpPost","Save","https://jsonplaceholder.typicode.com/posts")
myTeamsPotentialAction4 = pymsteams.potentialaction(_name = "Download pymsteams")
myTeamsPotentialAction4.addOpenURI("Links", [
{
"os": "default",
"uri": "https://pypi.org/project/pymsteams/",
},
])
myTeamsMessage.addPotentialAction(myTeamsPotentialAction1)
myTeamsMessage.addPotentialAction(myTeamsPotentialAction2)
myTeamsMessage.addPotentialAction(myTeamsPotentialAction3)
myTeamsMessage.summary("Message Summary")
myTeamsMessage.send()
assert isinstance(myTeamsMessage.last_http_status.status_code, int)
def test_http_500():
with pytest.raises(pymsteams.TeamsWebhookException):
#myTeamsMessage = pymsteams.connectorcard(os.getenv("MS_TEAMS_WEBHOOK"))
myTeamsMessage = pymsteams.connectorcard("https://httpstat.us/500")
myTeamsMessage.text("This is a simple text message.")
myTeamsMessage.title("Simple Message Title")
myTeamsMessage.send()
#myTeamsMessage.hookurl = "https://httpstat.us/500"
def test_http_403():
with pytest.raises(pymsteams.TeamsWebhookException):
myTeamsMessage = pymsteams.connectorcard("http://httpstat.us/403")
myTeamsMessage.text("This is a simple text message.")
myTeamsMessage.title("Simple Message Title")
myTeamsMessage.send()
def test_message_size():
def getMsg(card):
msg = pymsteams.connectorcard(os.getenv("MS_TEAMS_WEBHOOK"))
msg.title('Simple Message Title')
msg.summary('Simple Summary')
msg.addSection(card)
return msg
# setup text that's too large
failure_char_count = 21000
text = 'a' * failure_char_count
card = pymsteams.cardsection()
card.text(text)
msg = getMsg(card)
with pytest.raises(pymsteams.TeamsWebhookException):
msg.send()
card1 = pymsteams.cardsection()
card2 = pymsteams.cardsection()
card1.text(text[:int(len(text)/2)])
card2.text(text[int(len(text)/2):])
msg = getMsg(card1)
assert msg.send()
msg = getMsg(card2)
assert msg.send()
|
the-stack_106_20284
|
"""
Train LDA model using https://pypi.python.org/pypi/lda,
and visualize in 2-D space with t-SNE.
"""
import os
import time
import lda
import random
import argparse
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.manifold import TSNE
import bokeh.plotting as bp
from bokeh.plotting import save
from bokeh.models import HoverTool
from utils import preprocess
#
import dask.dataframe as dd
import glob
import pandas as pd
if __name__ == '__main__':
lda_base = 'lda_simple'
if not os.path.exists(lda_base):
os.makedirs(lda_base)
##############################################################################
# cli inputs
parser = argparse.ArgumentParser()
parser.add_argument('--raw_tweet_dir', required=True, type=str,
help='a directory of raw profile files')
parser.add_argument('--num_train_tweet', required=True, type=int,
help='number of profiles used for training a LDA model')
parser.add_argument('--n_topics', required=True, type=int, default=20,
help='number of topics')
parser.add_argument('--n_iter', required=True, type=int, default=1500,
help='number of iteration for LDA model training')
parser.add_argument('--top_n', required=True, type=int, default=8,
help='number of keywords to show for each topic')
parser.add_argument('--threshold', required=True, type=float, default=0.0,
help='threshold probability for topic assignment')
parser.add_argument('--num_example', required=True, type=int, default=5000,
help='number of profiles to show on the plot')
parser.add_argument('--sentiment', required=True, type=str, default='all',
help='sentiment from tweets of profiles to plot')
args = parser.parse_args()
# unpack
raw_tweet_dir = args.raw_tweet_dir
num_train_tweet = args.num_train_tweet
n_topics = args.n_topics
n_iter = args.n_iter
n_top_words = args.top_n
threshold = args.threshold
num_example = args.num_example
sentiment = args.sentiment
##############################################################################
# get training
num_scanned_tweet = 0
num_qualified_tweet = 0
all_files = glob.glob(raw_tweet_dir + "week_*_all.tsv")
li = []
for filename in all_files:
print(filename)
df = pd.read_csv(filename, index_col=None, #header=0,
sep='\t',encoding = 'utf8',lineterminator='\n', usecols = [16,20,36],
names=['user_id_str','user_description','sentiment'] ,low_memory=False)
li.append(df)
raw_tweet_files = pd.concat(li, axis=0, ignore_index=True)
raw_tweet_files.info()
# split by sentiment
raw_tweet_files["sentiment"] = raw_tweet_files["sentiment"].apply(pd.to_numeric, errors='coerce')
if sentiment == 'pos':
raw_tweet_files = raw_tweet_files[raw_tweet_files['sentiment'].apply(lambda x: x>0.1)]
elif sentiment == 'neg':
raw_tweet_files = raw_tweet_files[raw_tweet_files['sentiment'].apply(lambda x: x<-0.1)]
raw_tweet_files['user_id_str'] = pd.to_numeric(raw_tweet_files['user_id_str'], errors='coerce')
raw_tweet_files["user_description"]=raw_tweet_files["user_description"].astype(str)
raw_tweet_files = raw_tweet_files[~raw_tweet_files['user_id_str'].isnull()]
raw_tweet_files = raw_tweet_files[~raw_tweet_files['user_description'].isnull()]
raw_tweet_text = set(raw_tweet_files['user_description'])
print('len', len(raw_tweet_text))
raw_tweet = []
processed_tweet = []
    processed_tweet_set = set()  # for a quick 'already seen?' membership check
t0 = time.time()
for row in raw_tweet_text:
num_scanned_tweet += 1
p_t = preprocess(row)
if p_t and p_t not in processed_tweet_set: # ignore duplicate tweets
raw_tweet += row,
processed_tweet += p_t,
processed_tweet_set.add(p_t)
num_qualified_tweet += 1
if num_scanned_tweet % 1000000 == 0: # progress update
print('scanned {} tweets'.format(num_scanned_tweet))
if num_qualified_tweet == num_train_tweet: # enough data for training
break
'''if num_qualified_tweet == num_train_tweet: # break outer loop
break'''
del processed_tweet_set # free memory
t1 = time.time()
print('\n>>> scanned {} tweets to find {} trainable; took {} mins\n'.format(
num_scanned_tweet, num_train_tweet, (t1-t0)/60.))
##############################################################################
# train LDA
    # ignore terms that have a document frequency strictly lower than 5
cvectorizer = CountVectorizer(min_df=5)
cvz = cvectorizer.fit_transform(processed_tweet)
lda_model = lda.LDA(n_topics=n_topics, n_iter=n_iter)
X_topics = lda_model.fit_transform(cvz)
t2 = time.time()
print('\n>>> LDA training done; took {} mins\n'.format((t2-t1)/60.))
np.save('lda_simple/lda_doc_topic_{}profiles_{}topics_{}.npy'.format(
X_topics.shape[0], X_topics.shape[1], sentiment), X_topics)
np.save('lda_simple/lda_topic_word_{}profiles_{}topics_{}.npy'.format(
X_topics.shape[0], X_topics.shape[1], sentiment), lda_model.topic_word_)
print('\n>>> doc_topic & topic word written to disk\n')
##############################################################################
# threshold and plot
_idx = np.amax(X_topics, axis=1) > threshold # idx of tweets that > threshold
_topics = X_topics[_idx]
_raw_tweet = np.array(raw_tweet)[_idx]
_processed_tweet = np.array(processed_tweet)[_idx]
# t-SNE: 50 -> 2D
tsne_model = TSNE(n_components=2, verbose=1, random_state=0, angle=.99,
init='pca')
tsne_lda = tsne_model.fit_transform(_topics[:num_example])
t3 = time.time()
print('\n>>> t-SNE transformation done; took {} mins\n'.format((t3-t2)/60.))
# find the most probable topic for each tweet
_lda_keys = []
for i, tweet in enumerate(_raw_tweet):
_lda_keys += _topics[i].argmax(),
# generate random hex color
colormap = []
for i in range(X_topics.shape[1]):
r = lambda: random.randint(0, 255)
colormap += ('#%02X%02X%02X' % (r(), r(), r())),
colormap = np.array(colormap)
# show topics and their top words
topic_summaries = []
topic_word = lda_model.topic_word_ # get the topic words
vocab = cvectorizer.get_feature_names()
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
topic_summaries.append(' '.join(topic_words))
    # use the coordinate of a random tweet from each topic as that topic's label coordinate
topic_coord = np.empty((X_topics.shape[1], 2)) * np.nan
for topic_num in _lda_keys:
if not np.isnan(topic_coord).any():
break
topic_coord[topic_num] = tsne_lda[_lda_keys.index(topic_num)]
# plot
title = "t-SNE visualization of LDA model trained on {} profiles, {} topics, " \
"thresholding at {} topic probability, {} iter ({} data points and " \
"top {} words)".format(num_qualified_tweet, n_topics, threshold,
n_iter, num_example, n_top_words)
plot_lda = bp.figure(plot_width=1400, plot_height=1100,
title=title,
tools="pan,wheel_zoom,box_zoom,reset,hover,previewsave",
x_axis_type=None, y_axis_type=None, min_border=1)
# create the dictionary with all the information
plot_dict = {
'x': tsne_lda[:, 0],#tsne_lda[:num_example, 0],
'y': tsne_lda[:, 1],#tsne_lda[:num_example, 1],
'colors': colormap[_lda_keys][:num_example],
'tweet': _raw_tweet[:num_example],#text[:num_example],
'topic_key': _lda_keys[:num_example]
}
# create the dataframe from the dictionary
plot_df = pd.DataFrame.from_dict(plot_dict)
# declare the source
source = bp.ColumnDataSource(data=plot_df)
# build scatter function from the columns of the dataframe
plot_lda.scatter('x', 'y', color='colors', source=source)
'''plot_lda.scatter(x=tsne_lda[:, 0], y=tsne_lda[:, 1],
color=colormap[_lda_keys][:num_example],
source=bp.ColumnDataSource({
"tweet": _raw_tweet[:num_example],
"topic_key": _lda_keys[:num_example]
}))'''
# plot crucial words
for i in range(X_topics.shape[1]):
plot_lda.text(topic_coord[i, 0], topic_coord[i, 1], [topic_summaries[i]])
hover = plot_lda.select(dict(type=HoverTool))
hover.tooltips = {"tweet": "@tweet - topic: @topic_key"}
save(plot_lda, 'tsne_lda_viz_{}_{}_{}_{}_{}_{}_{}.html'.format(
num_qualified_tweet, n_topics, threshold, n_iter, num_example, n_top_words, sentiment))
t4 = time.time()
print('\n>>> whole process done; took {} mins\n'.format((t4-t0)/60.))
|
the-stack_106_20285
|
import click
from flask.cli import FlaskGroup
from myapi.app import create_app
def create_myapi(info):
return create_app(cli=True)
@click.group(cls=FlaskGroup, create_app=create_myapi)
def cli():
"""Main entry point"""
@cli.command("init")
def init():
"""Init application, create database tables
and create a new user named admin with password admin
"""
from myapi.extensions import db
from myapi.models import User
click.echo("create database")
db.create_all()
click.echo("done")
click.echo("create user")
user = User(
username='admin',
email='[email protected]',
password='admin',
active=True
)
db.session.add(user)
db.session.commit()
click.echo("created user admin")
if __name__ == "__main__":
cli()
|
the-stack_106_20288
|
import os
import hashlib
import warnings
from tempfile import mkdtemp, TemporaryFile
from shutil import rmtree
from twisted.trial import unittest
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.pipelines.images import ImagesPipeline
skip = False
try:
from PIL import Image
except ImportError as e:
skip = 'Missing Python Imaging Library, install https://pypi.python.org/pypi/Pillow'
else:
encoders = set(('jpeg_encoder', 'jpeg_decoder'))
if not encoders.issubset(set(Image.core.__dict__)):
skip = 'Missing JPEG encoders'
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class ImagesPipelineTestCase(unittest.TestCase):
skip = skip
def setUp(self):
self.tempdir = mkdtemp()
self.pipeline = ImagesPipeline(self.tempdir, download_func=_mocked_download_func)
def tearDown(self):
rmtree(self.tempdir)
def test_file_path(self):
file_path = self.pipeline.file_path
self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.gif")),
'full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg')
self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.jpg")),
'full/0ffcd85d563bca45e2f90becd0ca737bc58a00b2.jpg')
self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.gif")),
'full/b250e3a74fff2e4703e310048a5b13eba79379d2.jpg')
self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg')
self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
response=Response("http://www.dorma.co.uk/images/product_details/2532"),
info=object()),
'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1.jpg')
def test_thumbnail_name(self):
thumb_path = self.pipeline.thumb_path
name = '50'
self.assertEqual(thumb_path(Request("file:///tmp/foo.jpg"), name),
'thumbs/50/38a86208c36e59d4404db9e37ce04be863ef0335.jpg')
self.assertEqual(thumb_path(Request("file://foo.png"), name),
'thumbs/50/e55b765eba0ec7348e50a1df496040449071b96a.jpg')
self.assertEqual(thumb_path(Request("file:///tmp/foo"), name),
'thumbs/50/0329ad83ebb8e93ea7c7906d46e9ed55f7349a50.jpg')
self.assertEqual(thumb_path(Request("file:///tmp/some.name/foo"), name),
'thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg')
self.assertEqual(thumb_path(Request("file:///tmp/some.name/foo"), name,
response=Response("file:///tmp/some.name/foo"),
info=object()),
'thumbs/50/850233df65a5b83361798f532f1fc549cd13cbe9.jpg')
def test_convert_image(self):
SIZE = (100, 100)
        # straightforward case: RGB and JPEG
COLOUR = (0, 127, 255)
im = _create_image('JPEG', 'RGB', SIZE, COLOUR)
converted, _ = self.pipeline.convert_image(im)
self.assertEquals(converted.mode, 'RGB')
self.assertEquals(converted.getcolors(), [(10000, COLOUR)])
        # check that the thumbnail keeps the image aspect ratio
thumbnail, _ = self.pipeline.convert_image(converted, size=(10, 25))
self.assertEquals(thumbnail.mode, 'RGB')
self.assertEquals(thumbnail.size, (10, 10))
# transparency case: RGBA and PNG
COLOUR = (0, 127, 255, 50)
im = _create_image('PNG', 'RGBA', SIZE, COLOUR)
converted, _ = self.pipeline.convert_image(im)
self.assertEquals(converted.mode, 'RGB')
self.assertEquals(converted.getcolors(), [(10000, (205, 230, 255))])
class DeprecatedImagesPipeline(ImagesPipeline):
def file_key(self, url):
return self.image_key(url)
def image_key(self, url):
image_guid = hashlib.sha1(url).hexdigest()
return 'empty/%s.jpg' % (image_guid)
def thumb_key(self, url, thumb_id):
thumb_guid = hashlib.sha1(url).hexdigest()
return 'thumbsup/%s/%s.jpg' % (thumb_id, thumb_guid)
class DeprecatedImagesPipelineTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
def init_pipeline(self, pipeline_class):
self.pipeline = pipeline_class(self.tempdir, download_func=_mocked_download_func)
self.pipeline.open_spider(None)
def test_default_file_key_method(self):
self.init_pipeline(ImagesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_key("https://dev.mydeco.com/mydeco.gif"),
'full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg')
self.assertEqual(len(w), 1)
self.assertTrue('image_key(url) and file_key(url) methods are deprecated' in str(w[-1].message))
def test_default_image_key_method(self):
self.init_pipeline(ImagesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.image_key("https://dev.mydeco.com/mydeco.gif"),
'full/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg')
self.assertEqual(len(w), 1)
self.assertTrue('image_key(url) and file_key(url) methods are deprecated' in str(w[-1].message))
def test_overridden_file_key_method(self):
self.init_pipeline(DeprecatedImagesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.file_path(Request("https://dev.mydeco.com/mydeco.gif")),
'empty/3fd165099d8e71b8a48b2683946e64dbfad8b52d.jpg')
self.assertEqual(len(w), 1)
self.assertTrue('image_key(url) and file_key(url) methods are deprecated' in str(w[-1].message))
def test_default_thumb_key_method(self):
self.init_pipeline(ImagesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.thumb_key("file:///tmp/foo.jpg", 50),
'thumbs/50/38a86208c36e59d4404db9e37ce04be863ef0335.jpg')
self.assertEqual(len(w), 1)
self.assertTrue('thumb_key(url) method is deprecated' in str(w[-1].message))
def test_overridden_thumb_key_method(self):
self.init_pipeline(DeprecatedImagesPipeline)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertEqual(self.pipeline.thumb_path(Request("file:///tmp/foo.jpg"), 50),
'thumbsup/50/38a86208c36e59d4404db9e37ce04be863ef0335.jpg')
self.assertEqual(len(w), 1)
self.assertTrue('thumb_key(url) method is deprecated' in str(w[-1].message))
def tearDown(self):
rmtree(self.tempdir)
class ImagesPipelineTestCaseFields(unittest.TestCase):
def test_item_fields_default(self):
class TestItem(Item):
name = Field()
image_urls = Field()
images = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/images/1.jpg'
item = cls({'name': 'item1', 'image_urls': [url]})
pipeline = ImagesPipeline.from_settings(Settings({'IMAGES_STORE': 's3://example/images/'}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['images'], [results[0][1]])
def test_item_fields_override_settings(self):
class TestItem(Item):
name = Field()
image = Field()
stored_image = Field()
for cls in TestItem, dict:
url = 'http://www.example.com/images/1.jpg'
item = cls({'name': 'item1', 'image': [url]})
pipeline = ImagesPipeline.from_settings(Settings({
'IMAGES_STORE': 's3://example/images/',
'IMAGES_URLS_FIELD': 'image',
'IMAGES_RESULT_FIELD': 'stored_image'
}))
requests = list(pipeline.get_media_requests(item, None))
self.assertEqual(requests[0].url, url)
results = [(True, {'url': url})]
pipeline.item_completed(results, item, None)
self.assertEqual(item['stored_image'], [results[0][1]])
def _create_image(format, *a, **kw):
buf = TemporaryFile()
Image.new(*a, **kw).save(buf, format)
buf.seek(0)
return Image.open(buf)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_20290
|
"""
AWS keys protection module
"""
import sys
import json
import hashlib
import copy
import ipaddress
import urllib.request
import boto3
import botocore.config
from .common import get_accessibility_data
DENY_NOT_IP_POLICY = {
"Sid": "DenyIpBased",
"Effect": "Deny",
"NotAction": "iam:PutUserPolicy",
"Resource": "*",
"Condition": {
"NotIpAddress": {
"aws:SourceIp": ""
}
}
}
DENY_NOT_UA_POLICY = {
"Sid": "DenyUABased",
"Effect": "Deny",
"Action": "iam:PutUserPolicy",
"Resource": "*",
"Condition": {
"StringNotEquals": {
"aws:UserAgent": ""
}
}
}
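# Illustrative example of the inline policy that protect_keys() ends up
# attaching to a user when backdoor access is enabled (the CIDR and the
# user-agent hash shown here are placeholder values; the real ones are
# computed at runtime).  Without backdoor access the second statement is
# dropped and the first one denies all actions instead of using NotAction.
# {
#   "Version": "2012-10-17",
#   "Statement": [
#     {"Sid": "DenyIpBased", "Effect": "Deny", "NotAction": "iam:PutUserPolicy",
#      "Resource": "*",
#      "Condition": {"NotIpAddress": {"aws:SourceIp": "203.0.113.7/32"}}},
#     {"Sid": "DenyUABased", "Effect": "Deny", "Action": "iam:PutUserPolicy",
#      "Resource": "*",
#      "Condition": {"StringNotEquals": {"aws:UserAgent": "<sha256 of the user ARN>"}}}
#   ]
# }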
def protect_keys(options):
"""
Protect AWS access keys
by applying IAM policy with aws:SourceIp
condition
"""
data = get_accessibility_data(False)
accessible_profiles = [
profile for profile in data if data[profile].get('accessible', False)]
if options.target_ip == "":
response = urllib.request.urlopen("http://ipinfo.io").read()
ip_cidr = json.loads(response)['ip'] + "/32"
else:
try:
ip_cidr = str(ipaddress.ip_network(options.target_ip))
except ValueError as err:
sys.stderr.write(
f'Error parsing IP address {options.target_ip}:{str(err)}')
sys.exit(-1)
if options.target_profile != "" and options.target_profile not in accessible_profiles:
print(f'Profile {options.target_profile} not available or accessible')
return
if options.target_profile == "":
        print(f'IP based protection ({ip_cidr}) will be applied to all '
              'of the following active profiles:\n')
print('\n'.join(accessible_profiles))
answer = input('\nProceed? (y/n)')
if not answer.lower().strip() == 'y':
print('Aborting...')
sys.exit(0)
for profile in accessible_profiles:
# if single profile targeted
if options.target_profile != "" and options.target_profile != profile:
continue
arn = data[profile]['identity']
if ':user' not in arn:
print(f'Not applying protection for non-user identity {arn}')
continue
arn_digest = hashlib.sha256(arn.encode('utf-8')).hexdigest()
policy = {
"Version": "2012-10-17",
"Statement": [copy.copy(DENY_NOT_IP_POLICY), copy.copy(DENY_NOT_UA_POLICY)]
}
policy['Statement'][0]['Condition']['NotIpAddress']['aws:SourceIp'] = ip_cidr
if not options.enable_backdoor:
del policy['Statement'][1]
del policy['Statement'][0]['NotAction']
policy['Statement'][0]['Action'] = '*'
else:
policy['Statement'][1]['Condition']['StringNotEquals']['aws:UserAgent'] = arn_digest
iam = boto3.Session(profile_name=profile).client('iam', config=botocore.config.Config(
user_agent=arn_digest
))
print(f'Processing profile {profile}: {arn}')
user = arn.split('/')[1]
print(f'🔒 Set IP based protection ({ip_cidr}) on user {user}')
if options.enable_backdoor:
print(
                'Backdoor 🚪 access enabled, you can use this utility from a different IP to apply protection again\n')
else:
print(
f'No backdoor 🚪 access. User policy will only accept API calls from {ip_cidr}\n')
iam.put_user_policy(
UserName=user,
PolicyName='IpBasedProtection',
PolicyDocument=json.dumps(policy)
)
|
the-stack_106_20291
|
# Copyright (C) 2016 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
def fix_deprecated_option_names(argv):
deprecated = []
for i, v in enumerate(argv[1:]):
if v[0] == '-':
tag = v.split('=')[0]
if '_' in tag:
correct_tag = tag.replace('_', '-')
deprecated.append(tag)
argv[i + 1] = v.replace(tag, correct_tag)
return deprecated
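# Hypothetical example of the rewrite performed above (the argv values are
# invented for illustration): underscores in option names are converted to
# dashes in place, and the deprecated spellings are returned.
#     argv = ['phonopy', '--band_points=51', '--fc_symmetry']
#     fix_deprecated_option_names(argv)  # -> ['--band_points', '--fc_symmetry']
#     argv                               # ['phonopy', '--band-points=51', '--fc-symmetry']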
def show_deprecated_option_warnings(deprecated):
lines = ["Option names with underscores are deprecated, by which",
"the underscores are replaced by dashes. Therefore"]
for tag in deprecated:
lines.append("'%s' has to be written as '%s'." %
(tag, tag.replace('_', '-')))
maxlen = max([len(line) for line in lines])
print("*" * maxlen)
print('\n'.join(lines))
print("*" * maxlen)
print("")
def get_parser():
deprecated = fix_deprecated_option_names(sys.argv)
import argparse
parser = argparse.ArgumentParser(
description="Phonopy command-line-tool")
parser.set_defaults(
abinit_mode=False,
anime=None,
band_format=None,
band_indices=None,
band_labels=None,
band_paths=None,
band_points=None,
cell_filename=None,
crystal_mode=False,
cutoff_frequency=None,
cutoff_radius=None,
displacement_distance=None,
dynamical_matrix_decimals=None,
elk_mode=False,
siesta_mode=False,
cp2k_mode=False,
dftbp_mode=False,
fc_calculator_options=None,
fc_symmetry=None,
fc_format=None,
fc_spg_symmetry=False,
fits_debye_model=False,
force_constants_decimals=None,
force_constants=None,
force_sets=None,
force_sets_zero=None,
fmax=None,
fmin=None,
frequency_conversion_factor=None,
fpitch=None,
frequency_scale_factor=None,
gv_delta_q=None,
hdf5_compression="gzip",
is_band_connection=False,
is_band_const_interval=False,
is_check_symmetry=False,
is_displacement=False,
is_dos_mode=False,
is_eigenvectors=False,
is_full_fc=False,
is_gamma_center=False,
is_graph_plot=False,
is_graph_save=False,
is_group_velocity=False,
is_hdf5=False,
is_legend=False,
is_legacy_plot=False,
is_little_cogroup=False,
is_moment=False,
is_nac=False,
is_nodiag=False,
is_nomeshsym=False,
is_nosym=False,
is_plusminus_displacements=False,
is_tetrahedron_method=True,
is_thermal_displacements=False,
is_thermal_displacement_matrices=False,
is_thermal_displacement_matrices_cif=None,
is_thermal_properties=False,
is_projected_thermal_properties=False,
is_trigonal_displacements=False,
is_wien2k_p1=False,
irreps_qpoint=None,
lapack_solver=False,
loglevel=None,
masses=None,
magmom=None,
mesh_format=None,
mesh_numbers=None,
modulation=None,
moment_order=None,
nac_method=None,
nac_q_direction=None,
pdos=None,
pretend_real=False,
primitive_axes=None,
projection_direction=None,
qe_mode=False,
qpoints=None,
qpoints_format=None,
quiet=False,
        random_displacements=None,
random_seed=None,
read_fc_format=None,
read_force_constants=False,
read_qpoints=False,
show_irreps=False,
sigma=None,
supercell_dimension=None,
symmetry_tolerance=None,
temperature=None,
tmax=None,
tmin=None,
tstep=None,
        turbomole_mode=False,
use_alm=False,
use_hiphive=False,
vasp_mode=False,
verbose=False,
wien2k_mode=False,
write_dynamical_matrices=False,
write_fc_format=None,
write_force_constants=False,
write_mesh=True)
parser.add_argument(
"--abinit", dest="abinit_mode", action="store_true",
help="Invoke Abinit mode")
parser.add_argument(
"--alm", dest="use_alm", action="store_true",
help="Use ALM for generating force constants")
parser.add_argument(
"--amplitude", dest="displacement_distance", type=float,
help="Distance of displacements")
parser.add_argument(
"--anime", nargs='+', dest="anime",
help="Same as ANIME tag")
parser.add_argument(
"--band", nargs='+', dest="band_paths",
help="Same behavior as BAND tag")
parser.add_argument(
"--band-connection", dest="is_band_connection", action="store_true",
help="Treat band crossings")
parser.add_argument(
"--band-const-interval", dest="is_band_const_interval",
action="store_true",
help="Band paths are sampled with similar interval.")
parser.add_argument(
"--band-labels", nargs='+', dest="band_labels",
help="Show labels at band segments")
parser.add_argument(
"--band-format", dest="band_format",
help="Band structure output file-format")
parser.add_argument(
"--band-points", dest="band_points", type=int,
help=("Number of points calculated on a band segment in "
"the band structure mode"))
parser.add_argument(
"--bi", "--band-indices", nargs='+', dest="band_indices",
help=("Band indices to be included to calcualte thermal "
"properties"))
parser.add_argument(
"-c", "--cell", dest="cell_filename", metavar="FILE",
help="Read unit cell")
parser.add_argument(
"--cp2k", dest="cp2k_mode", action="store_true",
help="Invoke CP2K mode")
parser.add_argument(
"--crystal", dest="crystal_mode", action="store_true",
help="Invoke CRYSTAL mode")
parser.add_argument(
"--cutoff-freq", "--cutoff-frequency", dest="cutoff_frequency",
type=float,
help=("Thermal properties are not calculated below this "
"cutoff frequency."))
parser.add_argument(
"--cutoff-radius", dest="cutoff_radius", type=float,
help="Out of cutoff radius, force constants are set zero.")
parser.add_argument(
"-d", "--displacement", dest="is_displacement", action="store_true",
help="Create supercells with displacements")
parser.add_argument(
"--dftb+", dest="dftbp_mode", action="store_true",
help="Invoke dftb+ mode")
parser.add_argument(
"--dim", nargs='+', dest="supercell_dimension",
help="Same behavior as DIM tag")
parser.add_argument(
"--dm-decimals", dest="dynamical_matrix_decimals",
type=int, help="Decimals of values of decimals")
parser.add_argument(
"--dos", dest="is_dos_mode", action="store_true",
help="Calculate (P)DOS")
parser.add_argument(
"--eigvecs", "--eigenvectors", dest="is_eigenvectors",
action="store_true",
help="Output eigenvectors")
parser.add_argument(
"--elk", dest="elk_mode", action="store_true",
help="Invoke elk mode")
parser.add_argument(
"-f", "--force-sets", nargs='+', dest="force_sets",
help="Create FORCE_SETS")
parser.add_argument(
"--factor", dest="frequency_conversion_factor", type=float,
help="Frequency unit conversion factor")
parser.add_argument(
"--fc", "--force-constants", nargs=1, dest="force_constants",
help=("Create FORCE_CONSTANTS from vaspurn.xml. "
"vasprun.xml has to be passed as argument."))
parser.add_argument(
"--fc-calc-opt", "--fc-calculator-options",
dest="fc_calculator_options",
help=("Options for force constants calculator as comma separated "
"string with the style of key = values"))
parser.add_argument(
"--fc-decimals", dest="force_constants_decimals", type=int,
help="Decimals of values of force constants")
parser.add_argument(
"--fc-format", dest="fc_format",
help="Force constants input/output file-format")
parser.add_argument(
"--fc-spg-symmetry", dest="fc_spg_symmetry", action="store_true",
help="Enforce space group symmetry to force constants")
parser.add_argument(
"--fc-symmetry", dest="fc_symmetry", action="store_true",
help="Symmetrize force constants")
parser.add_argument(
"--fits-debye-model", dest="fits_debye_model", action="store_true",
help="Fits total DOS to a Debye model")
parser.add_argument(
"--freq-scale", dest="frequency_scale_factor", type=float,
help=("Squared scale factor multiplied as fc2 * factor^2. Therefore "
"frequency is changed but the contribution from NAC is not "
"changed."))
parser.add_argument(
"--full-fc", dest="is_full_fc", action="store_true",
help="Calculate full supercell force constants matrix")
parser.add_argument(
"--fz", "--force-sets-zero", nargs='+', dest="force_sets_zero",
help=("Create FORCE_SETS. disp.yaml in the current directory and "
"vapsrun.xml's for VASP or case.scf(m) for Wien2k as arguments "
"are required. The first argument is that of the perfect "
"supercell to subtract residual forces"))
parser.add_argument(
"--fmax", dest="fmax", type=float,
help="Maximum frequency used for DOS or moment calculation")
parser.add_argument(
"--fmin", dest="fmin", type=float,
help="Minimum frequency used for DOS or moment calculation")
parser.add_argument(
"--fpitch", dest="fpitch", type=float,
help="Frequency pitch used for DOS or moment calculation")
parser.add_argument(
"--gc", "--gamma-center", dest="is_gamma_center", action="store_true",
help="Set mesh as Gamma center")
parser.add_argument(
"--gv", "--group-velocity", dest="is_group_velocity",
action="store_true",
help="Calculate group velocities at q-points")
parser.add_argument(
"--gv-delta-q", dest="gv_delta_q", type=float,
help="Delta-q distance used for group velocity calculation")
parser.add_argument(
"--hdf5", dest="is_hdf5", action="store_true",
help="Use hdf5 for force constants")
parser.add_argument(
"--hdf5-compression", dest="hdf5_compression",
help="hdf5 compression filter")
parser.add_argument(
"--hiphive", dest="use_hiphive", action="store_true",
help="Use hiPhive for generating force constants")
parser.add_argument(
"--irreps", "--irreps-qpoint", nargs='+', dest="irreps_qpoint",
help="A q-point where characters of irreps are calculated")
# parser.add_argument(
# "--lapack-solver", dest="lapack_solver", action="store_true",
# help=("Use Lapack via Lapacke for solving phonons. This "
# "option can be used only when phonopy is compiled "
# "specially."))
parser.add_argument(
"--legend", dest="is_legend", action="store_true",
help="Legend of plots is shown in thermal displacements")
parser.add_argument(
"--legacy-plot", dest="is_legacy_plot", action="store_true",
help="Legacy style band structure pl")
parser.add_argument(
"--lcg", "--little-cogroup", dest="is_little_cogroup",
action="store_true",
help=("Show irreps of little co-group (or point-group of "
"wave vector q) instead of little group"))
parser.add_argument(
"--loglevel", dest="loglevel", type=int,
help="Log level")
parser.add_argument(
"--mass", nargs='+', dest="masses",
help="Same as MASS tag")
parser.add_argument(
"--magmom", nargs='+', dest="magmoms",
help="Same as MAGMOM tag")
parser.add_argument(
"--mesh-format", dest="mesh_format",
help="Mesh output file-format")
parser.add_argument(
"--modulation", nargs='+', dest="modulation",
help="Same as MODULATION tag")
parser.add_argument(
"--mp", "--mesh", nargs='+', dest="mesh_numbers",
help="Same behavior as MP tag")
parser.add_argument(
"--moment", dest="is_moment", action="store_true",
help="Calculate moment of phonon states distribution")
parser.add_argument(
"--moment-order", dest="moment_order",
type=int, help="Order of moment of phonon states distribution")
parser.add_argument(
"--nac", dest="is_nac", action="store_true",
help="Non-analytical term correction")
parser.add_argument(
"--nac-method", dest="nac_method",
help="Non-analytical term correction method: Gonze (default) or Wang")
parser.add_argument(
"--nodiag", dest="is_nodiag", action="store_true",
help="Set displacements parallel to axes")
parser.add_argument(
"--nomeshsym", dest="is_nomeshsym", action="store_true",
help="Symmetry is not imposed for mesh sampling.")
parser.add_argument(
"--nosym", dest="is_nosym", action="store_true",
help="Symmetry is not imposed.")
parser.add_argument(
"--nothm", "--no-tetrahedron-method", dest="is_tetrahedron_method",
action="store_false",
help="Do not use tetrahedron method for DOS/PDOS")
parser.add_argument(
"--nowritemesh", dest="write_mesh", action="store_false",
help="Do not write mesh.yaml or mesh.hdf5")
parser.add_argument(
"-p", "--plot", dest="is_graph_plot", action="store_true",
help="Plot data")
parser.add_argument(
"--pa", "--primitive-axis", "--primitive-axes",
nargs='+', dest="primitive_axes",
help="Same as PRIMITIVE_AXES tag")
parser.add_argument(
"--pd", "--projection-direction", nargs='+',
dest="projection_direction",
help="Same as PROJECTION_DIRECTION tag")
parser.add_argument(
"--pdos", nargs='+', dest="pdos",
help="Same as PDOS tag")
parser.add_argument(
"--pm", dest="is_plusminus_displacements", action="store_true",
help="Set plus minus displacements")
parser.add_argument(
"--pr", "--pretend-real", dest="pretend_real", action="store_true",
help=("Use imaginary frequency as real for thermal property "
"calculation. For a testing purpose only, when a small "
"amount of imaginary branches obtained."))
parser.add_argument(
"--pt", "--projected-thermal-property",
dest="is_projected_thermal_properties", action="store_true",
help="Output projected thermal properties")
parser.add_argument(
"--qe", "--pwscf", dest="qe_mode",
action="store_true", help="Invoke Quantum espresso (QE) mode")
parser.add_argument(
"--qpoints", nargs='+', dest="qpoints",
help="Calculate at specified q-points")
parser.add_argument(
"--qpoints-format", dest="qpoints_format",
help="Q-points output file-format")
parser.add_argument(
"--q-direction", nargs='+', dest="nac_q_direction",
help=("Direction of q-vector perturbation used for NAC at "
"q->0, and group velocity for degenerate phonon "
"mode in q-points mode"))
parser.add_argument(
"-q", "--quiet", dest="quiet", action="store_true",
help="Print out smallest information")
parser.add_argument(
"--random-seed", dest="random_seed",
type=int, help="Random seed by a 32 bit unsigned integer")
parser.add_argument(
"--rd", "--random-displacements", dest="random_displacements",
type=int, help="Number of supercells with random displacements")
parser.add_argument(
"--readfc", dest="read_force_constants", action="store_true",
help="Read FORCE_CONSTANTS")
parser.add_argument(
"--readfc-format", dest="readfc_format",
help="Force constants input file-format")
parser.add_argument(
"--read-qpoints", dest="read_qpoints", action="store_true",
help="Read QPOITNS")
parser.add_argument(
"-s", "--save", dest="is_graph_save", action="store_true",
help="Save plot data in pdf")
parser.add_argument(
"--show-irreps", dest="show_irreps", action="store_true",
help="Show IR-Reps along with characters")
parser.add_argument(
"--siesta", dest="siesta_mode", action="store_true",
help="Invoke Siesta mode")
parser.add_argument(
"--sigma", dest="sigma",
help="Smearing width for DOS")
parser.add_argument(
"--symmetry", dest="is_check_symmetry", action="store_true",
help="Check crystal symmetry")
parser.add_argument(
"-t", "--thermal-property", dest="is_thermal_properties",
action="store_true",
help="Output thermal properties")
parser.add_argument(
"--td", "--thermal-displacements", dest="is_thermal_displacements",
action="store_true",
help="Output thermal displacements")
parser.add_argument(
"--tdm", "--thermal-displacement-matrix",
dest="is_thermal_displacement_matrices", action="store_true",
help="Output thermal displacement matrices")
parser.add_argument(
"--tdm-cif", "--thermal-displacement-matrix-cif",
metavar='TEMPERATURE',
dest="thermal_displacement_matrices_cif", type=float,
help="Write cif with aniso_U for which temperature is specified")
parser.add_argument(
"--temperature", dest="temperature", type=float,
metavar='TEMPERATURE', help="A temperature point")
parser.add_argument(
"--tmax", dest="tmax", type=float,
help="Maximum calculated temperature")
parser.add_argument(
"--tmin", dest="tmin", type=float,
help="Minimum calculated temperature")
parser.add_argument(
"--tolerance", dest="symmetry_tolerance", type=float,
help="Symmetry tolerance to search")
parser.add_argument(
"--trigonal", dest="is_trigonal_displacements", action="store_true",
help="Set displacements of all trigonal axes ")
parser.add_argument(
"--tstep", dest="tstep", type=float,
help="Calculated temperature step")
parser.add_argument(
"--turbomole", dest="turbomole_mode", action="store_true",
help="Invoke TURBOMOLE mode")
parser.add_argument(
"-v", "--verbose", dest="verbose", action="store_true",
help="Detailed information is shown.")
parser.add_argument(
"--vasp", dest="vasp_mode", action="store_true",
help="Invoke Vasp mode")
parser.add_argument(
"--wien2k", dest="wien2k_mode", action="store_true",
help="Invoke Wien2k mode")
parser.add_argument(
"--wien2k_p1", dest="is_wien2k_p1", action="store_true",
help="Assume Wien2k structs with displacements are P1")
parser.add_argument(
"--writefc", dest="write_force_constants", action="store_true",
help="Write FORCE_CONSTANTS")
parser.add_argument(
"--writefc-format", dest="writefc_format",
help="Force constants output file-format")
parser.add_argument(
"--writedm", dest="write_dynamical_matrices", action="store_true",
help=("Write dynamical matrices. This has to be used "
"with QPOINTS setting (or --qpoints)"))
parser.add_argument(
"--xyz-projection", dest="xyz_projection", action="store_true",
help="Project PDOS x, y, z directions in Cartesian coordinates")
parser.add_argument(
"conf_file", nargs='*',
help="Phonopy configure file")
return parser, deprecated
|
the-stack_106_20293
|
from math import sqrt, acos
def dist(v1, v2):
return sqrt((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2)
def dot(v1, v2):
return v1[0]*v2[0] + v1[1]*v2[1]
def cross(v1, v2, v3):
return (v2[0]-v1[0])*(v3[1]-v1[1]) - (v2[1]-v1[1])*(v3[0]-v1[0])
def norm(v1):
return sqrt(v1[0]*v1[0] + v1[1]*v1[1])
def angle(v1, v2):
return acos(dot(v1,v2)/(norm(v1)*norm(v2)))
def sort_points_by_y(vect):
return sorted(vect, key = lambda x: (x[1], x[0]))
def sort_points_by_angle(vect):
l = len(vect)
angles = list(map(angle, [(1,0) for _ in range(l)], vect))
for k in range(l-1):
for w in range(k+1,l):
if angles[k] > angles[w]:
vect[k], vect[w] = vect[w], vect[k]
angles[k], angles[w] = angles[w], angles[k]
return vect, angles
def remove_collinear(p0, vect, angles):
l = len(vect)
to_remove_vect = []
to_remove_angle = []
for k in range(l-1):
for w in range(k+1,l):
if angles[k] == angles[w]:
if dist(p0,vect[k]) < dist(p0,vect[w]):
to_remove_vect.append(vect[k])
to_remove_angle.append(angles[k])
else:
to_remove_vect.append(vect[w])
to_remove_angle.append(angles[w])
for v,a in zip(to_remove_vect, to_remove_angle):
vect.remove(v)
angles.remove(a)
return vect, angles, to_remove_vect, to_remove_angle
def graham_scan(p0, vect):
if len(vect) < 2:
return "Convex hull is empty"
stack = [p0, vect[0], vect[1]]
stack_size = 3
if len(vect) == 2:
return stack
l = len(vect)
for k in range(2, l):
while(True):
print(stack)
d = cross(stack[stack_size - 2], stack[stack_size - 1], vect[k])
print(d)
            if d > 0: # left turn (counter-clockwise), keep the candidate point
break
else: # non left turn
stack.pop()
stack_size -= 1
stack.append(vect[k])
stack_size += 1
return stack
p1 = (1,1)
p2 = (5,3)
p3 = (7,6)
p4 = (3,5)
a1 = (4,4)
a2 = (6,4)
# Pipeline
# 1 - sort_points_by_y
# 2 - sort_points_by_angle
# 3 - remove_collinear
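# A minimal end-to-end sketch of that pipeline (the helper name convex_hull
# and the translation step are assumptions for illustration; the original
# script stops short of wiring the steps together).  Shifting every point so
# the pivot sits at the origin keeps angle(), dist() and graham_scan()
# consistent with one another.
def convex_hull(points):
    lowest = sort_points_by_y(points)[0]                    # 1 - pivot: lowest point
    shifted = [(x - lowest[0], y - lowest[1])
               for (x, y) in points if (x, y) != lowest]    # translate pivot to (0, 0)
    vect, angles = sort_points_by_angle(shifted)            # 2 - sort by polar angle
    vect, angles, _, _ = remove_collinear((0, 0), vect, angles)  # 3 - drop nearer collinear points
    hull = graham_scan((0, 0), vect)
    return [(x + lowest[0], y + lowest[1]) for (x, y) in hull]  # back to original coordinates
# e.g. convex_hull([p1, p2, p3, p4, a1, a2]) should drop only a1 = (4, 4),
# the single interior point of this sample set.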
|
the-stack_106_20294
|
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates the Chrome reference builds.
Before running this script, you should first verify that you are authenticated
for SVN. You can do this by running:
$ svn ls svn://svn.chromium.org/chrome/trunk/deps/reference_builds
You may need to get your SVN password from https://chromium-access.appspot.com/.
Usage:
$ cd /tmp
$ /path/to/update_reference_build.py
$ cd reference_builds/reference_builds
$ gcl change
$ gcl upload <change>
$ gcl commit <change>
"""
import collections
import logging
import os
import shutil
import subprocess
import sys
import urllib2
import zipfile
# Google storage location (no public web URL's), example:
# gs://chrome-unsigned/desktop-*/30.0.1595.0/precise32/chrome-precise32.zip
CHROME_GS_URL_FMT = ('gs://chrome-unsigned/desktop-*/%s/%s/%s')
def _ReportValueError(error_string):
#TODO(aiolos): alert sheriffs via email when an error is seen.
#This should be added when alerts are added when updating the build.
raise ValueError(error_string)
class BuildUpdater(object):
# Remove a platform name from this list to disable updating it.
_REF_BUILD_PLATFORMS = ['Mac64', 'Win', 'Linux', 'Linux_x64']
# Omaha is Chrome's autoupdate server. It reports the current versions used
# by each platform on each channel.
_OMAHA_PLATFORMS = ['mac', 'linux', 'win']
# All of the information we need to update each platform.
  # omaha: name omaha uses for the platforms.
# zip_name: name of the zip file to be retrieved from cloud storage.
# gs_build: name of the Chrome build platform used in cloud storage.
# destination: Name of the folder to download the reference build to.
UpdateInfo = collections.namedtuple('UpdateInfo',
'omaha, gs_build, zip_name, destination')
_PLATFORM_MAP = { 'Mac64': UpdateInfo(omaha='mac',
gs_build='mac64',
zip_name='chrome-mac.zip',
destination='chrome_mac'),
'Win': UpdateInfo(omaha='win',
gs_build='win',
zip_name='chrome-win.zip',
destination='chrome_win'),
'Linux': UpdateInfo(omaha='linux',
gs_build='precise32',
zip_name='chrome-precise32.zip',
destination='chrome_linux'),
'Linux_x64': UpdateInfo(omaha='linux',
gs_build='precise64',
zip_name='chrome-precise64.zip',
destination='chrome_linux64')}
def __init__(self):
stable_versions = self._StableVersionsMap()
current_versions = self._CurrentRefBuildsMap()
self._platform_to_version_map = {}
for platform in stable_versions:
if (platform not in current_versions or
stable_versions[platform] != current_versions[platform]):
self._platform_to_version_map[platform] = stable_versions[platform]
@classmethod
def _StableVersionsMap(cls):
omaha_versions_map = cls._OmahaVersionsMap()
versions_map = {}
for platform in cls._REF_BUILD_PLATFORMS:
omaha_platform = cls._PLATFORM_MAP[platform].omaha
if omaha_platform in omaha_versions_map:
versions_map[platform] = omaha_versions_map[omaha_platform]
return versions_map
@classmethod
def _OmahaReport(cls):
url ='https://omahaproxy.appspot.com/all?channel=stable'
lines = urllib2.urlopen(url).readlines()
return [l.split(',') for l in lines]
@classmethod
def _OmahaVersionsMap(cls):
platforms = cls._OMAHA_PLATFORMS
rows = cls._OmahaReport()
if (len(rows) < 1 or
not rows[0][0:3] == ['os', 'channel', 'current_version']):
_ReportValueError('Omaha report is not in the expected form: %s.'
% rows)
versions_map = {}
for row in rows[1:]:
if row[1] != 'stable':
_ReportValueError('Omaha report contains a line with the channel %s'
% row[1])
if row[0] in platforms:
versions_map[row[0]] = row[2]
if not all(platform in versions_map for platform in platforms):
_ReportValueError('Omaha report did not contain all desired platforms')
return versions_map
@classmethod
def _CurrentRefBuildsMap(cls):
#TODO(aiolos): Add logic for pulling the current reference build versions.
# Return an empty dictionary to force an update until we store the builds in
# in cloud storage.
return {}
@staticmethod
def _GetCmdStatusAndOutput(args, cwd=None, shell=False):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
Returns:
The tuple (exit code, output).
"""
logging.info(str(args) + ' ' + (cwd or ''))
p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell)
stdout, stderr = p.communicate()
exit_code = p.returncode
if stderr:
logging.critical(stderr)
logging.info(stdout)
return (exit_code, stdout)
def _GetBuildUrl(self, platform, version, filename):
"""Returns the URL for fetching one file.
Args:
platform: Platform name, must be a key in |self._PLATFORM_MAP|.
version: A Chrome version number, e.g. 30.0.1600.1.
filename: Name of the file to fetch.
Returns:
The URL for fetching a file. This may be a GS or HTTP URL.
"""
return CHROME_GS_URL_FMT % (
version, self._PLATFORM_MAP[platform].gs_build, filename)
def _FindBuildVersion(self, platform, version, filename):
"""Searches for a version where a filename can be found.
Args:
platform: Platform name.
version: A Chrome version number, e.g. 30.0.1600.1.
filename: Filename to look for.
Returns:
A version where the file could be found, or None.
"""
# TODO(shadi): Iterate over official versions to find a valid one.
return (version
if self._DoesBuildExist(platform, version, filename) else None)
def _DoesBuildExist(self, platform, version, filename):
"""Checks whether a file can be found for the given Chrome version.
Args:
platform: Platform name.
version: Chrome version number, e.g. 30.0.1600.1.
filename: Filename to look for.
Returns:
True if the file could be found, False otherwise.
"""
url = self._GetBuildUrl(platform, version, filename)
return self._DoesGSFileExist(url)
def _DoesGSFileExist(self, gs_file_name):
"""Returns True if the GS file can be found, False otherwise."""
exit_code = BuildUpdater._GetCmdStatusAndOutput(
['gsutil', 'ls', gs_file_name])[0]
return not exit_code
def _GetPlatformFiles(self, platform):
"""Returns the name of the zip file to fetch for |platform|."""
return BuildUpdater._PLATFORM_MAP[platform].zip_name
def _DownloadBuilds(self):
for platform in self._platform_to_version_map:
version = self._platform_to_version_map[platform]
filename = self._GetPlatformFiles(platform)
output = os.path.join('dl', platform,
'%s_%s_%s' % (platform,
version,
filename))
if os.path.exists(output):
        logging.info('%s already exists, skipping download', output)
continue
build_version = self._FindBuildVersion(platform, version, filename)
if not build_version:
logging.critical('Failed to find %s build for r%s\n', platform,
version)
sys.exit(1)
dirname = os.path.dirname(output)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
url = self._GetBuildUrl(platform, build_version, filename)
self._DownloadFile(url, output)
def _DownloadFile(self, url, output):
logging.info('Downloading %s, saving to %s', url, output)
BuildUpdater._GetCmdStatusAndOutput(['gsutil', 'cp', url, output])
def _FetchSvnRepos(self):
if not os.path.exists('reference_builds'):
os.makedirs('reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'config',
'svn://svn.chromium.org/chrome/trunk/deps/reference_builds'],
'reference_builds')
BuildUpdater._GetCmdStatusAndOutput(
['gclient', 'sync'], 'reference_builds')
def _UnzipFile(self, dl_file, dest_dir):
"""Unzips a file if it is a zip file.
Args:
dl_file: The downloaded file to unzip.
dest_dir: The destination directory to unzip to.
Returns:
True if the file was unzipped. False if it wasn't a zip file.
"""
if not zipfile.is_zipfile(dl_file):
return False
logging.info('Opening %s', dl_file)
with zipfile.ZipFile(dl_file, 'r') as z:
for content in z.namelist():
dest = os.path.join(dest_dir, content[content.find('/')+1:])
# Create dest parent dir if it does not exist.
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
# If dest is just a dir listing, do nothing.
if not os.path.basename(dest):
continue
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with z.open(content) as unzipped_content:
logging.info('Extracting %s to %s (%s)', content, dest, dl_file)
with file(dest, 'wb') as dest_file:
dest_file.write(unzipped_content.read())
permissions = z.getinfo(content).external_attr >> 16
if permissions:
os.chmod(dest, permissions)
return True
def _ClearDir(self, directory):
"""Clears all files in |directory| except for hidden files and folders."""
for root, dirs, files in os.walk(directory):
# Skip hidden files and folders (like .svn and .git).
files = [f for f in files if f[0] != '.']
dirs[:] = [d for d in dirs if d[0] != '.']
for f in files:
os.remove(os.path.join(root, f))
def _ExtractBuilds(self):
for platform in self._platform_to_version_map:
if os.path.exists('tmp_unzip'):
        os.unlink('tmp_unzip')
dest_dir = os.path.join(
'reference_builds', 'reference_builds',
BuildUpdater._PLATFORM_MAP[platform].destination)
self._ClearDir(dest_dir)
for root, _, dl_files in os.walk(os.path.join('dl', platform)):
for dl_file in dl_files:
dl_file = os.path.join(root, dl_file)
if not self._UnzipFile(dl_file, dest_dir):
logging.info('Copying %s to %s', dl_file, dest_dir)
shutil.copy(dl_file, dest_dir)
def _SvnAddAndRemove(self):
svn_dir = os.path.join('reference_builds', 'reference_builds')
# List all changes without ignoring any files.
stat = BuildUpdater._GetCmdStatusAndOutput(['svn', 'stat', '--no-ignore'],
svn_dir)[1]
for line in stat.splitlines():
action, filename = line.split(None, 1)
# Add new and ignored files.
if action == '?' or action == 'I':
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'add', filename], svn_dir)
elif action == '!':
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'delete', filename], svn_dir)
filepath = os.path.join(svn_dir, filename)
if not os.path.isdir(filepath) and os.access(filepath, os.X_OK):
BuildUpdater._GetCmdStatusAndOutput(
['svn', 'propset', 'svn:executable', 'true', filename], svn_dir)
def DownloadAndUpdateBuilds(self):
self._DownloadBuilds()
self._FetchSvnRepos()
self._ExtractBuilds()
self._SvnAddAndRemove()
def main():
logging.getLogger().setLevel(logging.DEBUG)
#TODO(aiolos): check that there are no options passed (argparse).
b = BuildUpdater()
b.DownloadAndUpdateBuilds()
logging.info('Successfully updated reference builds. Move to '
'reference_builds/reference_builds and make a change with gcl.')
if __name__ == '__main__':
main()
|
the-stack_106_20296
|
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.db.models.signals import pre_delete, m2m_changed, pre_save
from django.dispatch import receiver
from publication_backbone.models_bases.polymorphic_mptt.signals import (
move_to_done,
pre_save_polymorphic_mptt,
post_save_polymorphic_mptt,
)
from publication_backbone.utils.dispatch import make_dispatch_uid
from publication_backbone.models import BaseCategory, Category, CategoryLink
from publication_backbone.utils.contrib import get_unique_slug
def validate_category_path(sender, instance, raw, using, update_fields, **kwargs):
"""
Check for duplicate category path before save category
"""
    if (not raw) and sender._default_manager.exclude(pk=instance.pk).filter(path=instance.path).exists():
ancestors = instance.get_ancestors_list()
instance.slug = get_unique_slug(instance.slug, instance.id)
instance.make_path(ancestors + [instance,])
# Connect validate_category_path to all categories class
for clazz in BaseCategory.__subclasses__():
pre_save.connect(validate_category_path, clazz, dispatch_uid=make_dispatch_uid(pre_save, validate_category_path, clazz))
#==============================================================================
# Category db event handlers
#==============================================================================
@receiver(m2m_changed, sender=Category.rubrics.through,
dispatch_uid=make_dispatch_uid(m2m_changed, 'invalidate_after_rubrics_set_changed', Category.rubrics.through))
def invalidate_after_rubrics_set_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
"""
Automatically normalize rubrics set
"""
if action == 'post_add':
if not hasattr(instance, '_during_rubrics_validation'):
# normalize rubrics set
instance.validate_rubrics(pk_set)
# clear cache
keys = [instance.CATEGORY_ACTIVE_RUBRIC_COUNT_CACHE_KEY,
instance.CATEGORY_ACTIVE_RUBRIC_IDS_CACHE_KEY]
cache.delete_many(keys)
def invalidate_ctgr_vsbl_chldrn_cache_before_save(sender, instance, **kwargs):
    if instance.id is not None:
try:
original = sender._default_manager.get(pk=instance.id)
            if original.parent_id != instance.parent_id and original.parent_id is not None:
key = sender.CATEGORY_VISIBLE_CHILDREN_CACHE_KEY_PATTERN % {'id': original.parent_id}
cache.delete(key)
except sender.DoesNotExist:
pass
def invalidate_ctgr_vsbl_chldrn_cache_after_save(sender, instance, **kwargs):
    if instance.id is not None:
keys = [sender.CATEGORY_VISIBLE_CHILDREN_CACHE_KEY_PATTERN % {'id': instance.id}, ]
        if instance.parent_id is not None:
keys.append(sender.CATEGORY_VISIBLE_CHILDREN_CACHE_KEY_PATTERN % {'id': instance.parent_id})
cache.delete_many(keys)
def invalidate_ctgr_vsbl_chldrn_cache_after_move(sender, instance, target, position, prev_parent, **kwargs):
    if prev_parent is not None:
key = sender.CATEGORY_VISIBLE_CHILDREN_CACHE_KEY_PATTERN % {'id': prev_parent.id}
cache.delete(key)
invalidate_ctgr_vsbl_chldrn_cache_after_save(sender, instance, **kwargs)
# connect all subclasses of base content item too
for clazz in BaseCategory.__subclasses__():
pre_save_polymorphic_mptt.connect(invalidate_ctgr_vsbl_chldrn_cache_before_save, clazz,
dispatch_uid=make_dispatch_uid(pre_save_polymorphic_mptt,
invalidate_ctgr_vsbl_chldrn_cache_before_save,
clazz))
post_save_polymorphic_mptt.connect(invalidate_ctgr_vsbl_chldrn_cache_after_save, clazz,
dispatch_uid=make_dispatch_uid(post_save_polymorphic_mptt,
invalidate_ctgr_vsbl_chldrn_cache_after_save,
clazz))
pre_delete.connect(invalidate_ctgr_vsbl_chldrn_cache_after_save, clazz,
dispatch_uid=make_dispatch_uid(pre_delete,
invalidate_ctgr_vsbl_chldrn_cache_after_save,
clazz))
move_to_done.connect(invalidate_ctgr_vsbl_chldrn_cache_after_move, clazz,
dispatch_uid=make_dispatch_uid(move_to_done,
invalidate_ctgr_vsbl_chldrn_cache_after_move,
clazz))
|
the-stack_106_20297
|
from django.test import TestCase
from grid.templatetags.grid_tags import style_element, YES_IMG, NO_IMG, \
YES_KEYWORDS, NO_KEYWORDS
class GridTest(TestCase):
def test_01_style_element_filter(self):
tests = [
('+', 1, 0, ''),
('++', 2, 0, ''),
('+++', 3, 0, ''),
('+1', 1, 0, ''),
('+2', 2, 0, ''),
('+3', 3, 0, ''),
('+4', 3, 0, ''),
('+42', 3, 0, ''),
('-', 0, 1, ''),
('--', 0, 2, ''),
('---', 0, 3, ''),
('-1', 0, 1, ''),
('-2', 0, 2, ''),
('-3', 0, 3, ''),
('-4', 0, 3, ''),
('-42', 0, 3, ''),
]
for positive in YES_KEYWORDS:
tests.append((positive, 1, 0, ''))
tests.append(('%stest' % positive, 1, 0, 'test'))
for negative in NO_KEYWORDS:
tests.append((negative, 0, 1, ''))
tests.append(('%stest' % negative, 0, 1, 'test'))
for text, yes, no, endswith in tests:
output = style_element(text)
got_yes = output.count(YES_IMG)
self.assertEqual(
got_yes,
yes,
"%s resulted in %s yes-gifs instead of %s." % (text, got_yes, yes)
)
got_no = output.count(NO_IMG)
self.assertEqual(
got_no,
no,
"%s resulted in %s no-gifs instead of %s." % (text, got_no, no)
)
self.assertTrue(
output.endswith(endswith),
"Expected %s to end with %s, got %s instead." % (text, endswith, output)
)
|
the-stack_106_20298
|
from flask import Flask, request, abort
from flask.helpers import safe_join
from werkzeug.utils import append_slash_redirect
from lektor.db import Database
from lektor.builder import Builder
from lektor.buildfailures import FailureController
from lektor.admin.modules import register_modules
from lektor.reporter import CliReporter
class LektorInfo(object):
def __init__(self, env, output_path, ui_lang='en', extra_flags=None,
verbosity=0):
self.env = env
self.ui_lang = ui_lang
self.output_path = output_path
self.extra_flags = extra_flags
self.verbosity = verbosity
def get_pad(self):
return Database(self.env).new_pad()
def get_builder(self, pad=None):
if pad is None:
pad = self.get_pad()
return Builder(pad, self.output_path, extra_flags=self.extra_flags)
def get_failure_controller(self, pad=None):
if pad is None:
pad = self.get_pad()
return FailureController(pad, self.output_path)
def resolve_artifact(self, path, pad=None, redirect_slash=True):
"""Resolves an artifact and also triggers a build if necessary.
Returns a tuple in the form ``(artifact_name, filename)`` where
`artifact_name` can be `None` in case a file was targeted explicitly.
"""
if pad is None:
pad = self.get_pad()
artifact_name = filename = None
        # We start with trying to resolve a source and then use its
        # primary artifact.
source = pad.resolve_url_path(path)
if source is not None:
# If the request path does not end with a slash but we
# requested a URL that actually wants a trailing slash, we
# append it. This is consistent with what apache and nginx do
# and it ensures our relative urls work.
if not path.endswith('/') and \
source.url_path != '/' and \
source.url_path != path:
return abort(append_slash_redirect(request.environ))
with CliReporter(self.env, verbosity=self.verbosity):
builder = self.get_builder(pad)
prog, _ = builder.build(source)
artifact = prog.primary_artifact
if artifact is not None:
artifact_name = artifact.artifact_name
filename = artifact.dst_filename
if filename is None:
filename = safe_join(self.output_path, path.strip('/'))
return artifact_name, filename
class WebUI(Flask):
def __init__(self, env, debug=False, output_path=None, ui_lang='en',
verbosity=0, extra_flags=None):
Flask.__init__(self, 'lektor.admin', static_url_path='/admin/static')
self.lektor_info = LektorInfo(env, output_path, ui_lang,
extra_flags=extra_flags,
verbosity=verbosity)
self.debug = debug
self.config['PROPAGATE_EXCEPTIONS'] = True
register_modules(self)
WebAdmin = WebUI
|
the-stack_106_20299
|
"""
.. Deep Residual Learning for Image Recognition:
https://arxiv.org/abs/1512.03385
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride,
bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
def test():
net = ResNet18()
y = net(torch.randn(1, 3, 32, 32))
print(y.size())
# test()
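# Minimal usage sketch (assumes CIFAR-style 3x32x32 inputs, as in test() above;
# the default 10-class head and the 4x4 average pool only fit 32x32 inputs).
def example_usage():
    model = ResNet50()
    logits = model(torch.randn(8, 3, 32, 32))  # batch of 8 -> logits of shape (8, 10)
    n_params = sum(p.numel() for p in model.parameters())
    print(logits.size(), n_params)
# example_usage()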
|
the-stack_106_20300
|
import logging
import os
import sys
from urllib.request import urlopen
from xml.etree.ElementTree import fromstring
import pandas as pd
import requests
import xlrd
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import IntegrityError, connection
from protein.models import (Gene, Protein, ProteinAlias, ProteinConformation,
ProteinFamily, ProteinArrestinPair, ProteinSegment,
ProteinSequenceType, ProteinSource, ProteinState, Species)
from residue.models import (Residue, ResidueGenericNumber,
ResidueGenericNumberEquivalent,
ResidueNumberingScheme)
from signprot.models import SignprotStructure
from structure.models import Structure
class Command(BaseCommand):
help = 'Build Arrestin proteins'
# source files
arrestin_data_file = os.sep.join([settings.DATA_DIR, 'arrestin_data', 'ortholog_alignment.xlsx'])
bouvier_file = os.sep.join([settings.DATA_DIR, 'g_protein_data', '201025_bouvier_gloriam.xlsx'])
local_uniprot_dir = os.sep.join([settings.DATA_DIR, 'protein_data', 'uniprot'])
remote_uniprot_dir = 'https://uniprot.org/uniprot/'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('--coupling',
default=False,
action='store_true',
help='Purge and import GPCR-Arrestin coupling data')
def handle(self, *args, **options):
self.options = options
if options['filename']:
filenames = options['filename']
else:
filenames = False
if self.options['coupling']:
self.purge_coupling_data()
self.logger.info('PASS: purge_coupling_data')
if os.path.exists(self.bouvier_file):
self.add_bouvier_coupling_data()
self.logger.info('PASS: add_bouvier_coupling_data')
else:
self.logger.warning('Bouvier source data ' + self.bouvier_file + ' not found')
else:
try:
self.purge_can_residues()
self.logger.info('PASS: purge_can_residues')
self.purge_can_proteins()
self.logger.info('PASS: purge_can_proteins')
# add proteins
self.can_create_families()
self.logger.info('PASS: can_create_families')
self.can_add_proteins()
self.logger.info('PASS: can_add_proteins')
# add residues
self.add_can_residues()
self.logger.info('PASS: add_can_residues')
# add coupling data
self.purge_coupling_data()
self.logger.info('PASS: purge_coupling_data')
if os.path.exists(self.bouvier_file):
self.add_bouvier_coupling_data()
self.logger.info('PASS: add_bouvier_coupling_data')
else:
self.logger.warning('Bouvier source data ' + self.bouvier_file + ' not found')
except Exception as msg:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, exc_obj, fname, exc_tb.tb_lineno)
self.logger.error(msg)
def purge_can_residues(self):
"""Purge residues."""
try:
Residue.objects.filter(generic_number_id__scheme__slug="can").delete()
except Exception as msg:
self.logger.warning('Existing Residue data cannot be deleted', msg)
def purge_can_proteins(self):
"""Purge proteins."""
try:
Protein.objects.filter(residue_numbering_scheme__slug='can').delete()
except Exception as msg:
self.logger.warning('Protein to delete not found' + str(msg))
def purge_coupling_data(self):
"""DROP data from the protein_arrestin_pair table."""
try:
ProteinArrestinPair.objects.filter().delete()
sequence_sql = connection.ops.sequence_reset_sql(no_style(), [ProteinArrestinPair])
with connection.cursor() as cursor:
for sql in sequence_sql:
cursor.execute(sql)
except Exception as msg:
self.logger.warning('Existing protein_arrestin_pair data cannot be deleted' + str(msg))
@staticmethod
def read_coupling(filenames=False):
"""
Function to read Coupling data from Excel files.
The ideal would be for the excel organization to hopefully be fixed in the same way for data
coming from different groups. For now the data comes from Bouvier and has been processed by David E. Gloriam.
"""
book = xlrd.open_workbook(filenames)
sheet1 = book.sheet_by_name("plain")
rows = sheet1.nrows
beglogmaxec50 = 36
endlogmaxec50 = 38
begpec50 = 52
endpec50 = 54
begemax = 68
endemax = 70
data = {}
# data dictionary format:
# {'<protein>':
# {'<arrestinsubtype>':
# {'logmaxec50': <logmaxec50>,
        #        'pec50deg': <pec50deg>,
        #        'emaxdeg': <emaxdeg>}
# }
# }
def cleanValue(s):
"""
Function to return a 0.0 (a value which means no coupling) since returning
an NA string to the database field declared as a float won't work, also
because NULL might have a meaning. In Python to return NULL one uses None
:param s:
:return: float
"""
if s == '':
# return None
return float(0.0)
else:
# return float(str(s).strip())
return format(float(s), '.2f').strip()
# return str(s).strip()
for i in range(3, rows):
protein = sheet1.cell_value(i, 0)
protein_dict = {}
# logemaxec50
for j in range(beglogmaxec50, endlogmaxec50):
arrestinsubtype = sheet1.cell_value(2, j)
protein_dict[arrestinsubtype] = {}
protein_dict[arrestinsubtype]['logmaxec50'] = cleanValue(sheet1.cell_value(i, j))
# pec50 deg = david e gloriam
for j in range(begpec50, endpec50):
arrestinsubtype = sheet1.cell_value(2, j)
protein_dict[arrestinsubtype]['pec50deg'] = cleanValue(sheet1.cell_value(i, j))
# emax deg = david e gloriam
for j in range(begemax, endemax):
arrestinsubtype = sheet1.cell_value(2, j)
protein_dict[arrestinsubtype]['emaxdeg'] = cleanValue(sheet1.cell_value(i, j))
data[protein] = protein_dict
# pprint(protein_dict[arrestin_subtype])
# pprint(data)
return data
def add_bouvier_coupling_data(self):
"""
This function adds coupling data coming from Michel Bouvier processed by David Gloriam
@return:
p, source, values['logemaxec50'], values['pec50deg'], ..., ap
p = protein_name
source = One of GuideToPharma, Aska, Bouvier
values = selfdescriptive
ap = arrestin uniprot name, e.g.
"""
self.logger.info('BEGIN ADDING Bouvier-Gloriam coupling data')
# read source files
filepath = self.bouvier_file
self.logger.info('Reading file ' + filepath)
data = self.read_coupling(filepath)
# pprint(data['AVPR2'])
# pprint(data['AVP2R'])
# pprint(data['BDKRB1'])
source = 'Bouvier'
lookup = {}
for entry_name, couplings in data.items():
# if it has / then pick first, since it gets same protein
entry_name = entry_name.split("/")[0]
# append _human to entry name
# entry_name = "{}_HUMAN".format(entry_name).lower()
# Fetch protein
try:
p = Protein.objects.filter(genes__name=entry_name, species__common_name="Human")[0]
except Protein.DoesNotExist:
self.logger.warning('Protein not found for entry_name {}'.format(entry_name))
print("protein not found for ", entry_name)
continue
for arrestin, values in couplings.items():
if arrestin not in lookup:
ap = Protein.objects.filter(family__name=arrestin, species__common_name="Human")[0]
lookup[arrestin] = ap
else:
ap = lookup[arrestin]
# print(p, source, ap)
# print(p, source, values['logmaxec50'], values['pec50deg'], values['emaxdeg'], ap)
apair = ProteinArrestinPair(protein=p,
source=source,
logmaxec50_deg=values['logmaxec50'],
pec50_deg=values['pec50deg'],
emax_deg=values['emaxdeg'],
arrestin_subtype=ap)
apair.save()
self.logger.info('COMPLETED ADDING Bouvier-Gloriam coupling data')
def add_can_residues(self):
"""Add CAN residues from source file provided by Andrija Sente."""
# Parsing pdb uniprot file for residues
self.logger.info('Start parsing ARRESTIN RESIDUES')
self.logger.info('Parsing file ' + self.arrestin_data_file)
residue_data = pd.read_excel(self.arrestin_data_file)
can_scheme = ResidueNumberingScheme.objects.get(slug='can')
can_dict = residue_data[residue_data.ID == 'CAN_id'].iloc[:, 3:].to_dict('list')
        # Loop over data table, but skip the "CAN_pos" and "CAN_id" rows from the current input file
for index, row in residue_data[2:].iterrows():
try:
                # for now, only allow orthologs with UniProt entries:
if not row['AccessionID'].startswith('ENS'):
# fetch protein for protein conformation
pr, c = Protein.objects.get_or_create(accession=row['AccessionID'])
# fetch protein conformation
pc, c = ProteinConformation.objects.get_or_create(protein_id=pr)
else:
continue
except:
print('error making/getting protein', row['AccessionID'])
continue
# loop over residue generic number
sequence_number = 1
for aln_pos in can_dict:
canId = can_dict[aln_pos][0]
                # Add '0' in front of single-digit positions
if (int(canId.split('.')[2]) < 10):
rgnsp = canId.split('.')
canId = rgnsp[0] + '.' + rgnsp[1] + '.0' + rgnsp[2]
ps, c = ProteinSegment.objects.get_or_create(slug=canId.split('.')[1], proteinfamily='Arrestin')
rgn, c = ResidueGenericNumber.objects.get_or_create(label=canId, scheme=can_scheme, protein_segment=ps)
# only add AA information if not gap
if not row[aln_pos] == '-':
try:
Residue.objects.get_or_create(sequence_number=sequence_number, protein_conformation=pc,
amino_acid=row[aln_pos], generic_number=rgn, display_generic_number=rgn,
protein_segment=ps)
sequence_number += 1
except Exception as msg:
print("failed to add residue", msg)
self.logger.error("Failed to add residues", msg)
# Add also to the ResidueGenericNumberEquivalent table needed for single residue selection
try:
ResidueGenericNumberEquivalent.objects.get_or_create(label=rgn.label, default_generic_number=rgn,
scheme=can_scheme) # Update scheme_id
except Exception as msg:
print("failed to add residue generic number", msg)
self.logger.error("Failed to add residues to ResidueGenericNumberEquivalent")
def get_uniprot_accession_id(self, response_xml):
# TODO: This function seems to be legacy, perhaps it can be deleted?
"""Get Uniprot accession ID."""
root = fromstring(response_xml)
return next(
# el for el in root.getchildren()[0].getchildren()
el for el in root[0]
if el.attrib['dbSource'] == 'UniProt'
).attrib['dbAccessionId']
def map_pdb_to_uniprot(self, pdb_id):
# TODO: This function seems to be legacy, perhaps it can be deleted?
# Due to the new RCSB API this doesn't even work after December 2020
"""Get uniprot ID from PDB ID."""
pdb_mapping_url = 'https://www.rcsb.org/pdb/rest/das/pdb_uniprot_mapping/alignment'
pdb_mapping_response = requests.get(
pdb_mapping_url, params={'query': pdb_id}
).text
uniprot_id = self.get_uniprot_accession_id(pdb_mapping_response)
return uniprot_id
def can_add_proteins(self):
"""Add arrestin proteins."""
self.logger.info('Start adding ARRESTIN proteins')
self.logger.info('Parsing file ' + self.arrestin_data_file)
# Import ortholog alignment as pandas dataframe
residue_data = pd.read_excel(self.arrestin_data_file)
# Create new residue numbering scheme
self.create_can_rns()
rns = ResidueNumberingScheme.objects.get(slug='can')
state = ProteinState.objects.get(slug='active')
arrestins = residue_data[2:].Ortholog.unique()
for arrestin in arrestins:
pfm = ProteinFamily.objects.get(name=arrestin)
for accession in residue_data[residue_data.Ortholog == arrestin].AccessionID.unique():
# only allow uniprot accession:
if not accession.startswith('ENS'):
up = self.parse_uniprot_file(accession)
# if len(up['genes']) == 0:
# print('There is no GN field in the uniprot!', accession)
# self.logger.error('There is no GN field in the uniprot! {}'.format(accession))
# continue
if not 'source' in up:
print('No source found, probably deprecated!', accession)
self.logger.error('No source found, probably deprecated! {}'.format(accession))
continue
# Create new Protein
self.can_create_arrestins(pfm, rns, accession, up)
# add new can protein conformations
try:
arrestin = Protein.objects.get(accession=accession)
pc, created = ProteinConformation.objects.get_or_create(protein=arrestin, state=state)
self.logger.info('Created protein conformation')
except Exception as msg:
self.logger.error('Failed to create protein conformation', msg)
def can_create_arrestins(self, family, residue_numbering_scheme, accession, uniprot):
# get/create protein source
try:
source, created = ProteinSource.objects.get_or_create(name=uniprot['source'],
defaults={'name': uniprot['source']})
if created:
self.logger.info('Created protein source ' + source.name)
except IntegrityError:
source = ProteinSource.objects.get(name=uniprot['source'])
# get/create species
try:
species, created = Species.objects.get_or_create(latin_name=uniprot['species_latin_name'],
defaults={
'common_name': uniprot['species_common_name'],
})
if created:
self.logger.info('Created species ' + species.latin_name)
except IntegrityError:
species = Species.objects.get(latin_name=uniprot['species_latin_name'])
# get/create protein sequence type
# Wild-type for all sequences from source file
try:
sequence_type, created = ProteinSequenceType.objects.get_or_create(slug='wt',
defaults={
'slug': 'wt',
'name': 'Wild-type',
})
if created:
self.logger.info('Created protein sequence type Wild-type')
except:
self.logger.error('Failed creating protein sequence type Wild-type')
# create protein
p = Protein()
p.family = family
p.species = species
p.source = source
p.residue_numbering_scheme = residue_numbering_scheme
p.sequence_type = sequence_type
if accession:
p.accession = accession
p.entry_name = uniprot['entry_name'].lower()
p.name = uniprot['names'][0]
p.sequence = uniprot['sequence']
try:
p.save()
self.logger.info('Created protein {}'.format(p.entry_name))
except:
self.logger.error('Failed creating protein {}'.format(p.entry_name))
# protein aliases
for i, alias in enumerate(uniprot['names']):
pcan = Protein.objects.get(entry_name=uniprot['entry_name'].lower())
a = ProteinAlias()
a.protein = pcan
a.name = alias
a.position = i
try:
a.save()
self.logger.info('Created protein alias ' + a.name + ' for protein ' + p.name)
except:
self.logger.error('Failed creating protein alias ' + a.name + ' for protein ' + p.name)
# genes
for i, gene in enumerate(uniprot['genes']):
g = False
try:
g, created = Gene.objects.get_or_create(name=gene, species=species, position=i)
if created:
self.logger.info('Created gene ' + g.name + ' for protein ' + p.name)
except IntegrityError:
g = Gene.objects.get(name=gene, species=species, position=i)
if g:
pcan = Protein.objects.get(entry_name=uniprot['entry_name'].lower())
g.proteins.add(pcan)
# structures
# for i, structure in enumerate(uniprot['structures']):
# # try:
# res = structure[1]
# if res == '-':
# res = 0
# structure, created = SignprotStructure.objects.get_or_create(PDB_code=structure[0], resolution=res, protein = p, id=self.signprot_struct_ids())
# if created:
# self.logger.info('Created structure ' + structure.PDB_code + ' for protein ' + p.name)
def signprot_struct_ids(self):
structs = Structure.objects.count()
s_structs = SignprotStructure.objects.count()
offset = 1000
        if s_structs is None:
return structs + 1 + offset
else:
return structs + s_structs + 1 + offset
def create_can_rns(self):
"""Add new numbering scheme entry_name."""
# rns_can, created= ResidueNumberingScheme.objects.get_or_create(slug='can', short_name='CAN', defaults={
# 'name': 'Common arrestin numbering scheme'})
try:
rns_can, created = ResidueNumberingScheme.objects.get_or_create(slug='can', short_name='CAN',
defaults={'name': 'Common arrestin numbering scheme'})
if created:
self.logger.info('Created Arrestin Numbering ' + rns_can.slug)
except IntegrityError:
rns_can = ResidueNumberingScheme.objects.get(slug='can')
self.logger.info('Integrity Error on creating can numbering')
def can_create_families(self):
"""Purge and create arrestin in protein_family."""
ProteinFamily.objects.filter(slug__startswith="200").delete()
# 4 arrestin subtypes, two of which are primarily expressed in the retina and bind only to visual opsins (arrestin 1 and arrestin 4), while the other two (β-arrestin 1 and β-arrestin 2) interact with the remaining ~800 GPCRs
can_dict = {}
can_dict['Arrestin'] = ['Beta', 'Visual']
can_dict['Beta'] = ['ARRB1', 'ARRB2']
can_dict['Visual'] = ['ARRC', 'ARRS']
pff_can, created_pf = ProteinFamily.objects.get_or_create(slug='200', defaults={
'name': 'Arrestins'})
pf1_can = ProteinFamily.objects.get_or_create(slug='200_000', name='Arrestin', parent=pff_can)
for i, family in enumerate(can_dict['Arrestin']):
# slug for the different levels
fam_slug = '200_000_00' + str(i + 1)
pff_can = ProteinFamily.objects.get(slug='200_000')
new_pf, created = ProteinFamily.objects.get_or_create(slug=fam_slug, name=family, parent=pff_can)
for i, protein in enumerate(can_dict[family]):
prot_slug = fam_slug + '_00' + str(i + 1)
pff_fam = ProteinFamily.objects.get(slug=fam_slug)
new_pf, created = ProteinFamily.objects.get_or_create(slug=prot_slug, name=protein, parent=pff_fam)
def parse_uniprot_file(self, accession):
filename = accession + '.txt'
local_file_path = os.sep.join([self.local_uniprot_dir, filename])
remote_file_path = self.remote_uniprot_dir + filename
up = {
'genes': [],
'names': [],
'structures': []
}
read_sequence = False
remote = False
# record whether organism has been read
os_read = False
# should local file be written?
local_file = False
try:
if os.path.isfile(local_file_path):
uf = open(local_file_path, 'r')
self.logger.info('Reading local file ' + local_file_path)
else:
uf = urlopen(remote_file_path)
remote = True
self.logger.info('Reading remote file ' + remote_file_path)
local_file = open(local_file_path, 'w')
for raw_line in uf:
# line format
if remote:
line = raw_line.decode('UTF-8')
else:
line = raw_line
# write to local file if appropriate
if local_file:
local_file.write(line)
# end of file
if line.startswith('//'):
break
# entry name and review status
if line.startswith('ID'):
split_id_line = line.split()
up['entry_name'] = split_id_line[1].lower()
review_status = split_id_line[2].strip(';')
if review_status == 'Unreviewed':
up['source'] = 'TREMBL'
elif review_status == 'Reviewed':
up['source'] = 'SWISSPROT'
# species
elif line.startswith('OS') and not os_read:
species_full = line[2:].strip().strip('.')
species_split = species_full.split('(')
up['species_latin_name'] = species_split[0].strip()
if len(species_split) > 1:
up['species_common_name'] = species_split[1].strip().strip(')')
else:
up['species_common_name'] = up['species_latin_name']
os_read = True
# names
elif line.startswith('DE'):
split_de_line = line.split('=')
if len(split_de_line) > 1:
split_segment = split_de_line[1].split('{')
up['names'].append(split_segment[0].strip().strip(';'))
# genes
elif line.startswith('GN'):
split_gn_line = line.split(';')
for segment in split_gn_line:
if '=' in segment:
split_segment = segment.split('=')
split_segment = split_segment[1].split(',')
for gene_name in split_segment:
split_gene_name = gene_name.split('{')
up['genes'].append(split_gene_name[0].strip())
# structures
elif line.startswith('DR') and 'PDB' in line and not 'sum' in line:
split_gn_line = line.split(';')
up['structures'].append([split_gn_line[1].lstrip(), split_gn_line[3].lstrip().split(" A")[0]])
# sequence
elif line.startswith('SQ'):
split_sq_line = line.split()
seq_len = int(split_sq_line[2])
read_sequence = True
up['sequence'] = ''
elif read_sequence == True:
up['sequence'] += line.strip().replace(' ', '')
# close the Uniprot file
uf.close()
except:
return False
# close the local file if appropriate
if local_file:
local_file.close()
return up
|
the-stack_106_20303
|
from marshmallow import Schema, fields, post_load
from enum import Enum
from src.messages.create_game import MsgCreateGame
from src.messages.register import MsgRegister
from src.messages.subscribe_game import MsgSubscribeGame
from src.messages.turn import MsgTurn
class MessageType:
CREATE_GAME = 'creategame'
REGISTER = 'register'
SUBSCRIBE_GAME = 'subscribe'
TURN = 'turn'
USERS = 'users'
class Message:
def __init__(self, type, data=None):
self.type = type
self.data = data
class MessageSchema(Schema):
type = fields.Str()
payload = fields.Raw()
@post_load
def extract(self, data):
type = data['type']
schema = (MsgCreateGame() if type == MessageType.CREATE_GAME \
else MsgRegister() if type == MessageType.REGISTER \
else MsgSubscribeGame() if type == MessageType.SUBSCRIBE_GAME \
                  else MsgTurn() if type == MessageType.TURN \
else None)
        if schema is not None:
payload = schema.load(data['payload'])
return Message(type, payload.data)
return Message(type)
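# Usage sketch (assumes marshmallow 2.x, where Schema.load() returns a result
# object with a .data attribute, matching the payload.data access in extract()):
#   raw = {'type': MessageType.REGISTER, 'payload': {...}}  # payload fields defined by MsgRegister
#   message = MessageSchema().load(raw).data
#   assert message.type == MessageType.REGISTER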
|
the-stack_106_20305
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
def helm_dep_up(srcdir):
command_line = "cd %s; helm dep up --skip-refresh" % (srcdir)
try:
res = subprocess.check_output(
command_line, shell=True,
executable='/bin/bash')
res = res.strip() # strip whitespace
if res != "":
print(res)
except subprocess.CalledProcessError as e:
print(e)
raise
def _isdir(path, entry):
return os.path.isdir(os.path.join(path, entry))
def main():
path = os.path.abspath(os.path.dirname(sys.argv[0]))
srcdir = os.path.join(path, "..", "helm")
compkitsdir = os.path.join(srcdir, "compute-kits")
compkits = os.listdir(compkitsdir)
for package in [p for p in compkits if _isdir(compkitsdir, p)]:
helm_dep_up(os.path.join(os.path.join(compkitsdir, package)))
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_20308
|
bl_info = {
"name": "Manipulator Menu: Key: 'Ctrl Space'",
"description": "Manipulator Modes",
"author": "Antony Riakiotakis, Sebastian Koenig",
"version": (0, 1, 1),
"blender": (2, 77, 0),
"location": "Ctrl Space",
"warning": "",
"wiki_url": "",
"category": "3d View"
}
import bpy
from bpy.types import (
Menu,
Operator,
)
from bpy.props import (
EnumProperty,
)
# Pie Manipulator Mode - Ctrl Space
class VIEW3D_manipulator_set_of(Operator):
bl_label = "Set Manipulator"
bl_idname = "view3d.manipulator_set"
type = EnumProperty(
name="Type",
items=(('TRANSLATE', "Translate", "Use the manipulator for movement transformations"),
('ROTATE', "Rotate", "Use the manipulator for rotation transformations"),
('SCALE', "Scale", "Use the manipulator for scale transformations"),
),
)
def execute(self, context):
# show manipulator if user selects an option
context.space_data.show_manipulator = True
context.space_data.transform_manipulators = {self.type}
return {'FINISHED'}
class VIEW3D_PIE_manipulator_of(Menu):
bl_label = "Manipulator"
bl_idname = "view3d.manipulator_of"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.operator("view3d.manipulator_set", icon='MAN_TRANS', text="Translate").type = 'TRANSLATE'
pie.operator("view3d.manipulator_set", icon='MAN_ROT', text="Rotate").type = 'ROTATE'
pie.operator("view3d.manipulator_set", icon='MAN_SCALE', text="Scale").type = 'SCALE'
pie.prop(context.space_data, "show_manipulator")
classes = (
VIEW3D_manipulator_set_of,
VIEW3D_PIE_manipulator_of,
)
addon_keymaps = []
def register():
for cls in classes:
bpy.utils.register_class(cls)
wm = bpy.context.window_manager
if wm.keyconfigs.addon:
# Align
km = wm.keyconfigs.addon.keymaps.new(name='Object Non-modal')
kmi = km.keymap_items.new('wm.call_menu_pie', 'SPACE', 'PRESS', ctrl=True)
kmi.properties.name = "view3d.manipulator_of"
addon_keymaps.append((km, kmi))
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if __name__ == "__main__":
register()
|
the-stack_106_20309
|
# Advent of Code 2021
# Day 10: Part 1 and Part 2
# Author: Nico Van den Hooff
from collections import Counter
def read_data(path):
with open(path, "r") as f:
data = f.read().splitlines()
# convert data to set, makes deleting corrupted lines efficient in part 2
data = set(data)
return data
def get_brackets():
"""Gets problem information as it relates to the brackets used in syntax"""
# starting brackets
opening = {"(", "[", "{", "<"}
# maps starting brackets to ending brackets
legal_opening = {"(": ")", "[": "]", "{": "}", "<": ">"}
# maps ending brackets to starting brackets
legal_closing = {")": "(", "]": "[", "}": "{", ">": "<"}
return opening, legal_opening, legal_closing
def get_points(problem):
"""Gets the point values for part 1 (corrupted) or part 2 (incomplete)"""
if problem == "corrupted":
return {")": 3, "]": 57, "}": 1197, ">": 25137}
elif problem == "incomplete":
return {")": 1, "]": 2, "}": 3, ">": 4}
def calc_corrupted_score(illegals, points):
"""Calculates the score for problematic brackets in corrupted chunks (part 1)"""
score = 0
for char, count in illegals.items():
score += points[char] * count
return score
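# Worked example (sketch): if the first illegal characters across all corrupted
# chunks were two ')' and one ']', the corrupted score is 2*3 + 1*57 = 63.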
def calc_incomplete_score(fixes, points):
"""Calculates the score for fixing incomplete chunks (part 2)"""
scores = []
for fix in fixes:
score = 0
for i in reversed(range(len(fix))):
score *= 5
current = fix[i]
score += points[current]
scores.append(score)
scores.sort()
score = scores[len(scores) // 2]
return score
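# Worked example (sketch): a completion string of "])}>" scores
# ((((0*5 + 2)*5 + 1)*5 + 3)*5 + 4 = 294; the function returns the middle value
# of all sorted completion scores, which is the puzzle answer.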
def solve(problem, chunks):
"""Solves the corrupted or incomplete chunk problem"""
opening, legal_opening, legal_closing = get_brackets()
# we need different data structures for part 1 and 2
if problem == "corrupted":
illegals = Counter()
corrupted = set()
elif problem == "incomplete":
fixes = []
for chunk in chunks:
# stack tracks the opening brackets
stack = []
# push the first bracket onto the stack
stack.append(chunk[0])
for char in chunk[1:]:
# push opening brackets onto stack
if char in opening:
stack.append(char)
else:
top = len(stack) - 1
                # pop opening bracket if legal closing bracket encountered
if legal_closing[char] == stack[top]:
stack.pop()
else:
if problem == "corrupted":
illegals[char] += 1
corrupted.add(chunk)
# end here since we stop at first illegal closing bracket
break
if problem == "incomplete":
# create a list of closing brackets to fix an incomplete chunk
for i, char in enumerate(stack):
stack[i] = legal_opening[char]
fixes.append(stack)
if problem == "corrupted":
return illegals, corrupted
elif problem == "incomplete":
return fixes
def part_1(data, problem="corrupted"):
points = get_points(problem)
illegals, corrupted = solve(problem, data)
solution1 = calc_corrupted_score(illegals, points)
return corrupted, solution1
def part_2(data, corrupted, problem="incomplete"):
# remove corrupted chunks
incomplete = data - corrupted
points = get_points(problem)
fixes = solve(problem, incomplete)
solution2 = calc_incomplete_score(fixes, points)
return solution2
def main(path):
data = read_data(path)
corrupted, solution1 = part_1(data)
solution2 = part_2(data, corrupted)
print(f"Part 1 Solution: {solution1}")
print(f"Part 2 Solution: {solution2}")
if __name__ == "__main__":
path = "problems/day-10-syntax-scoring/input.txt"
main(path)
|
the-stack_106_20310
|
import os
from subprocess import run
import platform
import time
def log(string, file):
print(string)
file.write(f"{string}\n")
file.flush()
def timestamp():
now = time.localtime()
return f"[{now.tm_mon}/{now.tm_mday}/{now.tm_year} {now.tm_hour}:{now.tm_min}:{now.tm_sec}]"
duration = 0
def time_me(f):
def wrapper(*args, **kwargs):
start = time.time() # Start timer
res = f(*args, **kwargs)
end = time.time() # Stop timer
global duration
duration = end - start # Calculate duration
return res
return wrapper
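# Usage sketch: any function wrapped with @time_me (such as ps() below) stores its
# last wall-clock runtime in the module-level `duration`, e.g.:
#   res = ps("echo hello")
#   print(f"took {duration:.4f}s")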
@time_me
def ps(cmd, env=None):
# Windows
# if env is None:
# result = run(["powershell", cmd], capture_output=True)
# else:
# result = run(["powershell", cmd], env=env, capture_output=True)
# Linux
if env is None: result = run(cmd, capture_output=True, shell=True)
else: result = run(cmd, env=env, capture_output=True, shell=True)
return result
def sync_examples():
# Inject .vscode folder into example projects
print("Inject .vscode folder into example projects...")
ps("Copy-Item ./MaximLP/Inject/* ./MaximLP/New_Project/ -force -Recurse")
ps("Copy-Item ./MaximSDK/Inject/* ./MaximSDK/New_Project/ -force -Recurse")
def release(version):
r_dir = f"./Releases/VSCode-Maxim-{version}" # Release directory
# Create release release directory
print("Creating release directory...")
ps(f"New-Item -Path {r_dir} -ItemType Directory")
sync_examples()
# Package release
print("Packaging...")
ps(f"New-Item -Path {r_dir}/MaximLP -ItemType Directory")
ps(f"Copy-Item ./MaximLP/* {r_dir}/MaximLP/ -force -Recurse")
ps(f"New-Item -Path {r_dir}/MaximSDK -ItemType Directory")
ps(f"Copy-Item ./MaximSDK/* {r_dir}/MaximSDK/ -force -Recurse")
ps(f"Copy-Item ./readme.md {r_dir}/ -force")
ps(f"Copy-Item ./userguide.md {r_dir}/ -force")
ps(f"Copy-Item ./LICENSE.txt {r_dir}/ -force")
#Archive release
print("Archiving...")
ps(f"compress-archive -path {r_dir} -DestinationPath {r_dir}/VSCode-Maxim-{version}")
# Clean up
print("Cleaning up...")
ps(f"Remove-Item {r_dir}/readme.md")
ps(f"Remove-Item {r_dir}/userguide.md")
ps(f"Remove-Item {r_dir}/LICENSE.txt")
ps(f"Remove-Item {r_dir}/MaximLP -Recurse")
ps(f"Remove-Item {r_dir}/MaximSDK -Recurse")
print("Done!")
# Tests cleaning and compiling example projects for target platforms. If no targets, boards, projects, etc. are specified then it will auto-detect
def test(targets=None, boards=None, projects=None):
env = os.environ.copy()
curplatform = platform.system()
# Simulate the VS Code terminal by appending to the Path
MAXIM_PATH = ""
if curplatform == 'Linux':
MAXIM_PATH = "/home/jcarter/MaximSDK" # Linux
env["PATH"] = f"{MAXIM_PATH}/Tools/OpenOCD:/{MAXIM_PATH}/Tools/GNUTools/bin:{MAXIM_PATH}/Tools/xPack/riscv-none-embed-gcc/bin:" + env["PATH"] # Linux
elif curplatform == 'Windows':
MAXIM_PATH = "C:/MaximSDK" # Windows
env["PATH"] = f"{MAXIM_PATH}/Tools/MinGW/msys/1.0/bin;{MAXIM_PATH}/Tools/OpenOCD;{MAXIM_PATH}/Tools/GNUTools/bin;{MAXIM_PATH}/Tools/xPack/riscv-none-embed-gcc/bin;" + env["PATH"] # Windows
LOG_DIR = os.getcwd()
# Create log file
try: os.mkdir(f"{LOG_DIR}/buildlogs")
except FileExistsError: pass
logfile = open(f"{LOG_DIR}/test.log", 'w')
# Log system info
log(timestamp(), logfile)
log(f"[PLATFORM] {platform.platform()}", logfile)
# Get list of target micros if none is specified
if targets is None:
targets = []
for dir in os.scandir(f"{MAXIM_PATH}/Examples"):
targets.append(dir.name) # Append subdirectories of Examples to list of target micros
log(f"[TARGETS] Detected targets {targets}", logfile)
else:
assert(type(targets) is list)
log(f"[TARGETS] Testing {targets}", logfile)
# Enforce alphabetical ordering
targets = sorted(targets)
# Create subfolders for target-specific logfiles
for t in targets:
try: os.mkdir(f"{LOG_DIR}/buildlogs/{t}")
except FileExistsError: pass
# Track failed projects for end summary
failed = []
count = 0
for target in targets:
log("====================", logfile)
log(f"[TARGET] {target}", logfile)
# Get list of supported boards for this target.
if boards is None:
boards = []
for dirpath, subdirs, items in os.walk(f"{MAXIM_PATH}/Libraries/Boards/{target}"):
if "board.mk" in items and curplatform == 'Linux': boards.append(dirpath.split('/')[-1]) # Linux
elif "board.mk" in items and curplatform == 'Windows': boards.append(dirpath.split('\\')[-1]) # Board string will be the last folder in the directory path # Windows
log(f"[BOARDS] Detected {boards}", logfile)
else:
assert(type(boards) is list)
log(f"[BOARDS] Testing {boards}")
boards = sorted(boards) # Enforce alphabetical ordering
# Get list of examples for this target. If a Makefile is in the root directory it's an example.
if projects is None:
projects = []
for dirpath, subdirs, items in os.walk(f"{MAXIM_PATH}/Examples/{target}"):
if 'Makefile' in items:
projects.append(dirpath)
log(f"[PROJECTS] Detected {projects}", logfile)
else:
assert(type(projects) is list)
log(f"[PROJECTS] Testing {projects}")
projects = sorted(projects) # Enforce alphabetical ordering
# Test each project
for project in projects:
if curplatform == 'Linux': project_stripped = project.split('/')[-1] # Linux
elif curplatform == 'Windows': project_stripped = project.split('\\')[-1] # Windows
log("---------------------", logfile)
log(f"[{target}]\t[{project_stripped}]", logfile)
os.chdir(project) # Need to us os.chdir to set working directory of subprocesses
for board in boards:
buildlog = f"{target}_{board}_{project_stripped}.log"
success = True
# Test build (make all)
build_cmd = f"make all TARGET={target} MAXIM_PATH={MAXIM_PATH} BOARD={board} MAKE=make"
res = ps(build_cmd, env=env) # Run build command
# Error check build command
if res.returncode != 0:
# Fail
success = False
log(f"{timestamp()}[{board}] --- [BUILD]\t[FAILED] Return code {res.returncode}. See buildlogs/{buildlog}", logfile)
# Log detailed output to separate output file
with open(f"{LOG_DIR}/buildlogs/{target}/{buildlog}", 'w') as f:
f.write("===============\n")
f.write(timestamp() + '\n')
f.write(f"[PROJECT] {project}\n")
f.write(f"[BOARD] {board}\n")
f.write(f"[BUILD COMMAND] {build_cmd}\n")
f.write("===============\n")
for line in str(res.stdout + res.stderr, encoding="ASCII").splitlines():
f.write(line + '\n')
else: log(f"{timestamp()}[{board}] --- [BUILD]\t[SUCCESS] {round(duration, 4)}s", logfile)
# Test clean (make clean)
clean_cmd = f"make clean TARGET={target} MAXIM_PATH={MAXIM_PATH} BOARD={board} MAKE=make"
res = ps(clean_cmd, env=env) # Run clean command
# Error check clean command
if res.returncode != 0:
log(f"{timestamp()}[{board}] --- [CLEAN]\t[SUCCESS] {str(res.stderr, encoding='ASCII')}", logfile)
success = False
else: log(f"{timestamp()}[{board}] --- [CLEAN]\t[SUCCESS] {round(duration, 4)}s", logfile)
# Add any failed projects to running list
project_info = {
"target":target,
"project":project_stripped,
"board":board,
"path":project,
"logfile":f"buildlogs/{buildlog}"
}
if not success and project_info not in failed: failed.append(project_info)
count += 1
log("====================", logfile)
log(f"[SUMMARY] Tested {count} projects. {count - len(failed)}/{count} succeeded. Failed projects: ", logfile)
for pinfo in failed:
log(f"[{pinfo['target']}] {pinfo['project']} for {pinfo['board']}... see {pinfo['logfile']}", logfile)
if __name__ == "__main__":
test(targets=["MAX78000"])
|
the-stack_106_20313
|
import numpy as np
from opytimizer.optimizers.evolutionary import ga
from opytimizer.spaces import search
def test_ga_params():
params = {
'p_selection': 0.75,
'p_mutation': 0.25,
'p_crossover': 0.5,
}
new_ga = ga.GA(params=params)
assert new_ga.p_selection == 0.75
assert new_ga.p_mutation == 0.25
assert new_ga.p_crossover == 0.5
def test_ga_params_setter():
new_ga = ga.GA()
try:
new_ga.p_selection = 'a'
except:
new_ga.p_selection = 0.75
try:
new_ga.p_selection = -1
except:
new_ga.p_selection = 0.75
assert new_ga.p_selection == 0.75
try:
new_ga.p_mutation = 'b'
except:
new_ga.p_mutation = 0.25
try:
new_ga.p_mutation = -1
except:
new_ga.p_mutation = 0.25
assert new_ga.p_mutation == 0.25
try:
new_ga.p_crossover = 'c'
except:
new_ga.p_crossover = 0.5
try:
new_ga.p_crossover = -1
except:
new_ga.p_crossover = 0.5
assert new_ga.p_crossover == 0.5
def test_ga_roulette_selection():
new_ga = ga.GA()
fitness = [10, 20, 30, 40, 50]
idx = new_ga._roulette_selection(len(fitness), fitness)
assert len(idx) == 4
def test_ga_crossover():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_ga = ga.GA()
alpha, beta = new_ga._crossover(
search_space.agents[0], search_space.agents[1])
assert type(alpha).__name__ == 'Agent'
assert type(beta).__name__ == 'Agent'
def test_ga_mutation():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_ga = ga.GA()
alpha, beta = new_ga._mutation(
search_space.agents[0], search_space.agents[1])
assert type(alpha).__name__ == 'Agent'
assert type(beta).__name__ == 'Agent'
def test_ga_update():
def square(x):
return np.sum(x**2)
new_ga = ga.GA()
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_ga.update(search_space, square)
|
the-stack_106_20314
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
@dsl.pipeline(
name='Bolt Classification',
description='END to END kubeflow demo with TensorRT Inference Server'
)
def bolt( #pylint: disable=unused-argument
trtserver_name: dsl.PipelineParam = dsl.PipelineParam(name='trtserver_name', value='trtserver'),
model_name: dsl.PipelineParam = dsl.PipelineParam(name='model_name', value='bolt'),
model_version: dsl.PipelineParam = dsl.PipelineParam(name='model_version', value='1'),
webapp_prefix: dsl.PipelineParam = dsl.PipelineParam(name='webapp_prefix', value = 'webapp'),
webapp_port: dsl.PipelineParam = dsl.PipelineParam(name='webapp_port', value='80') ):
serve = dsl.ContainerOp(
name='serve',
image='gcr.io/gtc-2019-demo/ml-pipeline-kubeflow-trtisserve',
arguments=["--trtserver_name", trtserver_name,
"--model_path", 'gs://test-gtc-demo-2019/example_saved_model'
]
)
webapp = dsl.ContainerOp(
name='webapp',
image='gcr.io/gtc-2019-demo/ml-pipeline-trtis-webapp-launcher',
arguments=["--workflow_name", '%s' % ('{{workflow.name}}',),
"--trtserver_name", trtserver_name,
"--model_name", model_name,
"--model_version", str(model_version),
"--webapp_prefix", webapp_prefix,
"--webapp_port", str(webapp_port)
]
)
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(bolt, __file__ + '.tar.gz')
|
the-stack_106_20315
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y, G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
SHORT_DATETIME_FORMAT = 'j M Y, G:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
the-stack_106_20316
|
#!/usr/bin/env python
# Two environmental variables influence this script.
#
# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
# libraries, and data files.
#
# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
# source or binary distribution. This is essential when creating self-contained
# binary wheels.
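# Example invocation (sketch; paths are illustrative):
#   GDAL_CONFIG=/usr/local/bin/gdal-config PACKAGE_DATA=1 python setup.py bdist_wheel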
import copy
from distutils.command.sdist import sdist
import itertools
import logging
import os
import platform
import pprint
import shutil
import subprocess
import sys
from setuptools import setup
from setuptools.extension import Extension
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
log = logging.getLogger()
def check_output(cmd):
# since subprocess.check_output doesn't exist in 2.6
# we wrap it here.
try:
out = subprocess.check_output(cmd)
return out.decode('utf')
except AttributeError:
        # For some reason check_output doesn't exist
# So fall back on Popen
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, err = p.communicate()
return out
def copy_data_tree(datadir, destdir):
try:
shutil.rmtree(destdir)
except OSError:
pass
shutil.copytree(datadir, destdir)
# python -W all setup.py ...
if 'all' in sys.warnoptions:
log.level = logging.DEBUG
# Parse the version from the rasterio module.
with open('rasterio/__init__.py') as f:
for line in f:
if line.find("__version__") >= 0:
version = line.split("=")[1].strip()
version = version.strip('"')
version = version.strip("'")
continue
with open('VERSION.txt', 'w') as f:
f.write(version)
# Use Cython if available.
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
# By default we'll try to get options via gdal-config. On systems without,
# options will need to be set in setup.cfg or on the setup command line.
include_dirs = []
library_dirs = []
libraries = []
extra_link_args = []
gdal2plus = False
gdal_output = [None] * 4
gdalversion = None
try:
import numpy as np
include_dirs.append(np.get_include())
except ImportError:
sys.exit("ERROR: Numpy and its headers are required to run setup().")
try:
gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
for i, flag in enumerate(("--cflags", "--libs", "--datadir", "--version")):
gdal_output[i] = check_output([gdal_config, flag]).strip()
for item in gdal_output[0].split():
if item.startswith("-I"):
include_dirs.extend(item[2:].split(":"))
for item in gdal_output[1].split():
if item.startswith("-L"):
library_dirs.extend(item[2:].split(":"))
elif item.startswith("-l"):
libraries.append(item[2:])
else:
# e.g. -framework GDAL
extra_link_args.append(item)
# datadir, gdal_output[2] handled below
gdalversion = gdal_output[3]
if gdalversion:
log.info("GDAL API version obtained from gdal-config: %s",
gdalversion)
except Exception as e:
if os.name == "nt":
log.info("Building on Windows requires extra options to setup.py "
"to locate needed GDAL files. More information is available "
"in the README.")
else:
log.warning("Failed to get options via gdal-config: %s", str(e))
# Get GDAL API version from environment variable.
if 'GDAL_VERSION' in os.environ:
gdalversion = os.environ['GDAL_VERSION']
log.info("GDAL API version obtained from environment: %s", gdalversion)
# Get GDAL API version from the command line if specified there.
if '--gdalversion' in sys.argv:
index = sys.argv.index('--gdalversion')
sys.argv.pop(index)
gdalversion = sys.argv.pop(index)
log.info("GDAL API version obtained from command line option: %s",
gdalversion)
if not gdalversion:
sys.exit("ERROR: A GDAL API version must be specified. Provide a path "
"to gdal-config using a GDAL_CONFIG environment variable "
"or use a GDAL_VERSION environment variable.")
gdal_version_parts = gdalversion.split('.')
gdal_major_version = int(gdal_version_parts[0])
gdal_minor_version = int(gdal_version_parts[1])
if gdal_major_version == 1 and gdal_minor_version < 11:
sys.exit("ERROR: GDAL >= 1.11 is required for rasterio. "
"Please upgrade GDAL.")
# Conditionally copy the GDAL data. To be used in conjunction with
# the bdist_wheel command to make self-contained binary wheels.
if os.environ.get('PACKAGE_DATA'):
destdir = 'rasterio/gdal_data'
if gdal_output[2]:
log.info("Copying gdal data from %s" % gdal_output[2])
copy_data_tree(gdal_output[2], destdir)
else:
# check to see if GDAL_DATA is defined
gdal_data = os.environ.get('GDAL_DATA', None)
if gdal_data:
log.info("Copying gdal_data from %s" % gdal_data)
copy_data_tree(gdal_data, destdir)
# Conditionally copy PROJ.4 data.
projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
if os.path.exists(projdatadir):
log.info("Copying proj_data from %s" % projdatadir)
copy_data_tree(projdatadir, 'rasterio/proj_data')
# Extend distutil's sdist command to generate 3 C extension sources for
# the _io module: a version for GDAL < 2, one for 2 <= GDAL < 2.1 and
# one for GDAL >= 2.1.
class sdist_multi_gdal(sdist):
def run(self):
shutil.copy('rasterio/_shim1.pyx', 'rasterio/_shim.pyx')
_ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx',
'-o', 'rasterio/_shim1.c'])
print(_)
shutil.copy('rasterio/_shim20.pyx', 'rasterio/_shim.pyx')
_ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx',
'-o', 'rasterio/_shim20.c'])
print(_)
shutil.copy('rasterio/_shim21.pyx', 'rasterio/_shim.pyx')
_ = check_output(['cython', '-v', '-f', 'rasterio/_shim.pyx',
'-o', 'rasterio/_shim21.c'])
print(_)
sdist.run(self)
ext_options = {
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'extra_link_args': extra_link_args,
'define_macros': []}
if not os.name == "nt":
# These options fail on Windows if using Visual Studio
ext_options['extra_compile_args'] = ['-Wno-unused-parameter',
'-Wno-unused-function']
# Copy extension options for cpp extension modules.
cpp_ext_options = copy.deepcopy(ext_options)
# Remove -std=c++11 from C extension options.
try:
ext_options['extra_link_args'].remove('-std=c++11')
ext_options['extra_compile_args'].remove('-std=c++11')
except Exception:
pass
# GDAL 2.3 and newer requires C++11
if (gdal_major_version, gdal_minor_version) >= (2, 3):
cpp11_flag = '-std=c++11'
# 'extra_compile_args' may not be defined
eca = cpp_ext_options.get('extra_compile_args', [])
if platform.system() == 'Darwin':
if cpp11_flag not in eca:
eca.append(cpp11_flag)
eca += [cpp11_flag, '-mmacosx-version-min=10.9', '-stdlib=libc++']
# TODO: Windows
elif cpp11_flag not in eca:
eca.append(cpp11_flag)
cpp_ext_options['extra_compile_args'] = eca
# Configure optional Cython coverage.
cythonize_options = {}
if os.environ.get('CYTHON_COVERAGE'):
cythonize_options['compiler_directives'] = {'linetrace': True}
cythonize_options['annotate'] = True
ext_options['define_macros'].extend(
[('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')])
log.debug('ext_options:\n%s', pprint.pformat(ext_options))
if gdal_major_version >= 2:
# GDAL>=2.0 does not require vendorized rasterfill.cpp
cython_fill = ['rasterio/_fill.pyx']
sdist_fill = ['rasterio/_fill.cpp']
else:
cython_fill = ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp']
sdist_fill = ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp']
# When building from a repo, Cython is required.
if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
log.info("MANIFEST.in found, presume a repo, cythonizing...")
if not cythonize:
sys.exit(
"ERROR: Cython.Build.cythonize not found. "
"Cython is required to build from a repo.")
# Copy the GDAL version-specific shim module to _shim.pyx.
if gdal_major_version == 2 and gdal_minor_version >= 1:
shutil.copy('rasterio/_shim21.pyx', 'rasterio/_shim.pyx')
elif gdal_major_version == 2 and gdal_minor_version == 0:
shutil.copy('rasterio/_shim20.pyx', 'rasterio/_shim.pyx')
elif gdal_major_version == 1:
shutil.copy('rasterio/_shim1.pyx', 'rasterio/_shim.pyx')
ext_modules = cythonize([
Extension(
'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
Extension(
'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
Extension(
'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
Extension(
'rasterio._env', ['rasterio/_env.pyx'], **ext_options),
Extension(
'rasterio._warp', ['rasterio/_warp.pyx'], **cpp_ext_options),
Extension(
'rasterio._fill', cython_fill, **cpp_ext_options),
Extension(
'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
Extension(
'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
Extension(
'rasterio._shim', ['rasterio/_shim.pyx'], **ext_options),
Extension(
'rasterio._crs', ['rasterio/_crs.pyx'], **ext_options),
Extension(
'rasterio.shutil', ['rasterio/shutil.pyx'], **ext_options)],
quiet=True, **cythonize_options)
# If there's no manifest template, as in an sdist, we just specify .c files.
else:
ext_modules = [
Extension(
'rasterio._base', ['rasterio/_base.c'], **ext_options),
Extension(
'rasterio._io', ['rasterio/_io.c'], **ext_options),
Extension(
'rasterio._features', ['rasterio/_features.c'], **ext_options),
Extension(
'rasterio._env', ['rasterio/_env.c'], **ext_options),
Extension(
'rasterio._warp', ['rasterio/_warp.cpp'], **cpp_ext_options),
Extension(
'rasterio._fill', sdist_fill, **cpp_ext_options),
Extension(
'rasterio._err', ['rasterio/_err.c'], **ext_options),
Extension(
'rasterio._example', ['rasterio/_example.c'], **ext_options),
Extension(
'rasterio._crs', ['rasterio/_crs.c'], **ext_options),
Extension(
'rasterio.shutil', ['rasterio/shutil.c'], **ext_options)]
# Copy the GDAL version-specific shim module to _shim.pyx.
if gdal_major_version == 2 and gdal_minor_version >= 1:
ext_modules.append(
Extension('rasterio._shim', ['rasterio/_shim21.c'], **ext_options))
elif gdal_major_version == 2 and gdal_minor_version == 0:
ext_modules.append(
Extension('rasterio._shim', ['rasterio/_shim20.c'], **ext_options))
elif gdal_major_version == 1:
ext_modules.append(
Extension('rasterio._shim', ['rasterio/_shim1.c'], **ext_options))
with open('README.rst') as f:
readme = f.read()
# Runtime requirements.
inst_reqs = [
'affine', 'attrs', 'cligj', 'numpy', 'snuggs>=1.4.1', 'click-plugins']
if sys.version_info < (3, 4):
inst_reqs.append('enum34')
extra_reqs = {
'ipython': ['ipython>=2.0'],
's3': ['boto3>=1.2.4'],
'plot': ['matplotlib'],
'test': [
'pytest>=2.8.2', 'pytest-cov>=2.2.0', 'boto3>=1.2.4', 'packaging',
'hypothesis'],
'docs': ['ghp-import', 'numpydoc', 'sphinx', 'sphinx-rtd-theme']}
# Add futures to 'test' for Python < 3.2.
if sys.version_info < (3, 2):
extra_reqs['test'].append('futures')
# Add all extra requirements
extra_reqs['all'] = list(set(itertools.chain(*extra_reqs.values())))
setup_args = dict(
cmdclass={'sdist': sdist_multi_gdal},
name='rasterio',
version=version,
description="Fast and direct raster I/O for use with Numpy and SciPy",
long_description=readme,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Scientific/Engineering :: GIS'],
keywords='raster gdal',
author='Sean Gillies',
author_email='[email protected]',
url='https://github.com/mapbox/rasterio',
license='BSD',
package_dir={'': '.'},
packages=['rasterio', 'rasterio.rio'],
entry_points='''
[console_scripts]
rio=rasterio.rio.main:main_group
[rasterio.rio_commands]
blocks=rasterio.rio.blocks:blocks
bounds=rasterio.rio.bounds:bounds
calc=rasterio.rio.calc:calc
clip=rasterio.rio.clip:clip
convert=rasterio.rio.convert:convert
edit-info=rasterio.rio.edit_info:edit
env=rasterio.rio.env:env
gcps=rasterio.rio.gcps:gcps
info=rasterio.rio.info:info
insp=rasterio.rio.insp:insp
mask=rasterio.rio.mask:mask
merge=rasterio.rio.merge:merge
overview=rasterio.rio.overview:overview
rasterize=rasterio.rio.rasterize:rasterize
rm=rasterio.rio.rm:rm
sample=rasterio.rio.sample:sample
shapes=rasterio.rio.shapes:shapes
stack=rasterio.rio.stack:stack
transform=rasterio.rio.transform:transform
warp=rasterio.rio.warp:warp
''',
include_package_data=True,
ext_modules=ext_modules,
zip_safe=False,
install_requires=inst_reqs,
extras_require=extra_reqs)
if os.environ.get('PACKAGE_DATA'):
setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
setup(**setup_args)
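
# Example invocations (an illustrative sketch, not taken from the rasterio docs;
# the gdal-config path and version numbers below are assumptions):
#
#   GDAL_CONFIG=/usr/local/bin/gdal-config python setup.py build_ext --inplace
#   GDAL_VERSION=2.2.0 python setup.py bdist_wheel
#   python setup.py build_ext --gdalversion 2.1
#
# Setting PACKAGE_DATA additionally bundles the GDAL and PROJ data directories
# into the build, as handled by the copy_data_tree() calls above.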
|
the-stack_106_20318
|
# Copyright 2017 Alethea Katherine Flowers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
def parametrize_decorator(arg_names, arg_values_list):
"""Parametrize a session.
Add new invocations to the underlying session function using the list of
``arg_values_list`` for the given ``arg_names``. Parametrization is
performed during session discovery and each invocation appears as a
separate session to nox.
Args:
arg_names (Sequence[str]): A list of argument names.
arg_values_list (Sequence[Union[Any, Tuple]]): The list of argument
values determines how often a session is invoked with different
            argument values. If only one argument name was specified then
this is a simple list of values, for example ``[1, 2, 3]``. If N
argument names were specified, this must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argument name, for example ``[(1, 'a'), (2, 'b')]``.
"""
# Allow args to be specified as any of 'arg', 'arg,arg2' or ('arg', 'arg2')
if not isinstance(arg_names, (list, tuple)):
arg_names = list(filter(None, [arg.strip() for arg in arg_names.split(",")]))
# If there's only one arg_name, arg_values_list should be a single item
# or list. Transform it so it'll work with the combine step.
if len(arg_names) == 1:
# In this case, the arg_values_list can also just be a single item.
if not isinstance(arg_values_list, (list, tuple)):
arg_values_list = [arg_values_list]
arg_values_list = [[value] for value in arg_values_list]
# Combine arg names and values into a list of dictionaries. These are
# 'call specs' that will be used to generate calls.
# [{arg: value1}, {arg: value2}, ...]
call_specs = []
for arg_values in arg_values_list:
call_spec = dict(zip(arg_names, arg_values))
call_specs.append(call_spec)
def inner(f):
previous_call_specs = getattr(f, "parametrize", None)
new_call_specs = update_call_specs(previous_call_specs, call_specs)
setattr(f, "parametrize", new_call_specs)
return f
return inner
def update_call_specs(call_specs, new_specs):
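    """Merge new call specs into any existing ones.

    Each existing spec is copied and updated with each new spec, so stacking
    multiple parametrize decorators yields the cartesian product of their
    argument values rather than overwriting earlier ones.
    """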
if not call_specs:
call_specs = [{}]
combined_specs = []
for new_spec in new_specs:
for spec in call_specs:
spec = spec.copy()
spec.update(new_spec)
combined_specs.append(spec)
return combined_specs
def generate_session_signature(func, call_spec):
args = ["{}={}".format(k, repr(call_spec[k])) for k in sorted(call_spec.keys())]
return "({})".format(", ".join(args))
def generate_calls(func, call_specs):
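    """Produce one wrapper per call spec.

    Each wrapper merges its call spec into the keyword arguments before
    delegating to ``func``, and carries ``session_signature`` and
    ``call_spec`` attributes so the parametrized invocations can be told
    apart.
    """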
calls = []
for call_spec in call_specs:
def make_call_wrapper(call_spec):
@functools.wraps(func)
def call_wrapper(*args, **kwargs):
kwargs.update(call_spec)
return func(*args, **kwargs)
return call_wrapper
call = make_call_wrapper(call_spec)
call.session_signature = generate_session_signature(func, call_spec)
call.call_spec = call_spec
calls.append(call)
return calls
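

# Minimal usage sketch (not part of nox itself; the session function and the
# argument values below are made up for illustration):
if __name__ == "__main__":
    @parametrize_decorator("django", ["1.9", "2.0"])
    @parametrize_decorator("db", ["postgres", "sqlite"])
    def my_session(session, django, db):
        print("session=%r django=%s db=%s" % (session, django, db))

    for call in generate_calls(my_session, my_session.parametrize):
        print(call.session_signature)  # e.g. "(db='postgres', django='1.9')"
        call("fake-session-object")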
|
the-stack_106_20321
|
############################################
#
# Author: Luca Cinquini
#
############################################
"""
Abstract
--------
The wps module of the OWSlib package provides client-side functionality for executing invocations to a remote Web Processing Server.
Disclaimer
----------
PLEASE NOTE: the owslib wps module should be considered in beta state: it has been tested against only a handful of WPS services (deployed by the USGS, BADC and PML).
More extensive testing is needed and feedback is appreciated.
Usage
-----
The module can be used to execute three types of requests against a remote WPS endpoint:
a) "GetCapabilities"
- use the method wps.getcapabilities(xml=None)
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS capabilities document from a cached XML file
b) "DescribeProcess"
- use the method wps.describeprocess(identifier, xml=None)
- identifier is the process identifier, retrieved from the list obtained from a previous "GetCapabilities" invocation
- the optional keyword argument "xml" may be used to avoid a real live request, and instead read the WPS process description document from a cached XML file
c) "Execute"
- use the method wps.execute(identifier, inputs, output=None, request=None, response=None),
which submits the job to the remote WPS server and returns a WPSExecution object that can be used to periodically check the job status until completion
(or error)
- the optional keyword argument "request" may be used to avoid re-building the request XML from input arguments, and instead submit a request from a
pre-made XML file
- alternatively, an "Execute" request can be built from input arguments by supplying the "identifier", "inputs" and "output" arguments to the execute() method.
- "identifier" is the mandatory process identifier
- "inputs" is a dictionary of (key,value) pairs where:
- key is a named input parameter
- value is either a string, or any python object that supports a getXml() method
            In particular, a few classes are included in the package to support a FeatureCollection input:
- "WFSFeatureCollection" can be used in conjunction with "WFSQuery" to define a FEATURE_COLLECTION retrieved from a live WFS server.
- "GMLMultiPolygonFeatureCollection" can be used to define one or more polygons of (latitude, longitude) points.
- "output" is an optional output identifier to be included in the ResponseForm section of the request.
- the optional keyword argument "response" mey be used to avoid submitting a real live request, and instead reading the WPS execution response document
from a cached XML file (for debugging or testing purposes)
- the convenience module function monitorExecution() can be used to periodically check the status of a remote running job, and eventually download the output
either to a named file, or to a file specified by the server.
Examples
--------
The files examples/wps-usgs-script.py, examples/wps-pml-script-1.py and examples/wps-pml-script-2.py contain real-world usage examples
that submits a "GetCapabilities", "DescribeProcess" and "Execute" requests to the live USGS and PML servers. To run:
cd examples
python wps-usgs-script.py
python wps-pml-script-1.py
python wps-pml-script-2.py
The file wps-client.py contains a command-line client that can be used to submit a "GetCapabilities", "DescribeProcess" or "Execute"
request to an arbitrary WPS server. For example, you can run it as follows:
cd examples
To print out usage and example invocations: wps-client -help
To execute a (fake) WPS invocation:
wps-client.py -v -u http://cida.usgs.gov/climate/gdp/process/WebProcessingService -r GetCapabilities -x ../tests/USGSCapabilities.xml
The directory tests/ includes several doctest-style files wps_*.txt that show how to interactively submit a
"GetCapabilities", "DescribeProcess" or "Execute" request, without making a live request but rather parsing the response of cached XML response documents. To run:
cd tests
python -m doctest wps_*.txt
(or python -m doctest -v wps_*.txt for verbose output)
Also, the directory tests/ contains several examples of well-formed "Execute" requests:
- The files wps_USGSExecuteRequest*.xml contain requests that can be submitted to the live USGS WPS service.
- The files PMLExecuteRequest*.xml contain requests that can be submitted to the live PML WPS service.
"""
from __future__ import (absolute_import, division, print_function)
from owslib.etree import etree
from owslib.ows import DEFAULT_OWS_NAMESPACE, ServiceIdentification, ServiceProvider, OperationsMetadata
from time import sleep
from owslib.util import (testXMLValue, build_get_url, dump, getTypedValue,
getNamespace, element_to_string, nspath, openURL, nspath_eval, log)
from xml.dom.minidom import parseString
from owslib.namespaces import Namespaces
try: # Python 3
from urllib.parse import urlparse
except ImportError: # Python 2
from urlparse import urlparse
# namespace definition
n = Namespaces()
# These static namespaces are DEPRECATED. Please don't use them.
# No great way of printing a message since they are at the file level
WPS_DEFAULT_NAMESPACE = n.get_namespace("wps")
WFS_NAMESPACE = n.get_namespace("wfs")
OGC_NAMESPACE = n.get_namespace("ogc")
GML_NAMESPACE = n.get_namespace("gml")
DRAW_NAMESPACE = n.get_namespace("draw")
GML_SCHEMA_LOCATION = "http://schemas.opengis.net/gml/3.1.1/base/feature.xsd"
DRAW_SCHEMA_LOCATION = 'http://cida.usgs.gov/climate/derivative/xsd/draw.xsd'
WPS_DEFAULT_SCHEMA_LOCATION = 'http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd'
WPS_DEFAULT_VERSION = '1.0.0'
def get_namespaces():
ns = n.get_namespaces(["ogc","wfs","wps","gml","xsi","xlink"])
ns[None] = n.get_namespace("wps")
ns["ows"] = DEFAULT_OWS_NAMESPACE
return ns
namespaces = get_namespaces()
def is_reference(val):
"""
Checks if the provided value is a reference (URL).
"""
try:
parsed = urlparse(val)
is_ref = parsed.scheme != ''
    except Exception:
is_ref = False
return is_ref
def is_literaldata(val):
"""
Checks if the provided value is a string (includes unicode).
"""
is_str = isinstance(val, str)
if not is_str:
# on python 2.x we need to check unicode
try:
is_str = isinstance(val, unicode)
        except NameError:
# unicode is not available on python 3.x
is_str = False
return is_str
def is_complexdata(val):
"""
Checks if the provided value is an implementation of IComplexData.
"""
return hasattr(val, 'getXml')
class IComplexDataInput(object):
"""
Abstract interface representing complex input object for a WPS request.
"""
def getXml(self):
"""
Method that returns the object data as an XML snippet,
to be inserted into the WPS request document sent to the server.
"""
raise NotImplementedError
class WebProcessingService(object):
"""
Class that contains client-side functionality for invoking an OGC Web Processing Service (WPS).
Implements IWebProcessingService.
"""
def __init__(self, url, version=WPS_DEFAULT_VERSION, username=None, password=None, verbose=False, skip_caps=False):
"""
Initialization method resets the object status.
By default it will execute a GetCapabilities invocation to the remote service,
which can be skipped by using skip_caps=True.
"""
# fields passed in from object initializer
self.url = url
self.username = username
self.password = password
self.version = version
self.verbose = verbose
# fields populated by method invocations
self._capabilities = None
self.identification = None
self.provider = None
self.operations=[]
self.processes=[]
if not skip_caps:
self.getcapabilities()
def getcapabilities(self, xml=None):
"""
Method that requests a capabilities document from the remote WPS server and populates this object's metadata.
keyword argument xml: local XML GetCapabilities document, prevents actual HTTP invocation.
"""
# read capabilities document
reader = WPSCapabilitiesReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
self._capabilities = reader.readFromString(xml)
else:
self._capabilities = reader.readFromUrl(self.url, username=self.username, password=self.password)
log.debug(element_to_string(self._capabilities))
# populate the capabilities metadata obects from the XML tree
self._parseCapabilitiesMetadata(self._capabilities)
def describeprocess(self, identifier, xml=None):
"""
Requests a process document from a WPS service and populates the process metadata.
Returns the process object.
"""
# read capabilities document
reader = WPSDescribeProcessReader(version=self.version, verbose=self.verbose)
if xml:
# read from stored XML file
rootElement = reader.readFromString(xml)
else:
# read from server
rootElement = reader.readFromUrl(self.url, identifier)
log.info(element_to_string(rootElement))
# build metadata objects
return self._parseProcessMetadata(rootElement)
def execute(self, identifier, inputs, output=None, request=None, response=None):
"""
Submits a WPS process execution request.
Returns a WPSExecution object, which can be used to monitor the status of the job, and ultimately retrieve the result.
identifier: the requested process identifier
inputs: list of process inputs as (key, value) tuples (where value is either a string for LiteralData, or an object for ComplexData)
output: optional identifier for process output reference (if not provided, output will be embedded in the response)
request: optional pre-built XML request document, prevents building of request from other arguments
response: optional pre-built XML response document, prevents submission of request to live WPS server
"""
# instantiate a WPSExecution object
log.info('Executing WPS request...')
execution = WPSExecution(version=self.version, url=self.url, username=self.username, password=self.password, verbose=self.verbose)
# build XML request from parameters
if request is None:
requestElement = execution.buildRequest(identifier, inputs, output)
request = etree.tostring( requestElement )
execution.request = request
log.debug(request)
# submit the request to the live server
if response is None:
response = execution.submitRequest(request)
else:
response = etree.fromstring(response)
log.debug(etree.tostring(response))
# parse response
execution.parseResponse(response)
return execution
def _parseProcessMetadata(self, rootElement):
"""
Method to parse a <ProcessDescriptions> XML element and returned the constructed Process object
"""
processDescriptionElement = rootElement.find( 'ProcessDescription' )
process = Process(processDescriptionElement, verbose=self.verbose)
# override existing processes in object metadata, if existing already
found = False
for n, p in enumerate(self.processes):
if p.identifier==process.identifier:
self.processes[n]=process
found = True
# otherwise add it
if not found:
self.processes.append(process)
return process
def _parseCapabilitiesMetadata(self, root):
''' Sets up capabilities metadata objects '''
# use the WPS namespace defined in the document root
wpsns = getNamespace(root)
# loop over children WITHOUT requiring a specific namespace
for element in root:
            # this element's namespace
ns = getNamespace(element)
# <ows:ServiceIdentification> metadata
if element.tag.endswith('ServiceIdentification'):
self.identification=ServiceIdentification(element, namespace=ns)
if self.verbose==True:
dump(self.identification)
# <ows:ServiceProvider> metadata
elif element.tag.endswith('ServiceProvider'):
self.provider=ServiceProvider(element, namespace=ns)
if self.verbose==True:
dump(self.provider)
# <ns0:OperationsMetadata xmlns:ns0="http://www.opengeospatial.net/ows">
# <ns0:Operation name="GetCapabilities">
# <ns0:DCP>
# <ns0:HTTP>
# <ns0:Get xlink:href="http://ceda-wps2.badc.rl.ac.uk/wps?" xmlns:xlink="http://www.w3.org/1999/xlink" />
# </ns0:HTTP>
# </ns0:DCP>
# </ns0:Operation>
# ........
# </ns0:OperationsMetadata>
elif element.tag.endswith('OperationsMetadata'):
for child in element.findall( nspath('Operation', ns=ns) ):
self.operations.append( OperationsMetadata(child, namespace=ns) )
if self.verbose==True:
dump(self.operations[-1])
# <wps:ProcessOfferings>
# <wps:Process ns0:processVersion="1.0.0">
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Identifier>
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.filemanagement.ReceiveFiles</ows:Title>
# </wps:Process>
# ......
# </wps:ProcessOfferings>
elif element.tag.endswith('ProcessOfferings'):
for child in element.findall( nspath('Process', ns=ns) ):
p = Process(child, verbose=self.verbose)
self.processes.append(p)
if self.verbose==True:
dump(self.processes[-1])
class WPSReader(object):
"""
Superclass for reading a WPS document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
self.version = version
self.verbose = verbose
def _readFromUrl(self, url, data, method='Get', username=None, password=None):
"""
Method to get and parse a WPS document, returning an elementtree instance.
url: WPS service base url.
data: GET: dictionary of HTTP (key, value) parameter pairs, POST: XML document to post
username, password: optional user credentials
"""
if method == 'Get':
# full HTTP request url
request_url = build_get_url(url, data)
log.debug(request_url)
# split URL into base url and query string to use utility function
spliturl=request_url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username=username, password=password)
return etree.fromstring(u.read())
elif method == 'Post':
u = openURL(url, data, method='Post', username = username, password = password)
return etree.fromstring(u.read())
else:
raise Exception("Unrecognized HTTP method: %s" % method)
def readFromString(self, string):
"""
Method to read a WPS GetCapabilities document from an XML string.
"""
if not isinstance(string, str) and not isinstance(string, bytes):
raise ValueError("Input must be of type string, not %s" % type(string))
return etree.fromstring(string)
class WPSCapabilitiesReader(WPSReader):
"""
Utility class that reads and parses a WPS GetCapabilities document into a lxml.etree infoset.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSCapabilitiesReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, username=None, password=None):
"""
Method to get and parse a WPS capabilities document, returning an elementtree instance.
url: WPS service base url, to which is appended the HTTP parameters: service, version, and request.
username, password: optional user credentials
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'GetCapabilities', 'version':self.version},
username=username, password=password)
class WPSDescribeProcessReader(WPSReader):
"""
    Class that reads and parses a WPS DescribeProcess document into an etree infoset
"""
def __init__(self, version=WPS_DEFAULT_VERSION, verbose=False):
# superclass initializer
super(WPSDescribeProcessReader,self).__init__(version=version, verbose=verbose)
def readFromUrl(self, url, identifier, username=None, password=None):
"""
Reads a WPS DescribeProcess document from a remote service and returns the XML etree object
        url: WPS service base url, to which are appended the HTTP parameters: 'service', 'version', 'request', and 'identifier'.
"""
return self._readFromUrl(url,
{'service':'WPS', 'request':'DescribeProcess', 'version':self.version, 'identifier':identifier},
username=username, password=password)
class WPSExecuteReader(WPSReader):
"""
    Class that reads and parses a WPS Execute response document into an etree infoset
"""
def __init__(self, verbose=False):
# superclass initializer
super(WPSExecuteReader,self).__init__(verbose=verbose)
def readFromUrl(self, url, data={}, method='Get', username=None, password=None):
"""
Reads a WPS status document from a remote service and returns the XML etree object.
url: the URL to submit the GET/POST request to.
"""
return self._readFromUrl(url, data, method, username=username, password=password)
class WPSExecution():
"""
Class that represents a single WPS process executed on a remote WPS service.
"""
def __init__(self, version=WPS_DEFAULT_VERSION, url=None, username=None, password=None, verbose=False):
# initialize fields
self.url = url
self.version = version
self.username = username
self.password = password
self.verbose = verbose
# request document
self.request = None
# last response document
self.response = None
# status fields retrieved from the response documents
self.process = None
self.serviceInstance = None
self.status = None
self.percentCompleted = 0
self.statusMessage = None
self.errors = []
self.statusLocation = None
self.dataInputs=[]
self.processOutputs=[]
def buildRequest(self, identifier, inputs=[], output=None):
"""
Method to build a WPS process request.
identifier: the requested process identifier
inputs: array of input arguments for the process.
- LiteralData inputs are expressed as simple (key,value) tuples where key is the input identifier, value is the value
- ComplexData inputs are expressed as (key, object) tuples, where key is the input identifier,
and the object must contain a 'getXml()' method that returns an XML infoset to be included in the WPS request
output: optional identifier if process output is to be returned as a hyperlink reference
"""
#<wps:Execute xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1"
# xmlns:xlink="http://www.w3.org/1999/xlink"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
# service="WPS"
# version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_request.xsd">
root = etree.Element(nspath_eval('wps:Execute', namespaces))
root.set('service', 'WPS')
root.set('version', WPS_DEFAULT_VERSION)
root.set(nspath_eval('xsi:schemaLocation', namespaces), '%s %s' % (namespaces['wps'], WPS_DEFAULT_SCHEMA_LOCATION) )
# <ows:Identifier>gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
identifierElement = etree.SubElement(root, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = identifier
# <wps:DataInputs>
dataInputsElement = etree.SubElement(root, nspath_eval('wps:DataInputs', namespaces))
for (key,val) in inputs:
inputElement = etree.SubElement(dataInputsElement, nspath_eval('wps:Input', namespaces))
identifierElement = etree.SubElement(inputElement, nspath_eval('ows:Identifier', namespaces))
identifierElement.text = key
# Literal data
# <wps:Input>
# <ows:Identifier>DATASET_URI</ows:Identifier>
# <wps:Data>
# <wps:LiteralData>dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml</wps:LiteralData>
# </wps:Data>
# </wps:Input>
if is_literaldata(val):
log.debug("literaldata %s", key)
dataElement = etree.SubElement(inputElement, nspath_eval('wps:Data', namespaces))
literalDataElement = etree.SubElement(dataElement, nspath_eval('wps:LiteralData', namespaces))
literalDataElement.text = val
# Complex data
# <wps:Input>
# <ows:Identifier>FEATURE_COLLECTION</ows:Identifier>
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
# </wps:Input>
elif is_complexdata(val):
log.debug("complexdata %s", key)
inputElement.append( val.getXml() )
else:
raise Exception('input type of "%s" parameter is unknown' % key)
# <wps:ResponseForm>
# <wps:ResponseDocument storeExecuteResponse="true" status="true">
# <wps:Output asReference="true">
# <ows:Identifier>OUTPUT</ows:Identifier>
# </wps:Output>
# </wps:ResponseDocument>
# </wps:ResponseForm>
if output is not None:
responseFormElement = etree.SubElement(root, nspath_eval('wps:ResponseForm', namespaces))
responseDocumentElement = etree.SubElement(responseFormElement, nspath_eval('wps:ResponseDocument', namespaces),
attrib={'storeExecuteResponse':'true', 'status':'true'} )
if isinstance(output, str):
self._add_output(responseDocumentElement, output, asReference=True)
elif isinstance(output, list):
for (identifier,as_reference) in output:
self._add_output(responseDocumentElement, identifier, asReference=as_reference)
else:
raise Exception('output parameter is neither string nor list. output=%s' % output)
return root
def _add_output(self, element, identifier, asReference=False):
outputElement = etree.SubElement(element, nspath_eval('wps:Output', namespaces),
attrib={'asReference':str(asReference).lower()} )
        outputIdentifierElement = etree.SubElement(outputElement, nspath_eval('ows:Identifier', namespaces))
        outputIdentifierElement.text = identifier
# wait for 60 seconds by default
def checkStatus(self, url=None, response=None, sleepSecs=60):
"""
Method to check the status of a job execution.
        In the process, this method will update the object 'response' attribute.
url: optional 'statusLocation' URL retrieved from a previous WPS Execute response document.
If not provided, the current 'statusLocation' URL will be used.
sleepSecs: number of seconds to sleep before returning control to the caller.
"""
reader = WPSExecuteReader(verbose=self.verbose)
if response is None:
# override status location
if url is not None:
self.statusLocation = url
log.info('\nChecking execution status... (location=%s)' % self.statusLocation)
response = reader.readFromUrl(self.statusLocation, username=self.username, password=self.password)
else:
response = reader.readFromString(response)
# store latest response
self.response = etree.tostring(response)
log.debug(self.response)
self.parseResponse(response)
# sleep given number of seconds
if self.isComplete()==False:
log.info('Sleeping %d seconds...' % sleepSecs)
sleep(sleepSecs)
def getStatus(self):
return self.status
def isComplete(self):
if (self.status=='ProcessSucceeded' or self.status=='ProcessFailed' or self.status=='Exception'):
return True
elif (self.status=='ProcessStarted'):
return False
elif (self.status=='ProcessAccepted' or self.status=='ProcessPaused'):
return False
else:
raise Exception('Unknown process execution status: %s' % self.status)
def isSucceded(self):
if self.status=='ProcessSucceeded':
return True
else:
return False
def isNotComplete(self):
return not self.isComplete()
def getOutput(self, filepath=None):
"""
Method to write the outputs of a WPS process to a file:
either retrieves the referenced files from the server, or writes out the content of response embedded output.
filepath: optional path to the output file, otherwise a file will be created in the local directory with the name assigned by the server,
or default name 'wps.out' for embedded output.
"""
if self.isSucceded():
content = ''
for output in self.processOutputs:
output_content = output.retrieveData(self.username, self.password)
# ExecuteResponse contains reference to server-side output
                if output_content != "":
content = content + output_content
if filepath is None:
filepath = output.fileName
# ExecuteResponse contain embedded output
if len(output.data)>0:
if filepath is None:
filepath = 'wps.out'
for data in output.data:
content = content + data
# write out content
            if content != '':
out = open(filepath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %filepath)
else:
raise Exception("Execution not successfully completed: status=%s" % self.status)
def submitRequest(self, request):
"""
Submits a WPS Execute document to a remote service, returns the XML response document from the server.
This method will save the request document and the first returned response document.
request: the XML request document to be submitted as POST to the server.
"""
self.request = request
reader = WPSExecuteReader(verbose=self.verbose)
response = reader.readFromUrl(self.url, request, method='Post', username=self.username, password=self.password)
self.response = response
return response
def parseResponse(self, response):
"""
Method to parse a WPS response document
"""
rootTag = response.tag.split('}')[1]
# <ns0:ExecuteResponse>
if rootTag == 'ExecuteResponse':
self._parseExecuteResponse(response)
# <ows:ExceptionReport>
elif rootTag == 'ExceptionReport':
self._parseExceptionReport(response)
else:
log.debug('Unknown Response')
# log status, errors
log.info('Execution status=%s' % self.status)
log.info('Percent completed=%s' % self.percentCompleted)
log.info('Status message=%s' % self.statusMessage)
for error in self.errors:
dump(error)
def _parseExceptionReport(self, root):
"""
Method to parse a WPS ExceptionReport document and populate this object's metadata.
"""
# set exception status, unless set already
if self.status is None:
self.status = "Exception"
for exceptionEl in root.findall( nspath('Exception', ns=namespaces['ows']) ):
self.errors.append( WPSException(exceptionEl) )
def _parseExecuteResponse(self, root):
"""
Method to parse a WPS ExecuteResponse response document and populate this object's metadata.
"""
# retrieve WPS namespace directly from root element
wpsns = getNamespace(root)
self.serviceInstance = root.get( 'serviceInstance' )
if self.statusLocation is None:
self.statusLocation = root.get( 'statusLocation' )
# <ns0:Status creationTime="2011-11-09T14:19:50Z">
# <ns0:ProcessSucceeded>PyWPS Process v.net.path successfully calculated</ns0:ProcessSucceeded>
# </ns0:Status>
# OR
# <ns0:Status creationTime="2011-11-07T08:26:44.359-06:00">
# <ns0:ProcessFailed>
# <ows:ExceptionReport xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Exception>
# <ows:ExceptionText>Attribute null not found in feature collection</ows:ExceptionText>
# </ows:Exception>
# </ows:ExceptionReport>
# </ns0:ProcessFailed>
# </ns0:Status>
statusEl = root.find( nspath('Status/*', ns=wpsns) )
self.status = statusEl.tag.split('}')[1]
# get progress info
try:
percentCompleted = int(statusEl.get('percentCompleted'))
self.percentCompleted = percentCompleted
        except Exception:
pass
# get status message
self.statusMessage = statusEl.text
# exceptions ?
for element in statusEl:
if element.tag.endswith('ExceptionReport'):
self._parseExceptionReport(element)
self.process = Process(root.find(nspath('Process', ns=wpsns)), verbose=self.verbose)
#<wps:DataInputs xmlns:wps="http://www.opengis.net/wps/1.0.0"
# xmlns:ows="http://www.opengis.net/ows/1.1" xmlns:xlink="http://www.w3.org/1999/xlink">
for inputElement in root.findall( nspath('DataInputs/Input', ns=wpsns) ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1])
# <ns:ProcessOutputs>
# xmlns:ns="http://www.opengis.net/wps/1.0.0"
for outputElement in root.findall( nspath('ProcessOutputs/Output', ns=wpsns) ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1])
class ComplexData(object):
"""
Class that represents a ComplexData element in a WPS document
"""
def __init__(self, mimeType=None, encoding=None, schema=None):
self.mimeType = mimeType
self.encoding = encoding
self.schema = schema
class InputOutput(object):
"""
Superclass of a WPS input or output data object.
"""
def __init__(self, element):
self.abstract = None
# loop over sub-elements without requiring a specific namespace
for subElement in element:
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">SUMMARIZE_TIMESTEP</ows:Identifier>
if subElement.tag.endswith('Identifier'):
self.identifier = testXMLValue( subElement )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Summarize Timestep</ows:Title>
elif subElement.tag.endswith('Title'):
self.title = testXMLValue( subElement )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">If selected, processing output will include columns with summarized statistics for all feature attribute values for each timestep</ows:Abstract>
elif subElement.tag.endswith('Abstract'):
self.abstract = testXMLValue( subElement )
self.allowedValues = []
self.supportedValues = []
self.defaultValue = None
self.dataType = None
self.anyValue = False
def _parseData(self, element):
"""
Method to parse a "Data" element
"""
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
#nspath('Data', ns=WPS_NAMESPACE)
complexDataElement = element.find( nspath('ComplexData', ns=getNamespace(element)) )
if complexDataElement is not None:
self.dataType = "ComplexData"
def _parseLiteralData(self, element, literalElementName):
"""
Method to parse the LiteralData element.
"""
# <LiteralData>
# <ows:DataType ows:reference="xs:string" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AllowedValues xmlns:ows="http://www.opengis.net/ows/1.1">
# <ows:Value>COMMA</ows:Value>
# <ows:Value>TAB</ows:Value>
# <ows:Value>SPACE</ows:Value>
# </ows:AllowedValues>
# <DefaultValue>COMMA</DefaultValue>
# </LiteralData>
# <LiteralData>
# <ows:DataType ows:reference="xs:anyURI" xmlns:ows="http://www.opengis.net/ows/1.1" />
# <ows:AnyValue xmlns:ows="http://www.opengis.net/ows/1.1" />
# </LiteralData>
literalDataElement = element.find( literalElementName )
if literalDataElement is not None:
self.dataType = 'LiteralData'
for subElement in literalDataElement:
subns = getNamespace(subElement)
if subElement.tag.endswith('DataType'):
self.dataType = subElement.get( nspath("reference", ns=subns) ).split(':')[1]
elif subElement.tag.endswith('AllowedValues'):
for value in subElement.findall( nspath('Value', ns=subns) ):
self.allowedValues.append( getTypedValue(self.dataType, value.text) )
elif subElement.tag.endswith('DefaultValue'):
self.defaultValue = getTypedValue(self.dataType, subElement.text)
elif subElement.tag.endswith('AnyValue'):
self.anyValue = True
def _parseComplexData(self, element, complexDataElementName):
"""
Method to parse a ComplexData or ComplexOutput element.
"""
# <ComplexData>
# <Default>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# </Default>
# <Supported>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.0.0/feature.xsd</Schema>
# </Format>
# <Format>
# <MimeType>text/xml</MimeType>
# <Encoding>UTF-8</Encoding>
# <Schema>http://schemas.opengis.net/gml/2.1.1/feature.xsd</Schema>
# </Format>
# </Supported>
# </ComplexData>
# OR
# <ComplexOutput defaultEncoding="UTF-8" defaultFormat="text/XML" defaultSchema="NONE">
# <SupportedComplexData>
# <Format>text/XML</Format>
# <Encoding>UTF-8</Encoding>
# <Schema>NONE</Schema>
# </SupportedComplexData>
# </ComplexOutput>
complexDataElement = element.find( complexDataElementName )
if complexDataElement is not None:
self.dataType = "ComplexData"
for supportedComlexDataElement in complexDataElement.findall( 'SupportedComplexData' ):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( supportedComlexDataElement.find( 'Format' ) ),
encoding=testXMLValue( supportedComlexDataElement.find( 'Encoding' ) ),
schema=testXMLValue( supportedComlexDataElement.find( 'Schema' ) )
)
)
for formatElement in complexDataElement.findall( 'Supported/Format'):
self.supportedValues.append( ComplexData( mimeType=testXMLValue( formatElement.find( 'MimeType' ) ),
encoding=testXMLValue( formatElement.find( 'Encoding' ) ),
schema=testXMLValue( formatElement.find( 'Schema' ) )
)
)
defaultFormatElement = complexDataElement.find( 'Default/Format' )
if defaultFormatElement is not None:
self.defaultValue = ComplexData( mimeType=testXMLValue( defaultFormatElement.find( 'MimeType' ) ),
encoding=testXMLValue( defaultFormatElement.find( 'Encoding' ) ),
schema=testXMLValue( defaultFormatElement.find( 'Schema' ) )
)
class Input(InputOutput):
"""
Class that represents a WPS process input.
"""
def __init__(self, inputElement):
# superclass initializer
super(Input,self).__init__(inputElement)
# <Input maxOccurs="1" minOccurs="0">
# OR
# <MinimumOccurs>1</MinimumOccurs>
self.minOccurs = -1
if inputElement.get("minOccurs") is not None:
self.minOccurs = int( inputElement.get("minOccurs") )
if inputElement.find('MinimumOccurs') is not None:
self.minOccurs = int( testXMLValue( inputElement.find('MinimumOccurs') ) )
self.maxOccurs = -1
if inputElement.get("maxOccurs") is not None:
self.maxOccurs = int( inputElement.get("maxOccurs") )
if inputElement.find('MaximumOccurs') is not None:
self.maxOccurs = int( testXMLValue( inputElement.find('MaximumOccurs') ) )
# <LiteralData>
self._parseLiteralData(inputElement, 'LiteralData')
# <ComplexData>
self._parseComplexData(inputElement, 'ComplexData')
class Output(InputOutput):
"""
Class that represents a WPS process output.
"""
def __init__(self, outputElement):
# superclass initializer
super(Output,self).__init__(outputElement)
self.reference = None
self.mimeType = None
self.data = []
self.fileName = None
self.filePath = None
# extract wps namespace from outputElement itself
wpsns = getNamespace(outputElement)
# <ns:Reference encoding="UTF-8" mimeType="text/csv"
# href="http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e" />
referenceElement = outputElement.find( nspath('Reference', ns=wpsns) )
if referenceElement is not None:
self.reference = referenceElement.get('href')
self.mimeType = referenceElement.get('mimeType')
# <LiteralOutput>
self._parseLiteralData(outputElement, 'LiteralOutput')
# <ComplexData> or <ComplexOutput>
self._parseComplexData(outputElement, 'ComplexOutput')
# <Data>
# <ns0:Data>
# <ns0:ComplexData mimeType="text/plain">
# 7504912.93758151 -764109.175074507,7750849.82379226 -22141.8611641468,8561828.42371234 -897195.923493867,7724946.16844165 -602984.014261927
# </ns0:ComplexData>
# </ns0:Data>
# OR:
# <ns0:Data>
# <ns0:ComplexData encoding="UTF-8" mimeType="text/xml" schema="http://schemas.opengis.net/gml/2.1.2/feature.xsd">
# <ns3:FeatureCollection xsi:schemaLocation="http://ogr.maptools.org/ output_0n7ij9D.xsd" xmlns:ns3="http://ogr.maptools.org/">
# <gml:boundedBy xmlns:gml="http://www.opengis.net/gml">
# <gml:Box>
# <gml:coord><gml:X>-960123.1421801626</gml:X><gml:Y>4665723.56559387</gml:Y></gml:coord>
# <gml:coord><gml:X>-101288.6510608822</gml:X><gml:Y>5108200.011823481</gml:Y></gml:coord>
# </gml:Box>
# </gml:boundedBy>
# <gml:featureMember xmlns:gml="http://www.opengis.net/gml">
# <ns3:output fid="F0">
# <ns3:geometryProperty><gml:LineString><gml:coordinates>-960123.142180162365548,4665723.565593870356679,0 -960123.142180162365548,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -960123.142180162598379,4665723.565593870356679,0 -711230.141176006174646,4710278.48552671354264,0 -711230.141176006174646,4710278.48552671354264,0 -623656.677859728806652,4848552.374973464757204,0 -623656.677859728806652,4848552.374973464757204,0 -410100.337491964863148,4923834.82589447684586,0 -410100.337491964863148,4923834.82589447684586,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882242746,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0 -101288.651060882257298,5108200.011823480948806,0</gml:coordinates></gml:LineString></ns3:geometryProperty>
# <ns3:cat>1</ns3:cat>
# <ns3:id>1</ns3:id>
# <ns3:fcat>0</ns3:fcat>
# <ns3:tcat>0</ns3:tcat>
# <ns3:sp>0</ns3:sp>
# <ns3:cost>1002619.181</ns3:cost>
# <ns3:fdist>0</ns3:fdist>
# <ns3:tdist>0</ns3:tdist>
# </ns3:output>
# </gml:featureMember>
# </ns3:FeatureCollection>
# </ns0:ComplexData>
# </ns0:Data>
dataElement = outputElement.find( nspath('Data', ns=wpsns) )
if dataElement is not None:
complexDataElement = dataElement.find( nspath('ComplexData', ns=wpsns) )
if complexDataElement is not None:
self.dataType = "ComplexData"
self.mimeType = complexDataElement.get('mimeType')
                if complexDataElement.text is not None and complexDataElement.text.strip() != '':
self.data.append(complexDataElement.text.strip())
for child in complexDataElement:
self.data.append(etree.tostring(child))
literalDataElement = dataElement.find( nspath('LiteralData', ns=wpsns) )
if literalDataElement is not None:
self.dataType = literalDataElement.get('dataType')
                if literalDataElement.text is not None and literalDataElement.text.strip() != '':
self.data.append(literalDataElement.text.strip())
def retrieveData(self, username=None, password=None):
"""
Method to retrieve data from server-side reference:
returns "" if the reference is not known.
username, password: credentials to access the remote WPS server
"""
url = self.reference
if url is None:
return ""
# a) 'http://cida.usgs.gov/climate/gdp/process/RetrieveResultServlet?id=1318528582026OUTPUT.601bb3d0-547f-4eab-8642-7c7d2834459e'
# b) 'http://rsg.pml.ac.uk/wps/wpsoutputs/outputImage-11294Bd6l2a.tif'
log.info('Output URL=%s' % url)
if '?' in url:
spliturl=url.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = username, password = password)
# extract output filepath from URL query string
self.fileName = spliturl[1].split('=')[1]
else:
u = openURL(url, '', method='Get', username = username, password = password)
# extract output filepath from base URL
self.fileName = url.split('/')[-1]
return u.read()
def writeToDisk(self, path=None, username=None, password=None):
"""
Method to write an output of a WPS process to disk:
        it either retrieves the referenced file from the server, or writes out the content of the response's embedded output.
        path: optional path prefix for the output file; the file name is assigned by the server or derived from the output identifier.
username, password: credentials to access the remote WPS server
"""
# Check if ExecuteResponse contains reference to server-side output
content = self.retrieveData(username, password)
# ExecuteResponse contain embedded output
if content is "" and len(self.data)>0:
self.fileName = self.identifier
for data in self.data:
content = content + data
# write out content
if content is not "":
if self.fileName == "":
self.fileName = self.identifier
self.filePath = path + self.fileName
out = open(self.filePath, 'wb')
out.write(content)
out.close()
log.info('Output written to file: %s' %self.filePath)
class WPSException:
"""
Class representing an exception raised by a WPS.
"""
def __init__(self, root):
self.code = root.attrib.get("exceptionCode", None)
self.locator = root.attrib.get("locator", None)
textEl = root.find( nspath('ExceptionText', ns=getNamespace(root)) )
if textEl is not None:
self.text = textEl.text
else:
self.text = ""
class Process(object):
"""
Class that represents a WPS process.
"""
def __init__(self, elem, verbose=False):
""" Initialization method extracts all available metadata from an XML document (passed in as etree object) """
# <ns0:ProcessDescriptions service="WPS" version="1.0.0"
# xsi:schemaLocation="http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd"
# xml:lang="en-US" xmlns:ns0="http://www.opengis.net/wps/1.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
# OR:
# <ns0:Process ns0:processVersion="1.0.0">
self._root = elem
self.verbose = verbose
wpsns = getNamespace(elem)
# <ProcessDescription statusSupported="true" storeSupported="true" ns0:processVersion="1.0.0">
self.processVersion = elem.get( nspath('processVersion', ns=wpsns) )
self.statusSupported = bool( elem.get( "statusSupported" ) )
self.storeSupported = bool( elem.get( "storeSupported" ) )
self.abstract = None
for child in elem:
# this element's namespace
ns = getNamespace(child)
# <ows:Identifier xmlns:ows="http://www.opengis.net/ows/1.1">gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm</ows:Identifier>
if child.tag.endswith('Identifier'):
self.identifier = testXMLValue( child )
# <ows:Title xmlns:ows="http://www.opengis.net/ows/1.1">Feature Weighted Grid Statistics</ows:Title>
elif child.tag.endswith('Title'):
self.title = testXMLValue( child )
# <ows:Abstract xmlns:ows="http://www.opengis.net/ows/1.1">This algorithm generates area weighted statistics of a gridded dataset for a set of vector polygon features. Using the bounding-box that encloses the feature data and the time range, if provided, a subset of the gridded dataset is requested from the remote gridded data server. Polygon representations are generated for cells in the retrieved grid. The polygon grid-cell representations are then projected to the feature data coordinate reference system. The grid-cells are used to calculate per grid-cell feature coverage fractions. Area-weighted statistics are then calculated for each feature using the grid values and fractions as weights. If the gridded dataset has a time range the last step is repeated for each time step within the time range or all time steps if a time range was not supplied.</ows:Abstract>
elif child.tag.endswith('Abstract'):
self.abstract = testXMLValue( child )
if self.verbose==True:
dump(self)
# <DataInputs>
self.dataInputs = []
for inputElement in elem.findall( 'DataInputs/Input' ):
self.dataInputs.append( Input(inputElement) )
if self.verbose==True:
dump(self.dataInputs[-1], prefix='\tInput: ')
# <ProcessOutputs>
self.processOutputs = []
for outputElement in elem.findall( 'ProcessOutputs/Output' ):
self.processOutputs.append( Output(outputElement) )
if self.verbose==True:
dump(self.processOutputs[-1], prefix='\tOutput: ')
class ComplexDataInput(IComplexDataInput, ComplexData):
def __init__(self, value, mimeType=None, encoding=None, schema=None):
super(ComplexDataInput, self).__init__(mimeType=mimeType, encoding=encoding, schema=schema)
self.value = value
def getXml(self):
if is_reference(self.value):
return self.complexDataAsReference()
else:
return self.complexDataRaw()
def complexDataAsReference(self):
"""
<wps:Reference xlink:href="http://somewhere/test.xml"/>
"""
refElement = etree.Element(nspath_eval('wps:Reference', namespaces),
attrib = { nspath_eval("xlink:href",namespaces) : self.value} )
return refElement
def complexDataRaw(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
attrib = dict()
if self.encoding:
attrib['encoding'] = self.encoding
if self.schema:
attrib['schema'] = self.schema
if self.mimeType:
attrib['mimeType'] = self.mimeType
complexDataElement = etree.SubElement(dataElement, nspath_eval('wps:ComplexData', namespaces), attrib=attrib )
complexDataElement.text = self.value
return dataElement
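
# Quick sketch of how ComplexDataInput chooses between the two encodings above
# (the URL and XML snippet are placeholders):
#
#   ComplexDataInput("http://example.com/data.xml").getXml()   # -> <wps:Reference .../>
#   ComplexDataInput("<foo/>", mimeType="text/xml").getXml()   # -> <wps:Data><wps:ComplexData ...>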
class FeatureCollection(IComplexDataInput):
'''
Base class to represent a Feature Collection used as input to a WPS request.
The method getXml() is invoked by the WPS execute() method to build the WPS request.
All subclasses must implement the getXml() method to provide their specific XML.
Implements IComplexDataInput.
'''
def __init__(self):
pass
def getXml(self):
raise NotImplementedError
class WFSFeatureCollection(FeatureCollection):
'''
FeatureCollection specified by a WFS query.
All subclasses must implement the getQuery() method to provide the specific query portion of the XML.
'''
def __init__(self, wfsUrl, wfsQuery):
'''
wfsUrl: the WFS service URL
example: wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
wfsQuery : a WFS query instance
'''
self.url = wfsUrl
self.query = wfsQuery
# <wps:Reference xlink:href="http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs">
# <wps:Body>
# <wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" service="WFS" version="1.1.0" outputFormat="text/xml; subtype=gml/3.1.1" xsi:schemaLocation="http://www.opengis.net/wfs ../wfs/1.1.0/WFS.xsd">
# .......
# </wfs:GetFeature>
# </wps:Body>
# </wps:Reference>
def getXml(self):
root = etree.Element(nspath_eval('wps:Reference', namespaces), attrib = { nspath_eval("xlink:href",namespaces) : self.url} )
bodyElement = etree.SubElement(root, nspath_eval('wps:Body', namespaces))
getFeatureElement = etree.SubElement(bodyElement, nspath_eval('wfs:GetFeature', namespaces),
attrib = { "service":"WFS",
"version":"1.1.0",
"outputFormat":"text/xml; subtype=gml/3.1.1",
nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (namespaces['wfs'], '../wfs/1.1.0/WFS.xsd')})
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
getFeatureElement.append( self.query.getXml() )
return root
class WFSQuery(IComplexDataInput):
'''
Class representing a WFS query, for insertion into a WFSFeatureCollection instance.
Implements IComplexDataInput.
'''
def __init__(self, typeName, propertyNames=[], filters=[]):
self.typeName = typeName
self.propertyNames = propertyNames
self.filters = filters
def getXml(self):
# <wfs:Query typeName="sample:CONUS_States">
# <wfs:PropertyName>the_geom</wfs:PropertyName>
# <wfs:PropertyName>STATE</wfs:PropertyName>
# <ogc:Filter>
# <ogc:GmlObjectId gml:id="CONUS_States.508"/>
# </ogc:Filter>
# </wfs:Query>
queryElement = etree.Element(nspath_eval('wfs:Query', namespaces), attrib = { "typeName":self.typeName })
for propertyName in self.propertyNames:
propertyNameElement = etree.SubElement(queryElement, nspath_eval('wfs:PropertyName', namespaces))
propertyNameElement.text = propertyName
if len(self.filters)>0:
filterElement = etree.SubElement(queryElement, nspath_eval('ogc:Filter', namespaces))
for filter in self.filters:
gmlObjectIdElement = etree.SubElement(filterElement, nspath_eval('ogc:GmlObjectId', namespaces),
attrib={nspath_eval('gml:id', namespaces):filter})
return queryElement
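
# Illustrative sketch of composing the two classes above into an "Execute" input
# (the WFS endpoint, type name and feature id are placeholders):
#
#   query = WFSQuery("sample:CONUS_States",
#                    propertyNames=["the_geom", "STATE"],
#                    filters=["CONUS_States.508"])
#   featureCollection = WFSFeatureCollection("http://example.com/geoserver/wfs", query)
#   inputs = [("FEATURE_COLLECTION", featureCollection)]
#   # 'inputs' can then be passed to WebProcessingService.execute()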
class GMLMultiPolygonFeatureCollection(FeatureCollection):
'''
Class that represents a FeatureCollection defined as a GML multi-polygon.
'''
def __init__(self, polygons):
'''
Initializer accepts an array of polygons, where each polygon is an array of (lat,lon) tuples.
Example: polygons = [ [(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418), (-101.2363, 39.5273), (-102.8184, 39.5273)],
[(-92.8184, 39.5273), (-92.8184, 37.418), (-91.2363, 37.418), (-91.2363, 39.5273), (-92.8184, 39.5273)] ]
'''
self.polygons = polygons
def getXml(self):
'''
<wps:Data>
<wps:ComplexData mimeType="text/xml" encoding="UTF-8"
schema="http://schemas.opengis.net/gml/3.1.1/base/feature.xsd">
<gml:featureMembers xmlns:ogc="http://www.opengis.net/ogc"
xmlns:draw="gov.usgs.cida.gdp.draw" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ows="http://www.opengis.net/ows" xmlns:gml="http://www.opengis.net/gml"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="gov.usgs.cida.gdp.draw http://cida.usgs.gov/climate/derivative/xsd/draw.xsd">
<gml:box gml:id="box.1">
<gml:the_geom>
<gml:MultiPolygon srsDimension="2"
srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:polygonMember>
<gml:Polygon>
<gml:exterior>
<gml:LinearRing>
<gml:posList>-102.8184 39.5273 -102.8184 37.418 -101.2363 37.418 -101.2363 39.5273 -102.8184 39.5273</gml:posList>
</gml:LinearRing>
</gml:exterior>
</gml:Polygon>
</gml:polygonMember>
</gml:MultiPolygon>
</gml:the_geom>
<gml:ID>0</gml:ID>
</gml:box>
</gml:featureMembers>
</wps:ComplexData>
</wps:Data>
'''
dataElement = etree.Element(nspath_eval('wps:Data', namespaces))
complexDataElement = etree.SubElement(dataElement, nspath_eval('wps:ComplexData', namespaces),
attrib={"mimeType":"text/xml", "encoding":"UTF-8", "schema":GML_SCHEMA_LOCATION} )
featureMembersElement = etree.SubElement(complexDataElement, nspath_eval('gml:featureMembers', namespaces),
attrib={ nspath_eval("xsi:schemaLocation",namespaces):"%s %s" % (DRAW_NAMESPACE, DRAW_SCHEMA_LOCATION)})
boxElement = etree.SubElement(featureMembersElement, nspath_eval('gml:box', namespaces), attrib={ nspath_eval("gml:id",namespaces):"box.1" })
geomElement = etree.SubElement(boxElement, nspath_eval('gml:the_geom', namespaces))
multiPolygonElement = etree.SubElement(geomElement, nspath_eval('gml:MultiPolygon', namespaces),
attrib={"srsDimension":"2", "srsName":"http://www.opengis.net/gml/srs/epsg.xml#4326"} )
for polygon in self.polygons:
polygonMemberElement = etree.SubElement(multiPolygonElement, nspath_eval('gml:polygonMember', namespaces))
polygonElement = etree.SubElement(polygonMemberElement, nspath_eval('gml:Polygon', namespaces))
exteriorElement = etree.SubElement(polygonElement, nspath_eval('gml:exterior', namespaces))
linearRingElement = etree.SubElement(exteriorElement, nspath_eval('gml:LinearRing', namespaces))
posListElement = etree.SubElement(linearRingElement, nspath_eval('gml:posList', namespaces))
posListElement.text = ' '.join(["%s %s" % (x, y) for x, y in polygon[:] ])
idElement = etree.SubElement(boxElement, nspath_eval('gml:ID', namespaces))
idElement.text = "0"
return dataElement
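# Illustrative usage sketch (not part of the original module): combining the complex
# data helpers defined above. The WFS endpoint URL and feature identifiers are
# placeholders, and the WFSFeatureCollection constructor signature (url, query) is
# assumed from the class defined earlier in this module.
#
#   query = WFSQuery("sample:CONUS_States",
#                    propertyNames=["the_geom", "STATE"],
#                    filters=["CONUS_States.508"])
#   feature_collection = WFSFeatureCollection("http://example.com/geoserver/wfs", query)
#   polygon_collection = GMLMultiPolygonFeatureCollection(
#       [[(-102.8184, 39.5273), (-102.8184, 37.418), (-101.2363, 37.418),
#         (-101.2363, 39.5273), (-102.8184, 39.5273)]])
#   data_element = polygon_collection.getXml()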
def monitorExecution(execution, sleepSecs=3, download=False, filepath=None):
    '''
    Convenience method to monitor the status of a WPS execution until it completes (successfully or not),
    and to write the output to a file after a successful job completion.
    execution: WPSExecution instance
    sleepSecs: number of seconds to sleep between check status invocations
    download: True to download the output when the process terminates, False otherwise
    filepath: optional path to the output file (if download=True), otherwise the filepath will be inferred from the response document
    '''
    while not execution.isComplete():
execution.checkStatus(sleepSecs=sleepSecs)
log.info('Execution status: %s' % execution.status)
if execution.isSucceded():
if download:
execution.getOutput(filepath=filepath)
else:
for output in execution.processOutputs:
if output.reference is not None:
log.info('Output URL=%s' % output.reference)
else:
for ex in execution.errors:
log.error('Error: code=%s, locator=%s, text=%s' % (ex.code, ex.locator, ex.text))
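# Illustrative usage sketch (not part of the original module): monitorExecution is
# typically called right after submitting an asynchronous execute request. The WPS
# endpoint, process identifier, inputs and the execute() call signature below are
# assumptions, not definitive API usage.
#
#   wps = WebProcessingService('http://example.com/wps')
#   execution = wps.execute('gs:Buffer', [('geometry', some_feature_collection)])
#   monitorExecution(execution, sleepSecs=10, download=True, filepath='buffer_result.xml')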
def printValue(value):
'''
Utility method to format a value for printing.
'''
# ComplexData type
if isinstance(value, ComplexData):
return "mimeType=%s, encoding=%s, schema=%s" % (value.mimeType, value.encoding, value.schema)
# other type
else:
return value
def printInputOutput(value, indent=''):
'''
Utility method to inspect an input/output element.
'''
# InputOutput fields
print('%s identifier=%s, title=%s, abstract=%s, data type=%s' % (indent, value.identifier, value.title, value.abstract, value.dataType))
for val in value.allowedValues:
print('%s Allowed Value: %s' % (indent, printValue(val)))
    if value.anyValue:
        print('%s Any value allowed' % indent)
for val in value.supportedValues:
print('%s Supported Value: %s' % (indent, printValue(val)))
print('%s Default Value: %s ' % (indent, printValue(value.defaultValue)))
# Input fields
if isinstance(value, Input):
print('%s minOccurs=%d, maxOccurs=%d' % (indent, value.minOccurs, value.maxOccurs))
# Output fields
if isinstance(value, Output):
print('%s reference=%s, mimeType=%s' % (indent, value.reference, value.mimeType))
for datum in value.data:
print('%s Data Value: %s' % (indent, printValue(datum)))
|
the-stack_106_20322
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes_py.utils import is_valid_string
class SubresourceReference(object):
"""
https://kubernetes.io/docs/api-reference/extensions/v1beta1/definitions/#_v1beta1_subresourcereference
"""
def __init__(self, model=None):
super(SubresourceReference, self).__init__()
self._kind = None
self._name = None
self._api_version = None
self._subresource = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
if "kind" in model:
self.kind = model["kind"]
if "name" in model:
self.name = model["name"]
if "apiVersion" in model:
self.api_version = model["apiVersion"]
if "subresource" in model:
self.subresource = model["subresource"]
# ------------------------------------------------------------------------------------- kind
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, k=None):
if not is_valid_string(k):
raise SyntaxError("SubresourceReference: kind: [ {} ] is invalid.".format(k))
self._kind = k
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, n=None):
if not is_valid_string(n):
raise SyntaxError("SubresourceReference: name: [ {} ] is invalid.".format(n))
self._name = n
# ------------------------------------------------------------------------------------- apiVersion
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, v=None):
if not is_valid_string(v):
raise SyntaxError("SubresourceReference: api_version: [ {} ] is invalid.".format(v))
self._api_version = v
# ------------------------------------------------------------------------------------- subresource
@property
def subresource(self):
return self._subresource
@subresource.setter
def subresource(self, s=None):
if not is_valid_string(s):
raise SyntaxError("SubresourceReference: subresource: [ {} ] is invalid.".format(s))
self._subresource = s
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.kind is not None:
data["kind"] = self.kind
if self.name is not None:
data["name"] = self.name
if self.api_version is not None:
data["apiVersion"] = self.api_version
if self.subresource is not None:
data["subresource"] = self.subresource
return data
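# Illustrative sketch (not part of the original module): round-tripping a model dict
# through SubresourceReference. The field values below are hypothetical.
#
#   ref = SubresourceReference(model={
#       "kind": "Deployment",
#       "name": "nginx",
#       "apiVersion": "extensions/v1beta1",
#       "subresource": "scale",
#   })
#   assert ref.serialize() == {
#       "kind": "Deployment",
#       "name": "nginx",
#       "apiVersion": "extensions/v1beta1",
#       "subresource": "scale",
#   }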
|
the-stack_106_20324
|
import matplotlib.pyplot as plt
import numpy as np
def show_inline(image, title=''):
f, ax = plt.subplots(1, 1, figsize=(10,10))
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(image)
ax.set_title(title)
plt.show()
def get_patches(out, k, patch_size=36, random=False):
image = out['image']
graph = out['graph']
if random:
importance_scores = np.random.uniform(size=out['importance_scores'].size)
else:
importance_scores = out['importance_scores']
image = np.pad(image,
((patch_size, patch_size), (patch_size, patch_size), (0, 0)),
mode="constant",
constant_values=255)
important_indices = (-importance_scores).argsort()[:k]
important_centroids = graph.ndata['centroid'][important_indices, :].cpu().numpy().astype(int)
patches = []
for i in range(k):
x, y = important_centroids[i] + patch_size
patch = image[y - int(patch_size / 2): y + int(patch_size / 2),
x - int(patch_size / 2): x + int(patch_size / 2),
:]
patches.append(patch)
return patches
def plot_patches(patches, ncol=5, patch_size=36):
nrow = len(patches) // ncol
patches = np.stack(patches, axis=0)
patches = np.reshape(patches, newshape=(nrow, ncol, patch_size, patch_size, 3))
for i in range(nrow):
for j in range(ncol):
if j == 0:
grid_ = patches[i, j]
else:
grid_ = np.hstack((grid_, patches[i, j]))
if i == 0:
grid = grid_
else:
grid = np.vstack((grid, grid_))
show_inline(grid)
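# Illustrative usage sketch (assumptions: `out` is a dict holding an RGB image, a
# graph whose node data contains 'centroid' coordinates, and per-node
# 'importance_scores', as consumed by get_patches above):
#
#   patches = get_patches(out, k=10, patch_size=36)
#   plot_patches(patches, ncol=5, patch_size=36)
#
# Note: plot_patches assumes len(patches) is a multiple of ncol.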
|
the-stack_106_20325
|
#!/usr/bin/env python3
import sqlite3
import sys
from typing import List
from time import time
from clvm_rs import run_generator
from clvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM
from clvm.casts import int_from_bytes
from clvm.operators import OP_REWRITE
from chia.types.full_block import FullBlock
from chia.types.blockchain_format.program import Program
from chia.consensus.default_constants import DEFAULT_CONSTANTS
from chia.wallet.puzzles.rom_bootstrap_generator import get_generator
from chia.types.condition_opcodes import ConditionOpcode
GENERATOR_ROM = bytes(get_generator())
native_opcode_names_by_opcode = dict(
("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
)
def run_gen(env_data: bytes, block_program_args: bytes):
max_cost = DEFAULT_CONSTANTS.MAX_BLOCK_COST_CLVM
cost_per_byte = DEFAULT_CONSTANTS.COST_PER_BYTE
# we don't charge for the size of the generator ROM. However, we do charge
# cost for the operations it executes
max_cost -= len(env_data) * cost_per_byte
env_data = b"\xff" + env_data + b"\xff" + block_program_args + b"\x80"
try:
return run_generator(
GENERATOR_ROM,
env_data,
KEYWORD_TO_ATOM["q"][0],
KEYWORD_TO_ATOM["a"][0],
native_opcode_names_by_opcode,
max_cost,
0,
)
except Exception as e:
# GENERATOR_RUNTIME_ERROR
print(f"Exception: {e}")
return (117, [], None)
cond_map = {
ConditionOpcode.AGG_SIG_UNSAFE[0]: 0,
ConditionOpcode.AGG_SIG_ME[0]: 1,
ConditionOpcode.CREATE_COIN[0]: 2,
ConditionOpcode.RESERVE_FEE[0]: 3,
ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]: 4,
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]: 5,
ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]: 6,
ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]: 7,
ConditionOpcode.ASSERT_MY_COIN_ID[0]: 8,
ConditionOpcode.ASSERT_MY_PARENT_ID[0]: 9,
ConditionOpcode.ASSERT_MY_PUZZLEHASH[0]: 10,
ConditionOpcode.ASSERT_MY_AMOUNT[0]: 11,
ConditionOpcode.ASSERT_SECONDS_RELATIVE[0]: 12,
ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]: 13,
ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]: 14,
ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]: 15,
}
c = sqlite3.connect(sys.argv[1])
rows = c.execute("SELECT header_hash, height, block FROM full_blocks ORDER BY height")
height_to_hash: List[bytes] = []
for r in rows:
hh = bytes.fromhex(r[0])
height = r[1]
block = FullBlock.from_bytes(r[2])
if len(height_to_hash) <= height:
assert len(height_to_hash) == height
height_to_hash.append(hh)
else:
height_to_hash[height] = hh
if height > 0:
prev_hh = block.prev_header_hash
h = height - 1
while height_to_hash[h] != prev_hh:
height_to_hash[h] = prev_hh
ref = c.execute("SELECT block FROM full_blocks WHERE header_hash=?", (prev_hh.hex(),))
ref_block = FullBlock.from_bytes(ref.fetchone()[0])
prev_hh = ref_block.prev_header_hash
h -= 1
if h < 0:
break
if block.transactions_generator is None:
continue
# add the block program arguments
block_program_args = bytearray(b"\xff")
num_refs = 0
for h in block.transactions_generator_ref_list:
ref = c.execute("SELECT block FROM full_blocks WHERE header_hash=?", (height_to_hash[h].hex(),))
ref_block = FullBlock.from_bytes(ref.fetchone()[0])
block_program_args += b"\xff"
block_program_args += Program.to(bytes(ref_block.transactions_generator)).as_bin()
num_refs += 1
ref.close()
block_program_args += b"\x80\x80"
start_time = time()
err, result, cost = run_gen(bytes(block.transactions_generator), bytes(block_program_args))
run_time = time() - start_time
if err is not None:
print(f"ERROR: {hh.hex()} {height} {err}")
break
num_removals = 0
fees = 0
conditions = [0] * 16
for res in result:
num_removals += 1
for cond in res.conditions:
for cwa in cond[1]:
if cwa.opcode == ConditionOpcode.RESERVE_FEE[0]:
fees += int_from_bytes(cwa.vars[0])
conditions[cond_map[cwa.opcode]] += 1
print(
f"{hh.hex()}\t{height}\t{cost}\t{run_time:0.3f}\t{num_refs}\t{fees}\t"
f"{len(bytes(block.transactions_generator))}\t"
f"{num_removals}\t" + "\t".join([f"{cond}" for cond in conditions])
)
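# Usage sketch (assumption: the positional argument is the path to a Chia full node
# blockchain sqlite database; the file name below is only an example):
#   python3 <path-to-this-script> /path/to/blockchain_v1_mainnet.sqlite
# One line is printed per transaction block: header hash, height, CLVM cost,
# wall-clock run time, generator ref count, fees, generator size, removal count and
# per-opcode condition counts.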
|
the-stack_106_20326
|
#!/usr/bin/env python3
"""
logplot.py
Usage:
logplot.py [options] [-e=<regex>...] [<filename>...]
logplot.py -h | --help
logplot.py --version
Options:
-h --help Show this screen.
--version Show version.
-e=<regex> Pattern to match in file.
-d, --dateformat=<dateformat> Date format in strptime format.
-i, --interval=<interval>[m|h|d] Sample interval in seconds with optional
suffix to denote minutes, hours, or days.
-o, --output=<filename> Output filename [default: logplot.html].
-t, --title=<title> Title for plot [default: Events over Time].
"""
from collections import Counter, defaultdict, deque
from fileinput import FileInput
import re
import sys
from docopt import docopt
import timeplots
def get_timestamp(logtime, text):
try:
timestamp = logtime.strptime(text)
except ValueError as e:
print(repr(e), file=sys.stderr)
else:
print(">>>", timestamp, end="\r", flush=True)
return timestamp
def match_all(logtime, lines):
    for line in lines:
        timestamp = get_timestamp(logtime, line)
        # Skip lines whose timestamp could not be parsed (get_timestamp returns None).
        if timestamp is not None:
            yield timestamp
def match_regex(logtime, lines, expressions):
    expressions = [(exp, re.compile(exp)) for exp in expressions]
    for line in lines:
        for expression, regex in expressions:
            if regex.search(line):
                timestamp = get_timestamp(logtime, line)
                # Skip lines whose timestamp could not be parsed.
                if timestamp is not None:
                    yield expression, timestamp
def get_interval(interval):
if not interval:
return "events", {}
interval = f"{interval}s" if interval.isnumeric() else interval
value, units = interval[:-1], interval[-1]
units = {"s": "seconds", "m": "minutes", "h": "hours", "d": "days"}.get(units)
if not units or not value.isnumeric():
raise ValueError("Invalid interval specified.")
units, interval = f"events every {value} {units}", {units: int(value)}
if value == "1":
units = units[:-1]
return units, interval
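# Illustrative examples of get_interval (derived from the parsing logic above):
#   get_interval(None)  -> ("events", {})
#   get_interval("30")  -> ("events every 30 seconds", {"seconds": 30})
#   get_interval("5m")  -> ("events every 5 minutes", {"minutes": 5})
#   get_interval("1h")  -> ("events every 1 hour", {"hours": 1})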
def main():
args = docopt(__doc__)
expressions = args.get("-e")
title = args.get("--title")
output_filename = args.get("--output")
units, interval = get_interval(args.get("--interval"))
lines = (line for line in FileInput(args.get("<filename>")))
    logtime = timeplots.TimeParser(date_format=args.get("--dateformat"), **interval)
plotter = timeplots.Plotter(width=1400)
plotter.new_plot(title=title, units=units)
if expressions:
# Prefer creating buckets over defaultdict:
# - Predefined buckets assign colors based on order of command line args.
# - With defaultdict, colors are assigned based on order in logs.
buckets = {exp: deque() for exp in expressions}
for expression, timestamp in match_regex(logtime, lines, expressions):
buckets[expression].append(timestamp)
for expression, times in buckets.items():
c = Counter(times)
times, data = zip(*sorted(c.items()))
times, data = zip(*timeplots.missing_time_data(times, data))
plotter.add_line(expression, times, data)
else:
c = Counter(match_all(logtime, lines))
times, data = zip(*sorted(c.items()))
times, data = zip(*timeplots.missing_time_data(times, data))
plotter.add_line("values", times, data)
print(f"Saving to file: '{output_filename}'")
plotter.render(filename=output_filename, title=title)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_20327
|
from collections import OrderedDict
import copy
import getpass
import itertools
import numpy as np
from scipy import signal
import time
LOCAL_MODE = getpass.getuser() == 'tom'
CONFIG = {
'halite_config_setting_divisor': 1.0,
'collect_smoothed_multiplier': 0.0,
'collect_actual_multiplier': 5.0,
'collect_less_halite_ships_multiplier_base': 0.55,
'collect_base_nearest_distance_exponent': 0.2,
'return_base_multiplier': 8.0,
'return_base_less_halite_ships_multiplier_base': 0.85,
'early_game_return_base_additional_multiplier': 0.1,
'early_game_return_boost_step': 50,
'establish_base_smoothed_multiplier': 0.0,
'establish_first_base_smoothed_multiplier_correction': 2.0,
'establish_base_dm_exponent': 1.1,
'first_base_no_4_way_camping_spot_bonus': 300*0,
'start_camp_if_not_winning': 0,
'max_camper_ship_budget': 2*1,
'relative_step_start_camping': 0.15,
'establish_base_deposit_multiplier': 1.0,
'establish_base_less_halite_ships_multiplier_base': 1.0,
'max_attackers_per_base': 3*1,
'attack_base_multiplier': 300.0,
'attack_base_less_halite_ships_multiplier_base': 0.9,
'attack_base_halite_sum_multiplier': 2.0,
'attack_base_run_opponent_multiplier': 1.0,
'attack_base_catch_opponent_multiplier': 1.0,
'collect_run_opponent_multiplier': 10.0,
'return_base_run_opponent_multiplier': 2.5,
'establish_base_run_opponent_multiplier': 2.5,
'collect_catch_opponent_multiplier': 1.0,
'return_base_catch_opponent_multiplier': 1.0,
'establish_base_catch_opponent_multiplier': 0.5,
'two_step_avoid_boxed_opponent_multiplier_base': 0.7,
'n_step_avoid_boxed_opponent_multiplier_base': 0.45,
'min_consecutive_chase_extrapolate': 6,
'chase_return_base_exponential_bonus': 2.0,
'ignore_catch_prob': 0.3,
'max_initial_ships': 60,
'max_final_ships': 60,
'max_standard_ships_decided_end_pack_hunting': 2,
'nearby_ship_halite_spawn_constant': 3.0,
'nearby_halite_spawn_constant': 5.0,
'remaining_budget_spawn_constant': 0.2,
'spawn_score_threshold': 75.0,
'boxed_in_halite_convert_divisor': 1.0,
'n_step_avoid_min_die_prob_cutoff': 0.05,
'n_step_avoid_window_size': 7,
'influence_map_base_weight': 2.0,
'influence_map_min_ship_weight': 0.0,
'influence_weights_additional_multiplier': 2.0,
'influence_weights_exponent': 8.0,
'escape_influence_prob_divisor': 3.0,
'rescue_ships_in_trouble': 1,
'target_strategic_base_distance': 8.0,
'target_strategic_num_bases_ship_divisor': 9,
'target_strategic_triangle_weight': 20.0, # initially: 20
'target_strategic_independent_base_distance_multiplier': 8.0, # initially 8.0
'target_strategic_influence_desirability_multiplier': 1.0, # initially: 1.0
'target_strategic_potential_divisor': 15.0, # initially: 15.0
'max_spawn_relative_step_divisor': 12.0,
'no_spawn_near_base_ship_limit': 100,
'avoid_cycles': 1,
'max_risk_n_step_risky': 0.5,
'max_steps_n_step_risky': 70,
'log_near_base_distance': 2,
'max_recent_considered_relevant_zero_move_count': 120,
'near_base_2_step_risky_min_count': 50,
'relative_stand_still_collect_boost': 1.5,
'initial_collect_boost_away_from_base': 2.0,
'start_hunting_season_relative_step': 0.1875,
'end_hunting_season_relative_step': 0.75,
'early_hunting_season_less_collect_relative_step': 0.375,
'max_standard_ships_early_hunting_season': 2,
'late_hunting_season_more_collect_relative_step': 0.5,
'late_hunting_season_collect_max_n_step_risk': 0.2,
'after_hunting_season_collect_max_n_step_risk': 0.5,
'late_hunting_season_standard_min_fraction': 0.7,
'max_standard_ships_late_hunting_season': 15,
'collect_on_safe_return_relative_step': 0.075,
'min_halite_to_stop_early_hunt': 15000.0,
'early_best_opponent_relative_step': 0.5,
'surrounding_ships_cycle_extrapolate_step_count': 5,
'surrounding_ships_extended_cycle_extrapolate_step_count': 7,
}
NORTH = "NORTH"
SOUTH = "SOUTH"
EAST = "EAST"
WEST = "WEST"
CONVERT = "CONVERT"
SPAWN = "SPAWN"
NOT_NONE_DIRECTIONS = [NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS = [None, NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS_TO_ID = {None: 0, NORTH: 1, SOUTH: 2, EAST: 3, WEST: 4}
RELATIVE_DIR_MAPPING = {None: (0, 0), NORTH: (-1, 0), SOUTH: (1, 0),
EAST: (0, 1), WEST: (0, -1)}
RELATIVE_DIR_TO_DIRECTION_MAPPING = {
v: k for k, v in RELATIVE_DIR_MAPPING.items()}
OPPOSITE_MAPPING = {None: None, NORTH: SOUTH, SOUTH: NORTH, EAST: WEST,
WEST: EAST}
RELATIVE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)]
RELATIVE_NOT_NONE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
MOVE_GATHER_OPTIONS = [(-1, 0, False), (1, 0, False), (0, -1, False),
(0, 1, False), (0, 0, True)]
TWO_STEP_THREAT_DIRECTIONS = {
(-2, 0): [(-1, 0)],
(-1, -1): [(-1, 0), (0, -1)],
(-1, 0): [(-1, 0), (0, 0)],
(-1, 1): [(-1, 0), (0, 1)],
(0, -2): [(0, -1)],
(0, -1): [(0, -1), (0, 0)],
(0, 1): [(0, 1), (0, 0)],
(0, 2): [(0, 1)],
(1, -1): [(1, 0), (0, -1)],
(1, 0): [(1, 0), (0, 0)],
(1, 1): [(1, 0),(0, 1)],
(2, 0): [(1, 0)],
}
GAUSSIAN_2D_KERNELS = {}
for dim in range(3, 20, 2):
# Modified from https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_image_blur.html
center_distance = np.floor(np.abs(np.arange(dim) - (dim-1)/2))
horiz_distance = np.tile(center_distance, [dim, 1])
vert_distance = np.tile(np.expand_dims(center_distance, 1), [1, dim])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(dim/4))
kernel[manh_distance > dim/2] = 0
GAUSSIAN_2D_KERNELS[dim] = kernel
DISTANCES = {}
DISTANCE_MASKS = {}
HALF_PLANES_CATCH = {}
HALF_PLANES_RUN = {}
ROW_COL_DISTANCE_MASKS = {}
ROW_COL_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS = {}
BOX_DIR_MAX_DISTANCE = 4
BOX_DIRECTION_MASKS = {}
ROW_MASK = {}
COLUMN_MASK = {}
DISTANCE_MASK_DIM = 21
half_distance_mask_dim = int(DISTANCE_MASK_DIM/2)
for row in range(DISTANCE_MASK_DIM):
  row_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
  row_mask[row] = 1
  col_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=bool)
  col_mask[:, row] = 1
  ROW_MASK[row] = row_mask
COLUMN_MASK[row] = col_mask
for col in range(DISTANCE_MASK_DIM):
horiz_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - col),
np.abs(np.arange(DISTANCE_MASK_DIM) - col - DISTANCE_MASK_DIM))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - col + DISTANCE_MASK_DIM))
vert_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - row),
np.abs(np.arange(DISTANCE_MASK_DIM) - row - DISTANCE_MASK_DIM))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - row + DISTANCE_MASK_DIM))
horiz_distance = np.tile(horiz_distance, [DISTANCE_MASK_DIM, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, DISTANCE_MASK_DIM])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(DISTANCE_MASK_DIM/4))
DISTANCE_MASKS[(row, col)] = kernel
DISTANCES[(row, col)] = manh_distance
catch_distance_masks = {}
run_distance_masks = {}
for d in MOVE_DIRECTIONS:
if d is None:
        catch_rows = np.array([]).astype(int)
        catch_cols = np.array([]).astype(int)
if d == NORTH:
catch_rows = np.mod(row - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == SOUTH:
catch_rows = np.mod(row + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == WEST:
catch_cols = np.mod(col - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == EAST:
catch_cols = np.mod(col + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
      catch_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
                            dtype=bool)
catch_mask[catch_rows[:, None], catch_cols] = 1
run_mask = np.copy(catch_mask)
run_mask[row, col] = 1
catch_distance_masks[d] = catch_mask
run_distance_masks[d] = run_mask
if d is not None:
        box_dir_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
                                dtype=bool)
box_dir_mask[box_dir_rows[:, None], box_dir_cols] = 1
if d in [NORTH, SOUTH]:
box_dir_mask &= (horiz_distance <= vert_distance)
else:
box_dir_mask &= (horiz_distance >= vert_distance)
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS[(row, col, d)] = box_dir_mask
HALF_PLANES_CATCH[(row, col)] = catch_distance_masks
HALF_PLANES_RUN[(row, col)] = run_distance_masks
for d in range(1, DISTANCE_MASK_DIM):
ROW_COL_DISTANCE_MASKS[(row, col, d)] = manh_distance == d
for d in range(half_distance_mask_dim):
ROW_COL_MAX_DISTANCE_MASKS[(row, col, d)] = manh_distance <= d
ROW_COL_BOX_MAX_DISTANCE_MASKS[(row, col, d)] = np.logical_and(
horiz_distance <= d, vert_distance <= d)
for dist in range(2, half_distance_mask_dim+1):
dist_mask_dim = dist*2+1
row_pos = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
col_pos = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
for direction in NOT_NONE_DIRECTIONS:
if direction == NORTH:
box_mask = (row_pos < dist) & (
np.abs(col_pos-dist) <= (dist-row_pos))
if direction == SOUTH:
box_mask = (row_pos > dist) & (
np.abs(col_pos-dist) <= (row_pos-dist))
if direction == WEST:
box_mask = (col_pos < dist) & (
np.abs(row_pos-dist) <= (dist-col_pos))
if direction == EAST:
box_mask = (col_pos > dist) & (
np.abs(row_pos-dist) <= (col_pos-dist))
BOX_DIRECTION_MASKS[(dist, direction)] = box_mask
CONSIDERED_OTHER_DISTANCES = [13]
OTHER_DISTANCES = {}
for other_distance in CONSIDERED_OTHER_DISTANCES:
for row in range(other_distance):
for col in range(other_distance):
horiz_distance = np.minimum(
np.abs(np.arange(other_distance) - col),
np.abs(np.arange(other_distance) - col - other_distance))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(other_distance) - col + other_distance))
vert_distance = np.minimum(
np.abs(np.arange(other_distance) - row),
np.abs(np.arange(other_distance) - row - other_distance))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(other_distance) - row + other_distance))
horiz_distance = np.tile(horiz_distance, [other_distance, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, other_distance])
manh_distance = horiz_distance + vert_distance
OTHER_DISTANCES[(row, col, other_distance)] = manh_distance
D2_ROW_COL_SHIFTS_DISTANCES = [
(-2, 0, 2),
(-1, -1, 2), (-1, 0, 1), (-1, 1, 2),
(0, -2, 2), (0, -1, 1), (0, 1, 1), (0, 2, 2),
(1, -1, 2), (1, 0, 1), (1, 1, 2),
(2, 0, 2),
]
def row_col_from_square_grid_pos(pos, size):
col = pos % size
row = pos // size
return row, col
def move_ship_row_col(row, col, direction, size):
if direction == "NORTH":
return (size-1 if row == 0 else row-1, col)
elif direction == "SOUTH":
return (row+1 if row < (size-1) else 0, col)
elif direction == "EAST":
return (row, col+1 if col < (size-1) else 0)
elif direction == "WEST":
return (row, size-1 if col == 0 else col-1)
elif direction is None:
return (row, col)
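# Illustrative examples of move_ship_row_col on the toroidal grid (size 21, the
# standard board size assumed elsewhere in this file):
#   move_ship_row_col(0, 5, NORTH, 21)  -> (20, 5)   # wraps over the top edge
#   move_ship_row_col(3, 20, EAST, 21)  -> (3, 0)    # wraps over the right edge
#   move_ship_row_col(3, 20, None, 21)  -> (3, 20)   # stay still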
def get_directional_distance(r1, c1, r2, c2, size, d):
relative_pos = get_relative_position(r1, c1, r2, c2, size)
if d == NORTH:
directional_distance = -relative_pos[0]
elif d == SOUTH:
directional_distance = relative_pos[0]
elif d == EAST:
directional_distance = relative_pos[1]
elif d == WEST:
directional_distance = -relative_pos[1]
return directional_distance
def mirror_edges(observation, num_mirror_dim):
if num_mirror_dim > 0:
# observation = np.arange(225).reshape((15,15)) # Debugging test
assert len(observation.shape) == 2
grid_size = observation.shape[0]
new_grid_size = grid_size + 2*num_mirror_dim
mirrored_obs = np.full((new_grid_size, new_grid_size), np.nan)
# Fill in the original data
mirrored_obs[num_mirror_dim:(-num_mirror_dim),
num_mirror_dim:(-num_mirror_dim)] = observation
# Add top and bottom mirrored data
mirrored_obs[:num_mirror_dim, num_mirror_dim:(
-num_mirror_dim)] = observation[-num_mirror_dim:, :]
mirrored_obs[-num_mirror_dim:, num_mirror_dim:(
-num_mirror_dim)] = observation[:num_mirror_dim, :]
# Add left and right mirrored data
mirrored_obs[:, :num_mirror_dim] = mirrored_obs[
:, -(2*num_mirror_dim):(-num_mirror_dim)]
mirrored_obs[:, -num_mirror_dim:] = mirrored_obs[
:, num_mirror_dim:(2*num_mirror_dim)]
observation = mirrored_obs
return observation
def smooth2d(grid, smooth_kernel_dim=7, return_kernel=False):
edge_augmented = mirror_edges(grid, smooth_kernel_dim-1)
kernel = GAUSSIAN_2D_KERNELS[int(2*smooth_kernel_dim-1)]
convolved = signal.convolve2d(edge_augmented, kernel, mode="valid")
if return_kernel:
return convolved, kernel
else:
return convolved
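# Illustrative sketch of smooth2d (assumption: a 21x21 board, matching
# DISTANCE_MASK_DIM above). The grid is wrapped at the edges via mirror_edges before
# convolving with the Manhattan-distance kernel, so the output keeps the input shape:
#   grid = np.zeros((21, 21))
#   grid[10, 10] = 1000.0
#   smoothed = smooth2d(grid, smooth_kernel_dim=7)
#   assert smoothed.shape == grid.shape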
def get_relative_position(row, col, other_row, other_col, size):
if row >= other_row:
if (other_row + size - row) < (row - other_row):
row_diff = (other_row + size - row)
else:
row_diff = -(row - other_row)
else:
if (row + size - other_row) < (other_row - row):
row_diff = -(row + size - other_row)
else:
row_diff = other_row - row
if col >= other_col:
if (other_col + size - col) < (col - other_col):
col_diff = (other_col + size - col)
else:
col_diff = -(col - other_col)
else:
if (col + size - other_col) < (other_col - col):
col_diff = -(col + size - other_col)
else:
col_diff = other_col - col
return (row_diff, col_diff)
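# Illustrative examples of get_relative_position (grid size 21): the returned
# (row_diff, col_diff) always follows the shortest wrap-around path, so each
# component lies in [-10, 10]:
#   get_relative_position(0, 0, 20, 0, 21)  -> (-1, 0)   # one step across the top edge
#   get_relative_position(5, 5, 8, 3, 21)   -> (3, -2)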
def update_scores_opponent_ships(
config, collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, opponent_ships, opponent_bases, halite_ships, row, col,
grid_size, spawn_cost, drop_None_valid, obs_halite, collect_rate, np_rng,
opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
ignore_bad_attack_directions, observation, ship_k, my_bases, my_ships,
steps_remaining, history, escape_influence_probs, player_ids, env_obs_ids,
env_observation, main_base_distances, nearest_base_distances,
end_game_base_return, camping_override_strategy,
attack_campers_override_strategy, boxed_in_attack_squares,
safe_to_collect, boxed_in_zero_halite_opponents, ignore_convert_positions,
avoid_attack_squares_zero_halite, n_step_avoid_min_die_prob_cutoff,
safe_to_return_halites, safe_to_return_base_halites,
my_nearest_base_distances):
direction_halite_diff_distance_raw = {
NORTH: [], SOUTH: [], EAST: [], WEST: []}
my_bases_or_ships = np.logical_or(my_bases, my_ships)
chase_details = history['chase_counter'][0].get(ship_k, None)
take_my_square_next_halite_diff = None
take_my_next_square_dir = None
wide_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 3]
tight_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 2]
opponents_in_cycle = np.any(opponent_ships[tight_cycle_mask]) and (
np.all(history['empty_or_cycled_positions'][wide_cycle_mask]) or (
np.all(history['empty_or_extended_cycled_positions'][tight_cycle_mask])))
if opponents_in_cycle:
print("EXTRAPOLATING OPPONENT CYCLIC BEHAVIOR", observation['step'], row,
col)
if len(camping_override_strategy) == 0:
navigation_zero_halite_risk_threshold = 0
else:
navigation_zero_halite_risk_threshold = camping_override_strategy[0]
if camping_override_strategy[1].max() >= 1e4:
collect_grid_scores = 1e-4*collect_grid_scores + (
camping_override_strategy[1])
else:
collect_grid_scores += camping_override_strategy[1]
attack_base_scores += camping_override_strategy[2]
if len(attack_campers_override_strategy) > 0:
ignore_opponent_row = attack_campers_override_strategy[0]
ignore_opponent_col = attack_campers_override_strategy[1]
ignore_opponent_distance = attack_campers_override_strategy[5]
collect_grid_scores[ignore_opponent_row, ignore_opponent_col] += (
attack_campers_override_strategy[2])
navigation_zero_halite_risk_threshold = max(
navigation_zero_halite_risk_threshold,
attack_campers_override_strategy[6])
else:
ignore_opponent_row = None
ignore_opponent_col = None
ignore_opponent_distance = None
# Identify directions where I can certainly reach the base in time and always
# mark them as valid
ship_halite = halite_ships[row, col]
safe_return_base_directions = []
if ship_halite < safe_to_return_halites[row, col]:
for base_safe_return_halite, base_location in safe_to_return_base_halites:
if ship_halite < base_safe_return_halite[row, col]:
for d in get_dir_from_target(
row, col, base_location[0], base_location[1], grid_size):
if not d is None and not d in safe_return_base_directions:
safe_return_base_directions.append(d)
# if observation['step'] == 131 and ship_k in ['63-1']:
# import pdb; pdb.set_trace()
can_stay_still_zero_halite = True
for row_shift, col_shift, distance in D2_ROW_COL_SHIFTS_DISTANCES:
considered_row = (row + row_shift) % grid_size
considered_col = (col + col_shift) % grid_size
if opponent_ships[considered_row, considered_col] and (
ignore_opponent_row is None or (((
considered_row != ignore_opponent_row) or (
considered_col != ignore_opponent_col)) and (
ignore_opponent_distance > 2))):
relevant_dirs = []
halite_diff = halite_ships[row, col] - halite_ships[
considered_row, considered_col]
assume_take_my_square_next = False
# if observation['step'] == 266 and row == 11 and col == 15:
# import pdb; pdb.set_trace()
# Extrapolate the opponent behavior if we have been chased for a
# while and chasing is likely to continue
if distance == 1 and chase_details is not None and (
chase_details[1] >= config[
'min_consecutive_chase_extrapolate']) and (
considered_row, considered_col) == (
chase_details[4], chase_details[5]):
chaser_row = chase_details[4]
chaser_col = chase_details[5]
to_opponent_dir = get_dir_from_target(
row, col, chaser_row, chaser_col, grid_size)[0]
opp_to_me_dir = OPPOSITE_MAPPING[to_opponent_dir]
rel_opp_to_me_dir = RELATIVE_DIR_MAPPING[opp_to_me_dir]
opp_can_move_to_me = rel_opp_to_me_dir in (
opponent_ships_sensible_actions_no_risk[chaser_row, chaser_col])
# There is a unique opponent id with the least amount of halite
# on the chaser square or the chaser has at least one friendly
# ship that can replace it
chaser_can_replace = None
chaser_is_chased_by_not_me = None
if opp_can_move_to_me:
chaser_id = player_ids[chaser_row, chaser_col]
near_chaser = ROW_COL_MAX_DISTANCE_MASKS[
chaser_row, chaser_col, 1]
near_halite = halite_ships[near_chaser]
near_chaser_friendly_halite = near_halite[
(near_halite >= 0) & (player_ids[near_chaser] == chaser_id)]
min_non_chaser_halite = near_halite[
(near_halite >= 0) & (
player_ids[near_chaser] != chaser_id)].min()
min_near_chaser_halite = near_halite[near_halite >= 0].min()
opponent_min_hal_ids = player_ids[np.logical_and(
near_chaser, halite_ships == min_near_chaser_halite)]
near_me = ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]
near_me_threat_players = player_ids[np.logical_and(
near_me, (halite_ships >= 0) & (
halite_ships < halite_ships[row, col]))]
double_opp_chase = (near_me_threat_players.size > 1) and (
np.all(near_me_threat_players == chaser_id))
chaser_can_replace = ((opponent_min_hal_ids.size > 1) and (
np.all(opponent_min_hal_ids == chaser_id) or (
(opponent_min_hal_ids == chaser_id).sum() > 1)) or (
(near_chaser_friendly_halite <= (
min_non_chaser_halite)).sum() > 1)) or double_opp_chase
if opp_can_move_to_me and not chaser_can_replace:
chaser_players_index = env_obs_ids[chaser_id]
chaser_k = [k for k, v in env_observation.players[
chaser_players_index][2].items() if v[0] == (
chaser_row*grid_size + chaser_col)][0]
chaser_is_chased = chaser_k in history[
'chase_counter'][chaser_id]
chaser_is_chased_by_not_me = chaser_is_chased
if chaser_is_chased:
chaser_chaser = history['chase_counter'][chaser_id][chaser_k]
chaser_is_chased_by_not_me = (chaser_chaser[4] is None) or (
player_ids[chaser_chaser[4], chaser_chaser[5]] != 0)
if opp_can_move_to_me and not chaser_can_replace and not (
chaser_is_chased_by_not_me):
assume_take_my_square_next = True
take_my_square_next_halite_diff = halite_diff
take_my_next_square_dir = to_opponent_dir
# if observation['step'] == 96 and ship_k in ['80-1']:
# import pdb; pdb.set_trace()
can_ignore_ship = False
if (considered_row, considered_col) in boxed_in_zero_halite_opponents:
can_stay_still_zero_halite = can_stay_still_zero_halite and (
distance == 2)
else:
if halite_ships[row, col] == halite_ships[
considered_row, considered_col]:
opponent_id = player_ids[considered_row, considered_col]
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with near
# base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
considered_row, considered_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance)
if distance == 2:
can_ignore_ship = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] <= (
navigation_zero_halite_risk_threshold)
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(0)
d1_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] > (
navigation_zero_halite_risk_threshold)
d0_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k_dist_zero] > (
navigation_zero_halite_risk_threshold)
can_stay_still_zero_halite = can_stay_still_zero_halite and (
not d0_threat)
# if is_near_base and history['zero_halite_move_behavior'][
# opponent_id][str(is_near_base) + '_' + str(0) + '_ever_risky']:
# import pdb; pdb.set_trace()
can_ignore_ship = not (d0_threat or d1_threat)
if not assume_take_my_square_next and not can_ignore_ship:
relevant_dirs += [] if row_shift >= 0 else [NORTH]
relevant_dirs += [] if row_shift <= 0 else [SOUTH]
relevant_dirs += [] if col_shift <= 0 else [EAST]
relevant_dirs += [] if col_shift >= 0 else [WEST]
# When the opponents are in a cycle: only consider the direction I
# expect my opponent to be at in the next step (if any)
if opponents_in_cycle:
relevant_dirs = []
opponent_ship_key = history['opponent_ship_pos_to_key'][(
considered_row, considered_col)]
opponent_id = player_ids[considered_row, considered_col]
likely_opponent_action = history['opponent_cycle_counters'][
opponent_id-1][opponent_ship_key][1][0]
likely_opponent_next_pos = move_ship_row_col(
considered_row, considered_col, likely_opponent_action, grid_size)
relative_other_pos = get_relative_position(
row, col, likely_opponent_next_pos[0], likely_opponent_next_pos[1],
grid_size)
current_opp_relative_dir = get_relative_position(
row, col, considered_row, considered_col, grid_size)
if np.abs(relative_other_pos[0]) + np.abs(
relative_other_pos[1]) <= 1:
# At distance 1 or 0
# import pdb; pdb.set_trace()
if relative_other_pos[0] == 0 and relative_other_pos[1] == 0:
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
current_opp_relative_dir]]
elif relative_other_pos == (0, 0):
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
relative_other_pos]]
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
for d in relevant_dirs:
direction_halite_diff_distance_raw[d].append(
(halite_diff, distance))
direction_halite_diff_distance = {}
for d in direction_halite_diff_distance_raw:
vals = np.array(direction_halite_diff_distance_raw[d])
if vals.size:
diffs = vals[:, 0]
distances = vals[:, 1]
max_diff = diffs.max()
if max_diff > 0:
if can_stay_still_zero_halite:
greater_min_distance = distances[diffs > 0].min()
else:
# My halite is > 0 and I have a threat at D1 of an aggressive equal
# halite ships and a threat of a less halite ship at D2
greater_min_distance = distances[diffs >= 0].min()
direction_halite_diff_distance[d] = (max_diff, greater_min_distance)
elif max_diff == 0:
equal_min_distance = distances[diffs == 0].min()
direction_halite_diff_distance[d] = (max_diff, equal_min_distance)
else:
min_diff = diffs.min()
min_diff_min_distance = distances[diffs == min_diff].min()
direction_halite_diff_distance[d] = (min_diff, min_diff_min_distance)
else:
direction_halite_diff_distance[d] = None
preferred_directions = []
strongly_preferred_directions = []
valid_directions = copy.copy(MOVE_DIRECTIONS)
one_step_valid_directions = copy.copy(MOVE_DIRECTIONS)
bad_directions = []
ignore_catch = np_rng.uniform() < config['ignore_catch_prob']
# if observation['step'] == 221 and ship_k == '54-1':
# import pdb; pdb.set_trace()
# x=1
for direction, halite_diff_dist in direction_halite_diff_distance.items():
if halite_diff_dist is not None:
move_row, move_col = move_ship_row_col(row, col, direction, grid_size)
no_escape_bonus = 0 if not (
boxed_in_attack_squares[move_row, move_col]) else 5e3
halite_diff = halite_diff_dist[0]
if halite_diff >= 0:
# I should avoid a collision
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_RUN[(row, col)][direction])
valid_directions.remove(direction)
one_step_valid_directions.remove(direction)
bad_directions.append(direction)
if halite_diff_dist[1] == 1:
if halite_diff > 0 or not can_stay_still_zero_halite:
# Only suppress the stay still action if the opponent has something
# to gain.
# Exception: the opponent may aggressively attack my zero halite
# ships
if None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
else:
mask_collect_return[row, col] = False
# I can safely mine halite at the current square if the opponent ship
# is >1 move away
if halite_diff_dist[1] > 1:
mask_collect_return[row, col] = False
collect_grid_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['collect_run_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['return_base_run_opponent_multiplier'])
base_nearby_in_direction_mask = np.logical_and(
ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)], mask_collect_return)
base_nearby_in_direction = np.logical_and(
base_nearby_in_direction_mask, opponent_bases).sum() > 0
if not ignore_bad_attack_directions and not base_nearby_in_direction:
attack_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['attack_base_run_opponent_multiplier'])
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*(ship_halite+spawn_cost)*(
config['establish_base_run_opponent_multiplier'])
elif halite_diff < 0 and (
not ignore_catch or no_escape_bonus > 0) and (not (
move_row, move_col) in ignore_convert_positions):
# I would like a collision unless if there is another opponent ship
# chasing me - risk avoiding policy for now: if there is at least
# one ship in a direction that has less halite, I should avoid it
if no_escape_bonus > 0:
halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
else:
halite_diff = 0 # Dubious choice, likely not very important
# halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_CATCH[(row, col)][direction])
collect_grid_scores -= mask_collect_return*halite_diff*(
config['collect_catch_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*halite_diff*(
config['return_base_catch_opponent_multiplier'])*distance_multiplier
attack_base_scores -= mask_collect_return*halite_diff*(
config['attack_base_catch_opponent_multiplier'])*distance_multiplier
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*halite_diff*(
config['establish_base_catch_opponent_multiplier'])*(
distance_multiplier)
if no_escape_bonus > 0:
strongly_preferred_directions.append(direction)
if boxed_in_attack_squares[row, col] and no_escape_bonus > 0 and (
ship_halite > 0 or obs_halite[row, col] == 0):
# Also incentivize the None action when it is a possible escape
# square of an opponent - divide by 2 to make the None action less
# dominant (likely check in several directions)
collect_grid_scores[row, col] += no_escape_bonus/2
if not None in strongly_preferred_directions:
strongly_preferred_directions.append(None)
preferred_directions.append(direction)
if take_my_square_next_halite_diff is not None and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
if drop_None_valid and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
valid_non_base_directions = []
base_directions = []
for d in valid_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if not opponent_bases[move_row, move_col] :
valid_non_base_directions.append(d)
else:
base_directions.append(d)
# For the remaining valid non base directions: compute a score that resembles
# the probability of being boxed in during the next step
two_step_bad_directions = []
n_step_bad_directions = []
n_step_bad_directions_die_probs = {}
if steps_remaining > 1:
for d in valid_non_base_directions:
my_next_halite = halite_ships[row, col] if d != None else (
halite_ships[row, col] + int(collect_rate*obs_halite[row, col]))
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
my_next_halite = 0 if my_bases[move_row, move_col] else my_next_halite
opponent_mask = ROW_COL_MAX_DISTANCE_MASKS[(move_row, move_col, 3)]
less_halite_threat_opponents = np.where(np.logical_and(
opponent_mask, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships = less_halite_threat_opponents[0].size
if num_threat_ships > 1 and not d in safe_return_base_directions:
all_dir_threat_counter = {
(-1, 0): 0, (1, 0): 0, (0, -1): 0, (0, 1): 0, (0, 0): 0}
for i in range(num_threat_ships):
other_row = less_halite_threat_opponents[0][i]
other_col = less_halite_threat_opponents[1][i]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
for diff_rel_row, diff_rel_col, other_gather in MOVE_GATHER_OPTIONS:
# Only consider sensible opponent actions
if (diff_rel_row, diff_rel_col) in opponent_ships_sensible_actions[
other_row, other_col]:
is_threat = (not other_gather) or (my_next_halite > (
halite_ships[other_row, other_col] + int(
collect_rate*obs_halite[other_row, other_col])))
if is_threat:
other_rel_row = relative_other_pos[0] + diff_rel_row
other_rel_col = relative_other_pos[1] + diff_rel_col
move_diff = np.abs(other_rel_row) + np.abs(other_rel_col)
if move_diff < 3 and move_diff > 0:
threat_dirs = TWO_STEP_THREAT_DIRECTIONS[
(other_rel_row, other_rel_col)]
for threat_row_diff, threat_col_diff in threat_dirs:
all_dir_threat_counter[
(threat_row_diff, threat_col_diff)] += 1
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
# Aggregate the threat count in all_dir_threat_counter
threat_counts = np.array(list(all_dir_threat_counter.values()))
threat_score = np.sqrt(threat_counts.prod())
if threat_score > 0:
# Disincentivize an action that can get me boxed in on the next step
mask_avoid_two_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_two_steps[row, col] = False
collect_grid_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
return_to_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
establish_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
two_step_bad_directions.append(d)
if d not in two_step_bad_directions and not end_game_base_return and (
my_next_halite > 0) and (not d in safe_return_base_directions) and (
d is not None or not safe_to_collect[row, col]):
# For the remaining valid directions: compute a score that resembles
# the probability of being boxed in sometime in the future
opponent_mask_lt = ROW_COL_MAX_DISTANCE_MASKS[
(move_row, move_col, min(
steps_remaining, config['n_step_avoid_window_size']))]
less_halite_threat_opponents_lt = np.where(np.logical_and(
opponent_mask_lt, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships_lt = less_halite_threat_opponents_lt[0].size
# Ignore the box in threat if I have a base and at least one zero
# halite ship one step from the move square
ignore_threat = my_bases[
ROW_COL_DISTANCE_MASKS[(move_row, move_col, 1)]].sum() > 0 and ((
halite_ships[np.logical_and(
my_ships,
ROW_COL_DISTANCE_MASKS[move_row, move_col, 1])] == 0).sum() > 0)
# if observation['step'] == 359 and ship_k == '67-1':
# import pdb; pdb.set_trace()
if not ignore_threat:
lt_catch_prob = {k: [] for k in RELATIVE_NOT_NONE_DIRECTIONS}
for i in range(num_threat_ships_lt):
other_row = less_halite_threat_opponents_lt[0][i]
other_col = less_halite_threat_opponents_lt[1][i]
other_sensible_actions = opponent_ships_sensible_actions[
other_row, other_col]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
# Give less weight to the other ship if there is a base of mine or
# a/multiple less halite ships in between
# FUTURE WORK: Also give additional move leeway if I have nearby
# bases? Especially relevant for None (collect) actions
distance_move_other = np.abs(relative_other_pos).sum()
mask_between_move_and_threat = np.logical_and(
DISTANCES[(move_row, move_col)] < distance_move_other,
DISTANCES[(other_row, other_col)] < distance_move_other)
less_halite_ship_base_count = np.logical_and(
np.logical_and(my_bases_or_ships, mask_between_move_and_threat),
halite_ships <= halite_ships[other_row, other_col]).sum() + 0*(
my_bases[ROW_COL_MAX_DISTANCE_MASKS[
move_row, move_col, 2]].sum())
my_material_defense_multiplier = 2**less_halite_ship_base_count
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
nz_dim = int(threat_dir[0] == 0)
dir_offset = relative_other_pos[nz_dim]*threat_dir[nz_dim]
other_dir_abs_offset = np.abs(relative_other_pos[1-nz_dim])
# if observation['step'] == 155 and ship_k == '63-2':
# import pdb; pdb.set_trace()
if dir_offset >= 0 and (other_dir_abs_offset-1) <= dir_offset:
# Ignore the threat if the ship is on the diagonal and can not
# move in the direction of the threat dir
if (other_dir_abs_offset-1) == dir_offset and len(
other_sensible_actions) < len(MOVE_DIRECTIONS):
if nz_dim == 0:
threat_other_dir = (
0, 1 if relative_other_pos[1-nz_dim] < 0 else -1)
else:
threat_other_dir = (
1 if relative_other_pos[1-nz_dim] < 0 else -1, 0)
threat_other_dirs = [threat_other_dir, threat_dir]
threats_actionable = np.array([
t in other_sensible_actions for t in threat_other_dirs])
consider_this_threat = np.any(threats_actionable)
if threats_actionable[1] and not threats_actionable[0]:
# Lower the threat weight - the opponent can not directly
# attack the considered threat direction and can only move
# along the threat direction
other_dir_abs_offset += 2
else:
consider_this_threat = True
if other_dir_abs_offset == 0 and dir_offset == 0:
# The scenario where a one step threat is ignored due to
# being chased for a while and moving to the threat is
# currently considered.
# This avoids division by zero but is overridden later anyway
other_dir_abs_offset = 2
if consider_this_threat:
lt_catch_prob[threat_dir].append(max(2,
other_dir_abs_offset+dir_offset)*(
my_material_defense_multiplier))
# Add a "bootstrapped" catch probability using the density of the
# players towards the edge of the threat direction
# Only add it if the next halite is > 0 (otherwise assume I can
# always escape)
# Also factor in the distance to my nearest non abandoned base
if my_next_halite > 0:
current_nearest_base_distance = my_nearest_base_distances[row, col]
moved_nearest_base_distance = my_nearest_base_distances[
move_row, move_col]
move_distance_difference = current_nearest_base_distance - (
moved_nearest_base_distance)
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
dens_threat_rows = np.mod(move_row + threat_dir[0]*(
np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
dens_threat_cols = np.mod(move_col + threat_dir[1]*(
1+np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
escape_probs = escape_influence_probs[
dens_threat_rows, dens_threat_cols]
mean_escape_prob = escape_probs.mean()
if escape_probs[:2].min() < 1:
if move_distance_difference > 0:
# When in trouble, it is typically better to move towards one
# of my bases. The move closer distance is of course 1.
mean_escape_prob *= 1.25
if mean_escape_prob < 1:
lt_catch_prob[threat_dir].append(1/(1-mean_escape_prob+1e-9))
# if observation['step'] == 75 and ship_k == '64-1' and d in [
# EAST, WEST]:
# import pdb; pdb.set_trace()
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
if np.all([len(v) > 0 for v in lt_catch_prob.values()]):
# Interpretation: for a threat at distance d, I have a probability
# of surviving it of (d-1)/d. The probability of surviving all
# threat is the product of all individual threats
survive_probs = np.array([
(np.maximum(0.2, (np.array(lt_catch_prob[k])-1)/np.array(
lt_catch_prob[k]))).prod() for k in lt_catch_prob])
min_die_prob = 1-survive_probs.max()
if main_base_distances.max() > 0:
if main_base_distances[move_row, move_col] <= 2:
min_die_prob = 0
else:
min_die_prob = max(
0, min_die_prob-0.33**main_base_distances[
move_row, move_col])
# if observation['step'] == 155 and ship_k in ['63-2', '63-1']:
# import pdb; pdb.set_trace()
# Disincentivize an action that can get me boxed in during the next
# N steps
mask_avoid_n_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_n_steps[row, col] = False
collect_grid_scores[mask_avoid_n_steps] *= ((
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob))
return_to_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
establish_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
n_step_bad_directions_die_probs[d] = min_die_prob
# Correction to act with more risk towards the end of the game
die_prob_cutoff = (n_step_avoid_min_die_prob_cutoff + 0.01*max(
0, 50-steps_remaining))
if d is None:
if observation['relative_step'] > config[
'end_hunting_season_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'after_hunting_season_collect_max_n_step_risk'])
elif observation['relative_step'] > config[
'late_hunting_season_more_collect_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'late_hunting_season_collect_max_n_step_risk'])
# print(observation['step'], die_prob_cutoff)
if min_die_prob > die_prob_cutoff:
n_step_bad_directions.append(d)
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a zero halite square: compute the risk for all available
# actions and only retain the actions with the lowest collision risks
if halite_ships[row, col] == 0 and len(valid_directions) == 0 and (
obs_halite[row, col] == 0):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
if best_risk_score < 0.05:
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
else:
valid_directions = [None]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# if observation['step'] == 169 and ship_k == '65-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a non-zero halite square: prefer moving in directions where
# there is a lower risk of losing the ship as a function of opponent zero
# halite behavior
if halite_ships[row, col] == 0 and obs_halite[row, col] > 0 and (
(len(valid_directions) == 1 and (valid_directions[0] is None)) or (
len(valid_directions) == 0)):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
risk_scores[0] = 1 # Definitely don't stand still
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# Treat attack squares I should avoid with a zero halite ship as N-step bad
# directions, if that leaves us with options
if np.any(avoid_attack_squares_zero_halite) and halite_ships[
row, col] == 0 and steps_remaining > 1:
avoid_attack_directions = []
for d in valid_non_base_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if avoid_attack_squares_zero_halite[move_row, move_col]:
avoid_attack_directions.append(d)
if len(avoid_attack_directions):
all_bad_dirs = set(bad_directions + (
two_step_bad_directions + n_step_bad_directions))
updated_bad_dirs = all_bad_dirs.union(set(avoid_attack_directions))
if len(updated_bad_dirs) > len(all_bad_dirs) and len(
updated_bad_dirs) < len(MOVE_DIRECTIONS):
new_bad_directions = list(updated_bad_dirs.difference(all_bad_dirs))
# import pdb; pdb.set_trace()
n_step_bad_directions.extend(new_bad_directions)
for new_bad_dir in new_bad_directions:
if not new_bad_dir in n_step_bad_directions_die_probs:
n_step_bad_directions_die_probs[new_bad_dir] = 0
# Corner case: if I can replace a chaser position and there are only very
# bad two step escape directions left: replace the chaser
if take_my_next_square_dir is not None and (
take_my_next_square_dir in two_step_bad_directions):
make_chase_replace_n_bad = True
for d in NOT_NONE_DIRECTIONS:
if not d == take_my_next_square_dir:
if d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] < 0.6:
make_chase_replace_n_bad = False
break
elif d in valid_directions:
make_chase_replace_n_bad = False
break
if make_chase_replace_n_bad:
print("CHASE: turning two step bad into n step bad", observation['step'],
row, col)
two_step_bad_directions.remove(take_my_next_square_dir)
# Treat the chasing (replace chaser) position as an n-step bad action.
# Otherwise, we can get trapped in a loop of dumb behavior.
if take_my_next_square_dir is not None and not take_my_next_square_dir in (
two_step_bad_directions) and not take_my_next_square_dir in (
n_step_bad_directions):
n_step_bad_directions.append(take_my_next_square_dir)
n_step_bad_directions_die_probs[take_my_next_square_dir] = 1/4
# If all valid non base directions are n step bad actions: drop n step bad
# actions (call them 2 step bad) that are significantly worse than other n
# step bad actions
all_original_n_step_bad_directions = copy.copy(n_step_bad_directions)
all_n_step_bad_directions_die_probs = copy.copy(
n_step_bad_directions_die_probs)
if len(n_step_bad_directions) > 1 and len(
n_step_bad_directions) == len(valid_non_base_directions) and np.all(
np.array([d in n_step_bad_directions for d in (
valid_non_base_directions)])):
die_probs = np.array(list(n_step_bad_directions_die_probs.values()))
max_die_prob = min(die_probs.min()*2, die_probs.min()+0.1)
delete_from_n_step_bad = []
for d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] > max_die_prob and (
not d in safe_return_base_directions):
delete_from_n_step_bad.append(d)
for d in delete_from_n_step_bad:
two_step_bad_directions.append(d)
n_step_bad_directions.remove(d)
del n_step_bad_directions_die_probs[d]
if valid_non_base_directions:
valid_not_preferred_dirs = list(set(
two_step_bad_directions + n_step_bad_directions))
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
# Drop 2 and n step bad directions if that leaves us with valid options
bad_directions.extend(valid_not_preferred_dirs)
bad_directions = list(set(bad_directions))
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
else:
# Drop 2 step bad directions if that leaves us with valid options
valid_not_preferred_dirs = set(two_step_bad_directions)
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
bad_directions.extend(valid_not_preferred_dirs)
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
# Only keep the strongly preferred directions if there are any
if len(strongly_preferred_directions) > 0:
preferred_directions = strongly_preferred_directions
# Drop repetitive actions if that leaves us with valid options
if ship_k in history['avoid_cycle_actions']:
repetitive_action = history['avoid_cycle_actions'][ship_k]
if repetitive_action in valid_directions and len(valid_directions) > 1:
valid_directions.remove(repetitive_action)
if repetitive_action in preferred_directions:
preferred_directions.remove(repetitive_action)
if repetitive_action in one_step_valid_directions:
one_step_valid_directions.remove(repetitive_action)
bad_directions.append(repetitive_action)
# if observation['step'] == 180 and ship_k == '10-2':
# import pdb; pdb.set_trace()
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, preferred_directions, valid_directions,
len(bad_directions) == len(MOVE_DIRECTIONS), two_step_bad_directions,
n_step_bad_directions, one_step_valid_directions,
n_step_bad_directions_die_probs, all_original_n_step_bad_directions,
all_n_step_bad_directions_die_probs)
# Update the scores as a function of blocking opponent bases
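# Rough sketch of the logic below: for each of the four directions we walk
# outwards from (row, col); as soon as a blocker is encountered within its
# considered max distance, every square on the ray from the blocker onwards
# gets a -1e12 score. A blocker at distance 1 additionally turns that
# direction into a one step bad direction.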
def update_scores_blockers(
collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, row, col, grid_size, blockers,
blocker_max_distances_to_consider, valid_directions,
one_step_valid_directions, early_base_direct_dir=None,
blocker_max_distance=half_distance_mask_dim, update_attack_base=True):
one_step_bad_directions = []
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
rows = np.mod(row - (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == SOUTH:
rows = np.mod(row + (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == WEST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col - (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
elif d == EAST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col + (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
if d == early_base_direct_dir:
considered_vals[0] = 1
is_blocking = np.logical_and(considered_vals, np.arange(
blocker_max_distance) < considered_max_distances)
if np.any(is_blocking):
first_blocking_id = np.where(is_blocking)[0][0]
mask_rows = rows[first_blocking_id:]
mask_cols = cols[first_blocking_id:]
collect_grid_scores[mask_rows, mask_cols] = -1e12
return_to_base_scores[mask_rows, mask_cols] = -1e12
establish_base_scores[mask_rows, mask_cols] = -1e12
if update_attack_base:
attack_base_scores[mask_rows, mask_cols] = -1e12
if first_blocking_id == 0:
one_step_bad_directions.append(d)
if d in valid_directions:
valid_directions.remove(d)
if d in one_step_valid_directions:
one_step_valid_directions.remove(d)
# Lower the score for entire quadrants when the two quadrant directions are
# blocking the movement
num_bad_one_directions = len(one_step_bad_directions)
if num_bad_one_directions > 1:
for i in range(num_bad_one_directions-1):
bad_direction_1 = one_step_bad_directions[i]
for j in range(i+1, num_bad_one_directions):
bad_direction_2 = one_step_bad_directions[j]
if (bad_direction_1 in [NORTH, SOUTH]) != (
bad_direction_2 in [NORTH, SOUTH]):
bad_quadrant_mask = np.logical_and(
HALF_PLANES_CATCH[row, col][bad_direction_1],
HALF_PLANES_CATCH[row, col][bad_direction_2])
collect_grid_scores[bad_quadrant_mask] = -1e12
return_to_base_scores[bad_quadrant_mask] = -1e12
establish_base_scores[bad_quadrant_mask] = -1e12
if update_attack_base:
attack_base_scores[bad_quadrant_mask] = -1e12
# Additional logic to avoid collisions when there is only a
# single escape direction
if blockers[row, col]:
collect_grid_scores[row, col] = -1e12
return_to_base_scores[row, col] = -1e12
establish_base_scores[row, col] = -1e12
attack_base_scores[row, col] = -1e12
if None in valid_directions:
valid_directions.remove(None)
if None in one_step_valid_directions:
one_step_valid_directions.remove(None)
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, valid_directions, one_step_valid_directions,
one_step_bad_directions)
def set_scores_single_nearby_zero(scores, nearby, size, ship_row, ship_col,
nearby_distance=1):
nearby_pos = np.where(nearby)
row = nearby_pos[0][0]
col = nearby_pos[1][0]
next_nearby_pos = None
drop_None_valid = False
for i in range(-nearby_distance, nearby_distance+1):
near_row = (row + i) % size
for j in range(-nearby_distance, nearby_distance+1):
near_col = (col + j) % size
if i != 0 or j != 0:
# Don't gather near the base and don't move on top of it
scores[near_row, near_col] = -1e7
if near_row == ship_row and near_col == ship_col:
next_nearby_pos = get_dir_from_target(
ship_row, ship_col, row, col, size)[0]
else:
if near_row == ship_row and near_col == ship_col:
# Don't stay on top of the base
drop_None_valid = True
return scores, next_nearby_pos, drop_None_valid
def grid_distance(r1, c1, r2, c2, size):
horiz_diff = c2-c1
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-size), np.abs(horiz_diff+size)))
vert_diff = r2-r1
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-size), np.abs(vert_diff+size)))
return horiz_distance+vert_distance
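# Example: on a 21x21 torus, grid_distance(0, 0, 20, 20, 21) == 2 since both
# the row and column differences wrap around to a distance of 1.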
def override_early_return_base_scores(
base_return_grid_multiplier, my_bases, ship_row, ship_col, my_ship_count):
base_pos = np.where(my_bases)
base_row = base_pos[0][0]
base_col = base_pos[1][0]
dist_to_base = DISTANCES[base_row, base_col][ship_row, ship_col]
# Remember the rule that blocks spawning when a ship is about to return
if dist_to_base <= 10-my_ship_count:
base_return_grid_multiplier[base_row, base_col] = 0
return base_return_grid_multiplier
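# Illustration: with e.g. 3 ships, any ship within distance 7 of the single
# base gets a zero return multiplier there, so a small early fleet is not
# drawn back to the base (where a returning ship would block spawning).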
def get_nearest_base_distances(grid_size, ignore_abandoned, observation):
base_dms = []
base_distances = []
# for b in player_obs[1]:
# row, col = row_col_from_square_grid_pos(player_obs[1][b], grid_size)
# if not (row, col) in ignore_abandoned:
# base_dms.append(DISTANCE_MASKS[(row, col)])
# base_distances.append(DISTANCES[(row, col)])
my_bases = np.copy(observation['rewards_bases_ships'][0][1])
for r, c in ignore_abandoned:
my_bases[r, c] = 0
num_my_bases = my_bases.sum()
if num_my_bases > 0:
my_base_positions = np.where(my_bases)
for base_id in range(num_my_bases):
base_row = my_base_positions[0][base_id]
base_col = my_base_positions[1][base_id]
base_dms.append(DISTANCE_MASKS[(base_row, base_col)])
base_distances.append(DISTANCES[(base_row, base_col)])
if base_dms:
base_nearest_distance_scores = np.stack(base_dms).max(0)
all_base_distances = np.stack(base_distances)
else:
base_nearest_distance_scores = np.ones((grid_size, grid_size))
all_base_distances = 99*np.ones((1, grid_size, grid_size))
nearest_base_distances = np.min(all_base_distances, 0)
return (base_nearest_distance_scores, all_base_distances,
nearest_base_distances)
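# Note: when there are no (non-abandoned) bases, the nearest base distance
# defaults to 99 everywhere, which should effectively disable the near-base
# logic in the callers.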
def get_valid_opponent_ship_actions(
config, rewards_bases_ships, halite_ships, size, history,
nearest_base_distances, observation, env_config):
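# Relative directions are encoded as (row_delta, col_delta) tuples: (0, 0) is
# standing still, (-1, 0)/(1, 0) decrease/increase the row (NORTH/SOUTH) and
# (0, -1)/(0, 1) decrease/increase the column (WEST/EAST);
# RELATIVE_DIR_TO_DIRECTION_MAPPING translates them to the named
# MOVE_DIRECTIONS.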
opponent_ships_sensible_actions = {}
opponent_ships_sensible_actions_no_risk = {}
boxed_in_zero_halite_opponents = []
likely_convert_opponent_positions = []
possible_convert_opponent_positions = []
num_agents = len(rewards_bases_ships)
convert_cost = env_config.convertCost
stacked_bases = np.stack([rbs[1] for rbs in rewards_bases_ships])
stacked_ships = np.stack([rbs[2] for rbs in rewards_bases_ships])
num_players = stacked_ships.shape[0]
grid_size = stacked_ships.shape[1]
player_base_ids = -1*np.ones((grid_size, grid_size))
boxed_in_attack_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
boxed_in_opponent_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
opponent_single_escape_pos = np.zeros(
(grid_size, grid_size), dtype=np.bool)
single_escape_mapping = {}
for i in range(num_players):
player_base_ids[stacked_bases[i]] = i
for i in range(1, num_agents):
opponent_ships = stacked_ships[i]
enemy_ships = np.delete(stacked_ships, (i), axis=0).sum(0)
ship_pos = np.where(opponent_ships)
num_ships = ship_pos[0].size
for j in range(num_ships):
valid_rel_directions = copy.copy(RELATIVE_DIRECTIONS)
valid_rel_directions_no_move_risk = copy.copy(RELATIVE_DIRECTIONS)
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
for row_diff in range(-2, 3):
for col_diff in range(-2, 3):
distance = (np.abs(row_diff) + np.abs(col_diff))
if distance == 1 or distance == 2:
other_row = (row + row_diff) % size
other_col = (col + col_diff) % size
if enemy_ships[other_row, other_col]:
hal_diff = halite_ships[other_row, other_col] - ship_halite
# if observation['step'] == 189 and row == 14 and col == 2:
# import pdb; pdb.set_trace()
ignores_move_collision = False
risky_stay_still_collision = False
if halite_ships[row, col] == halite_ships[
other_row, other_col]:
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with
# near base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
other_row, other_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance) + (
'_ever_risky')
if distance == 2:
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(
0) + '_ever_risky'
risky_stay_still_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k_dist_zero]
# if ignores_move_collision and distance == 1:
# import pdb; pdb.set_trace()
# x=1
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
if not ignores_move_collision:
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
for d in rem_dirs:
if d in valid_rel_directions:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 13 and col == 13:
# import pdb; pdb.set_trace()
# Don't check for risky opponent zero halite behavior
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
for d in rem_dirs:
if d in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(d)
# Prune for opponent base positions
rem_dirs = []
for rel_dir in valid_rel_directions:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
rem_dirs = []
for rel_dir in valid_rel_directions_no_move_risk:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions_no_move_risk.remove(d)
if len(valid_rel_directions) == 0:
player_halite_budget = observation['rewards_bases_ships'][i][0]
if ((ship_halite + player_halite_budget) >= convert_cost):
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][1]:
likely_convert_opponent_positions.append((row, col))
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][0]:
possible_convert_opponent_positions.append((row, col))
if ship_halite > 0:
for d in MOVE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
boxed_in_attack_squares[move_row, move_col] = True
boxed_in_opponent_ids[move_row, move_col] = i
if ship_halite == 0 and len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)):
boxed_in_zero_halite_opponents.append((row, col))
if len(valid_rel_directions_no_move_risk) == 1:
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[
valid_rel_directions_no_move_risk[0]]
escape_square = move_ship_row_col(row, col, escape_dir, grid_size)
opponent_single_escape_pos[escape_square] = 1
single_escape_mapping[(row, col)] = escape_square
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
# Do another pass over the zero halite ships to figure out whether they are
# boxed in by the escape squares of their own non zero halite ships - these
# ships will very likely take risky actions and should therefore be avoided
if np.any(opponent_single_escape_pos):
for j in range(num_ships):
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
if ship_halite == 0:
valid_rel_directions = opponent_ships_sensible_actions[(row, col)]
valid_rel_directions_no_move_risk = (
opponent_ships_sensible_actions_no_risk[row, col])
# if observation['step'] == 146 and row == 15 and col == 12:
# import pdb; pdb.set_trace()
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
for d in NOT_NONE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if opponent_single_escape_pos[move_row, move_col]:
my_escape_square = False
if (row, col) in single_escape_mapping:
my_escape_square = (move_row, move_col) == (
single_escape_mapping[row, col])
if my_escape_square:
# Still treat it as a bad direction if there is another ship
# that has my escape square as its only escape square
num_escape_count = np.array(
[v == (move_row, move_col) for v in (
single_escape_mapping.values())]).sum()
my_escape_square = num_escape_count == 1
if not my_escape_square:
avoid_rel_direction = RELATIVE_DIR_MAPPING[d]
if avoid_rel_direction in valid_rel_directions:
valid_rel_directions.remove(avoid_rel_direction)
if avoid_rel_direction in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(avoid_rel_direction)
if (len(valid_rel_directions_no_move_risk) == 0 or (
len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)))) and (
not (row, col) in boxed_in_zero_halite_opponents):
# print("AVOIDING chained zero halite collision",
# observation['step'], row, col)
boxed_in_zero_halite_opponents.append((row, col))
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
return (opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, boxed_in_attack_squares,
boxed_in_opponent_ids, boxed_in_zero_halite_opponents,
likely_convert_opponent_positions,
possible_convert_opponent_positions)
def scale_attack_scores_bases_ships(
config, observation, player_obs, spawn_cost, non_abandoned_base_distances,
weighted_base_mask, steps_remaining, obs_halite, halite_ships, history,
smoothed_halite, player_influence_maps,
nearest_base_distances_with_my_excluded, player_ids,
laplace_smoother_rel_ship_count=4, initial_normalize_ship_diff=10,
final_normalize_ship_diff=3):
stacked_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_bases[base_pos] = 0
stacked_opponent_bases = stacked_bases[1:]
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
stacked_opponent_ships = stacked_ships[1:]
base_counts = stacked_opponent_bases.sum((1, 2))
my_ship_count = len(player_obs[2])
ship_counts = stacked_opponent_ships.sum((1, 2))
grid_size = stacked_opponent_bases.shape[1]
approximate_scores = history['current_scores']
num_players = stacked_bases.shape[0]
player_ranks = np.zeros(num_players)
for i in range(num_players):
player_ranks[i] = (approximate_scores >= approximate_scores[i]).sum()
# print(approximate_scores)
# Factor 1: an opponent with less bases is more attractive to attack
base_count_multiplier = np.where(base_counts == 0, 0, 1/(base_counts+1e-9))
# Factor 2: an opponent that is closer in score is more attractive to attack
spawn_diffs = (approximate_scores[0] - approximate_scores[1:])/spawn_cost
abs_spawn_diffs = np.abs(spawn_diffs)
currently_winning = approximate_scores[0] >= approximate_scores[1:]
approximate_score_diff = approximate_scores[0] - approximate_scores[1:]
normalize_diff = initial_normalize_ship_diff - observation['relative_step']*(
initial_normalize_ship_diff-final_normalize_ship_diff)
abs_rel_normalized_diff = np.maximum(
0, (normalize_diff-abs_spawn_diffs)/normalize_diff)
rel_score_max_y = initial_normalize_ship_diff/normalize_diff
rel_score_multiplier = abs_rel_normalized_diff*rel_score_max_y
# Factor 3: an opponent with less ships is more attractive to attack since it
# is harder for them to defend the base
rel_ship_count_multiplier = (my_ship_count+laplace_smoother_rel_ship_count)/(
ship_counts+laplace_smoother_rel_ship_count)
# Additional term: attack bases nearby my main base
opponent_bases = stacked_opponent_bases.sum(0).astype(np.bool)
if opponent_bases.sum() > 0 and non_abandoned_base_distances.max() > 0:
additive_nearby_main_base = 3/max(0.15, observation['relative_step'])/(
1.5**non_abandoned_base_distances)/(
weighted_base_mask[my_bases].sum())
additive_nearby_main_base[~opponent_bases] = 0
else:
additive_nearby_main_base = 0
attack_multipliers = base_count_multiplier*rel_score_multiplier*(
rel_ship_count_multiplier)
tiled_multipliers = np.tile(attack_multipliers.reshape((-1, 1, 1)),
[1, grid_size, grid_size])
# if observation['step'] == 391:
# import pdb; pdb.set_trace()
opponent_bases_scaled = (stacked_opponent_bases*tiled_multipliers).sum(0) + (
additive_nearby_main_base)
# Compute the priority of attacking the ships of opponents
opponent_ships_scaled = np.maximum(0, 1 - np.abs(
approximate_scores[0]-approximate_scores[1:])/steps_remaining/10)
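  # The ship attack priority decays linearly with the absolute score gap per
  # remaining step: opponents whose score is within about 10*steps_remaining
  # of mine still get a positive priority, more distant scores are ignored.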
# print(observation['step'], opponent_ships_scaled, approximate_scores)
# if observation['step'] == 300:
# import pdb; pdb.set_trace()
# If I am winning by a considerable margin before the game is over, and the
# number three is far behind the number two: go ballistic on the number two
# Prefer opponent bases that are close to my bases and halite, and where the
# opponent has a relatively low density
# Make sure to guarantee some continuity with a start and stop mode
# Ballistic scenarios:
# - I am well ahead of all opponents: target the initial best agent
# - I am winning with a solid margin and the number three is far behind
# the number two: target the number two
# - I am in a close fight with the number two/one and the number three is
# very far behind: target the number two
winning_massively = np.all(spawn_diffs >= (
18-9*observation['relative_step']))
if not winning_massively:
history['ballistic_early_best_target_mode'] = False
winning_very_clearly = np.all(spawn_diffs >= (
14-7*observation['relative_step']))
winning_clearly = np.all(spawn_diffs >= (8-4*observation['relative_step']))
winning_considerably = np.all(spawn_diffs >= (
6-4*observation['relative_step'] + int(history[
'ballistic_early_best_target_mode'])))
winning_massively_near_end_game = winning_massively and observation[
'relative_step'] > 0.75
winning_massively_before_end_game = winning_massively and not (
winning_massively_near_end_game)
first_opp_id = np.argsort(spawn_diffs)[0]
second_opp_id = np.argsort(spawn_diffs)[1]
second_third_spawn_diff = spawn_diffs[second_opp_id] - spawn_diffs[
first_opp_id]
very_tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 1 and (
spawn_diffs[second_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 3 and (
spawn_diffs[second_opp_id] >= (8-6*observation['relative_step']))
prev_ballistic_mode = history['ballistic_mode']
should_start_ballistic = (not winning_massively_before_end_game) and (
winning_clearly and second_third_spawn_diff > (
7-2*observation['relative_step']) or very_tight_fight_for_first or (
winning_massively_near_end_game)) and (
my_ship_count >= 15-max(0, 40*(observation['relative_step']-0.8)))
should_continue_ballistic = not (
winning_massively_before_end_game) and (winning_very_clearly or (
winning_clearly and (second_third_spawn_diff > 1)) or (
winning_considerably and (
second_third_spawn_diff > (2-observation['relative_step']))) or (
tight_fight_for_first)
) and (my_ship_count >= 10-max(0, 20*(observation['relative_step']-0.8)))
ballistic_mode = should_start_ballistic or (
prev_ballistic_mode and should_continue_ballistic)
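  # The start condition is stricter than the continue condition, which gives
  # the ballistic mode some hysteresis: once triggered it is not abandoned
  # after a small swing in the score differences.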
# Move on to the next target in line when the preferred opponent has no bases
# and no ships left
if history['ballistic_early_best_targets_sorted'] is not None:
for opponent_id in history['ballistic_early_best_targets_sorted']:
ballistic_early_best_target_mode_target = opponent_id
num_opponent_bases = stacked_bases[opponent_id+1].sum()
num_opponent_ships = stacked_ships[opponent_id+1].sum()
if num_opponent_bases > 0 or num_opponent_ships > 0:
break
else:
ballistic_early_best_target_mode_target = first_opp_id
# print(observation['step'], ballistic_early_best_target_mode_target)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
# Ballistic early best target mode override of the opponent id: prefer to
# attack opponents that have a base which is close to one of my non
# abandoned bases
opponent_base_positions = np.where(stacked_opponent_bases.sum(0) > 0)
opponent_near_my_base_distances = nearest_base_distances_with_my_excluded[
opponent_base_positions]
targeted_base_override = None
if np.any(opponent_base_positions) and winning_very_clearly and (
opponent_near_my_base_distances.min() < 6):
prev_ballistic_target_override = history['prev_ballistic_target_override']
if history['prev_ballistic_target_override'] is not None and (
opponent_bases[prev_ballistic_target_override]):
targeted_base_override = prev_ballistic_target_override
else:
# Sort annoying bases by score: prefer to attack opponent bases that
# belong to the best opponent
smoothed_halite = smooth2d(observation['halite'])
opponent_near_my_base_scores = opponent_near_my_base_distances + 0.6*(
player_ranks[player_ids[opponent_base_positions]-1]) - 1e-9*(
smoothed_halite[opponent_base_positions])
target_base_id = np.argmin(opponent_near_my_base_scores)
targeted_base_override = (
opponent_base_positions[0][target_base_id],
opponent_base_positions[1][target_base_id])
history['prev_ballistic_target_override'] = targeted_base_override
if ballistic_mode and not prev_ballistic_mode and (
winning_massively_near_end_game):
# Switch to early best target mode - override of the target id
print(observation['step'], "Start attack on early best target",
ballistic_early_best_target_mode_target+1)
ballistic_early_best_target_mode = True
ballistic_target_id = ballistic_early_best_target_mode_target
elif ballistic_mode:
ballistic_early_best_target_mode = history[
'ballistic_early_best_target_mode'] and winning_very_clearly
if ballistic_early_best_target_mode or winning_massively_near_end_game:
# Early best target mode
ballistic_target_id = ballistic_early_best_target_mode_target
else:
# Standard ballistic mode
ballistic_target_id = first_opp_id
# print(observation['step'], "Winning massively near end?",
# winning_massively_near_end_game, ballistic_target_id)
else:
ballistic_early_best_target_mode = False
# Consider going ballistic on the nearest contender for the second place
# when the first place no longer seems possible
first_out_of_reach = spawn_diffs.min() <= (
-40+36*observation['relative_step']) # This should be conservative
if first_out_of_reach and np.abs(spawn_diffs[first_opp_id]) > np.abs(
spawn_diffs[second_opp_id]):
ballistic_target_id = second_opp_id
third_opp_id = np.argsort(spawn_diffs)[2]
spawn_diffs_not_best = np.array([spawn_diffs[i] for i in range(3) if (
not i == first_opp_id)])
winning_clearly_second = np.all(
spawn_diffs_not_best >= (8-4*observation['relative_step']))
winning_considerably_second = np.all(spawn_diffs_not_best >= (
6-4*observation['relative_step']))
third_fourth_spawn_diff = spawn_diffs[third_opp_id] - (
spawn_diffs[second_opp_id])
very_tight_fight_for_second = (
np.abs(spawn_diffs[second_opp_id]) < np.abs(
spawn_diffs[third_opp_id])/2) and (
spawn_diffs[third_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_second = (
np.abs(spawn_diffs[second_opp_id]) < np.abs(
spawn_diffs[third_opp_id])) and (
spawn_diffs[third_opp_id] >= (10-7*observation['relative_step']))
should_start_ballistic = (
winning_clearly_second and third_fourth_spawn_diff > (
4-2*observation['relative_step']) or (
very_tight_fight_for_second)) and (
my_ship_count >= 15-max(0, 40*(observation['relative_step']-0.8)))
should_continue_ballistic = ((
winning_clearly_second and (third_fourth_spawn_diff > 1)) or (
winning_considerably_second and (
third_fourth_spawn_diff > (2-observation['relative_step']))) or (
tight_fight_for_second)
) and (my_ship_count >= 10-max(
0, 20*(observation['relative_step']-0.8)))
ballistic_mode = should_start_ballistic or (
prev_ballistic_mode and should_continue_ballistic)
# if observation['step'] == 363:
# import pdb; pdb.set_trace()
# if ballistic_mode:
# print("SECOND BALLISTIC MODE", observation['step'],
# ballistic_target_id)
if not ballistic_mode:
ballistic_target_id = 0 # This could be 1 or 2 as well
history['ballistic_early_best_target_mode'] = (
ballistic_early_best_target_mode)
if not ballistic_mode and targeted_base_override is not None:
print("Go ballistic on nearby base", observation['step'],
targeted_base_override)
ballistic_mode = True
ballistic_target_id = np.argmax(base_counts)
num_target_bases = base_counts[ballistic_target_id]
ballistic_attack_base_targets = []
if ballistic_mode and num_target_bases > 0:
target_bases = stacked_opponent_bases[ballistic_target_id]
target_base_locations = np.where(target_bases)
attack_target_base_scores = np.zeros(num_target_bases)
my_base_density = smooth2d(my_bases, 10)
for base_id in range(num_target_bases):
base_row = target_base_locations[0][base_id]
base_col = target_base_locations[1][base_id]
attack_target_base_scores[base_id] = 5e-4*smoothed_halite[
base_row, base_col] + player_influence_maps[0, base_row, base_col] - (
player_influence_maps[ballistic_target_id+1, base_row, base_col]) + (
100*int((base_row, base_col) in history['prev_step'][
'ballistic_attack_base_targets'])) + 10*my_base_density[
base_row, base_col]
# import pdb; pdb.set_trace()
ordered_base_ids = np.argsort(-attack_target_base_scores)
num_attacked_bases = 1 # 1 is plenty of aggression for the world
for attack_id in range(num_attacked_bases):
if attack_id == 0 and targeted_base_override is not None:
# print("Targeted base override", observation['step'],
# targeted_base_override)
base_row, base_col = targeted_base_override
else:
base_id = ordered_base_ids[attack_id]
base_row = target_base_locations[0][base_id]
base_col = target_base_locations[1][base_id]
opponent_bases_scaled[base_row, base_col] = 1e4
ballistic_attack_base_targets.append((base_row, base_col))
del_keys = []
for k in history['camping_ships_strategy']:
if history['camping_ships_strategy'][k][5] in (
ballistic_attack_base_targets):
del_keys.append(k)
for k in del_keys:
del history['camping_ships_strategy'][k]
# print(observation['step'], ballistic_mode, ballistic_attack_base_targets)
history['ballistic_mode'] = ballistic_mode
return (opponent_bases_scaled, opponent_ships_scaled,
abs_rel_normalized_diff, currently_winning, approximate_score_diff,
history, ballistic_attack_base_targets)
def get_influence_map(config, stacked_bases, stacked_ships, halite_ships,
observation, player_obs, smooth_kernel_dim=7):
# FUTURE WORK: incorporate the number of ships in computing the base weights
# Reasoning: a base without ships is not really a threat
all_ships = stacked_ships.sum(0).astype(np.bool)
my_ships = stacked_ships[0].astype(np.bool)
if my_ships.sum() == 0:
return None, None, None, None, None, None
num_players = stacked_ships.shape[0]
grid_size = my_ships.shape[0]
ship_range = 1-config['influence_map_min_ship_weight']
all_ships_halite = halite_ships[all_ships]
unique_vals, unique_counts = np.unique(
all_ships_halite, return_counts=True)
assert np.all(np.diff(unique_vals) > 0)
unique_halite_vals = np.sort(unique_vals).astype(np.int).tolist()
num_ships = all_ships_halite.size
halite_ranks = [np.array(
[unique_halite_vals.index(hs) for hs in halite_ships[
stacked_ships[i]]]) for i in range(num_players)]
less_rank_cum_counts = np.cumsum(unique_counts)
num_unique = unique_counts.size
halite_rank_counts = [np.array(
[less_rank_cum_counts[r-1] if r > 0 else 0 for r in (
halite_ranks[i])]) for i in range(num_players)]
ship_weights = [1 - r/(num_ships-1+1e-9)*ship_range for r in (
halite_rank_counts)]
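  # Sketch of the weighting: a ship with the lowest halite on the board gets
  # weight 1, a ship with the (unique) highest halite gets approximately
  # config['influence_map_min_ship_weight'], and ships in between are
  # interpolated linearly by their halite rank.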
raw_influence_maps = np.zeros((num_players, grid_size, grid_size))
raw_influence_maps_unweighted = np.zeros((num_players, grid_size, grid_size))
influence_maps = np.zeros((num_players, grid_size, grid_size))
influence_maps_unweighted = np.zeros((num_players, grid_size, grid_size))
for i in range(num_players):
raw_influence_maps[i][stacked_ships[i]] += ship_weights[i]
raw_influence_maps[i][stacked_bases[i]] += config[
'influence_map_base_weight']
raw_influence_maps_unweighted[i][stacked_ships[i]] += 1
raw_influence_maps_unweighted[i][stacked_bases[i]] += 1
influence_maps[i] = smooth2d(raw_influence_maps[i],
smooth_kernel_dim=smooth_kernel_dim)
influence_maps_unweighted[i] = smooth2d(
raw_influence_maps_unweighted[i], smooth_kernel_dim=smooth_kernel_dim)
my_influence = influence_maps[0]
max_other_influence = influence_maps[1:].max(0)
influence_map = my_influence - max_other_influence
influence_map_unweighted = influence_maps_unweighted[0] - (
influence_maps_unweighted[1:].sum(0))
# Define the escape influence map
rem_other_influence = influence_maps[1:].sum(0) - max_other_influence
escape_influence_map = 3*my_influence-(
2*max_other_influence+rem_other_influence)
escape_influence_probs = np.exp(np.minimum(0, escape_influence_map)/config[
'escape_influence_prob_divisor'])
# Derive the priority scores based on the influence map
priority_scores = 1/(1+np.abs(influence_map))
# Extract a dict of my ship weights
ship_priority_weights = {}
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = halite_ships[row, col]
halite_rank = unique_halite_vals.index(ship_halite)
ship_priority_weights[ship_k] = 1 - halite_rank/(
num_unique-1+1e-9)*ship_range
return (influence_map, influence_map_unweighted, influence_maps,
priority_scores, ship_priority_weights, escape_influence_probs)
# Compute the weighted base mask - the base with value one represents the
# main base and the values are used as a multiplier in the return to base
# scores.
def get_weighted_base_mask(stacked_bases, stacked_ships, observation,
history, consistent_main_base_bonus=3):
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_bases[base_pos] = 0
num_bases = my_bases.sum()
my_base_locations = np.where(my_bases)
grid_size = stacked_bases.shape[1]
ship_diff_smoothed = smooth2d(stacked_ships[0] - stacked_ships[1:].sum(0))
if num_bases == 0:
base_mask = np.ones((grid_size, grid_size))
main_base_distances = -1*np.ones((grid_size, grid_size))
non_abandoned_base_distances = -1*np.ones((grid_size, grid_size))
elif num_bases >= 1:
all_non_abandoned_base_distances = []
for base_id in range(num_bases):
base_row = my_base_locations[0][base_id]
base_col = my_base_locations[1][base_id]
all_non_abandoned_base_distances.append(DISTANCES[
base_row, base_col])
non_abandoned_base_distances = np.stack(
all_non_abandoned_base_distances).min(0)
# Add a bonus to identify the main base id, but don't include the bonus
# in the base scaling
ship_diff_smoothed_with_bonus = np.copy(ship_diff_smoothed)
prev_main_base_location = history['prev_step']['my_main_base_location']
# print(observation['step'], prev_main_base_location, num_bases)
if prev_main_base_location[0] >= 0:
ship_diff_smoothed_with_bonus[prev_main_base_location] += (
consistent_main_base_bonus)
base_densities = ship_diff_smoothed[my_base_locations]
base_densities_with_bonus = ship_diff_smoothed_with_bonus[
my_base_locations]
highest_base_density_with_bonus = base_densities_with_bonus.max()
best_ids = np.where(
base_densities_with_bonus == highest_base_density_with_bonus)[0]
highest_base_density = base_densities[best_ids[0]]
# Subtract a small value from the non-max densities to break rare ties
main_base_row = my_base_locations[0][best_ids[0]]
main_base_col = my_base_locations[1][best_ids[0]]
main_base_distances = DISTANCES[main_base_row, main_base_col]
all_densities = np.minimum(ship_diff_smoothed, highest_base_density-1e-5)
all_densities[main_base_row, main_base_col] += 1e-5
# Linearly compute the weighted base mask: 1 is my best base and 0 is the
# lowest ship_diff_smoothed value
all_densities -= all_densities.min()
base_mask = all_densities/all_densities.max()
return (base_mask, main_base_distances, non_abandoned_base_distances,
ship_diff_smoothed)
# Force returning to a base when the episode is almost over
def force_return_base_end_episode(
my_bases, base_return_grid_multiplier, main_base_distances, row, col,
steps_remaining, opponent_less_halite_ships, weighted_base_mask,
safe_to_collect):
num_bases = my_bases.sum()
base_positions = np.where(my_bases)
# List the bases I *can* return to
can_return_scores = np.zeros(num_bases)
for i in range(num_bases):
base_row = base_positions[0][i]
base_col = base_positions[1][i]
base_distance = DISTANCES[row, col][base_row, base_col]
threat_mask = np.logical_and(
DISTANCES[(row, col)] <= base_distance,
DISTANCES[(base_row, base_col)] <= base_distance)
if base_distance > 1:
threat_mask[row, col] = 0
threat_mask[base_row, base_col] = 0
threat_ships_mask = opponent_less_halite_ships[threat_mask]
can_return_scores[i] = (base_distance <= steps_remaining)*(10+
max(int(safe_to_collect[row, col]),
weighted_base_mask[base_row, base_col]) - 5*(
threat_ships_mask.mean()) - base_distance/30)
# if observation['step'] == 384 and row == 8 and col == 11:
# import pdb; pdb.set_trace()
# Force an emergency return if the best return scores demand an urgent
# return in order to bring the halite home before the episode is over
end_game_base_return = False
if num_bases > 0:
best_return_id = np.argmax(can_return_scores)
best_base_row = base_positions[0][best_return_id]
best_base_col = base_positions[1][best_return_id]
best_base_distance = DISTANCES[row, col][best_base_row, best_base_col]
end_game_base_return = best_base_distance in [
steps_remaining-1, steps_remaining]
if end_game_base_return:
base_return_grid_multiplier[best_base_row, best_base_col] += 1e15
return base_return_grid_multiplier, end_game_base_return
def edge_aware_square_subset_mask(data, row, col, window, box, grid_size):
# Figure out how many rows to roll the data and box to end up with a
# contiguous subset
min_row = row - window
max_row = row + window
if min_row < 0:
data = np.roll(data, -min_row, axis=0)
box = np.roll(box, -min_row, axis=0)
elif max_row >= grid_size:
data = np.roll(data, grid_size-max_row-1, axis=0)
box = np.roll(box, grid_size-max_row-1, axis=0)
# Figure out how many columns to roll the data and box to end up with a
# contiguous subset
min_col = col - window
max_col = col + window
if min_col < 0:
data = np.roll(data, -min_col, axis=1)
box = np.roll(box, -min_col, axis=1)
elif max_col >= grid_size:
data = np.roll(data, grid_size-max_col-1, axis=1)
box = np.roll(box, grid_size-max_col-1, axis=1)
return data[box]
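# Example: for grid_size 21, window 3 and a ship at row 1, the data and box
# are rolled down by 2 rows so that torus rows -2..4 become the contiguous
# rows 0..6, after which the boolean box mask can simply be applied.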
def update_scores_opponent_boxing_in(
all_ship_scores, stacked_ships, observation, env_config,
opponent_ships_sensible_actions, halite_ships, steps_remaining, player_obs,
np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission,
my_defend_base_ship_positions, env_observation, player_influence_maps,
override_move_squares_taken, ignore_convert_positions,
convert_unavailable_positions, always_attack_opponent_id,
num_non_abandoned_bases, likely_convert_opponent_positions,
possible_convert_opponent_positions, my_current_base_distances,
box_in_window=3, min_attackers_to_box=4):
# Loop over the opponent ships and derive if I can box them in
# For now this is just greedy. We should probably consider decoupling finding
# targets from actually boxing in.
# FUTURE WORK: proper handling of opponent bases
opponent_positions = np.where(stacked_ships[1:].sum(0) > 0)
opponent_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])[1:].sum(0)
num_opponent_ships = opponent_positions[0].size
double_window = box_in_window*2
dist_mask_dim = 2*double_window+1
nearby_rows = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
nearby_cols = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
ships_available = np.copy(stacked_ships[0]) & (~on_rescue_mission) & (
~my_defend_base_ship_positions) & (~convert_unavailable_positions)
boxing_in = np.zeros_like(on_rescue_mission)
grid_size = stacked_ships.shape[1]
# ship_pos_to_key = {v[0]: k for k, v in player_obs[2].items()}
prev_step_boxing_in_ships = history['prev_step_boxing_in_ships']
num_players = stacked_ships.shape[0]
spawn_cost = env_config.spawnCost
ship_pos_to_key = {}
for i in range(num_players):
ship_pos_to_key.update({
v[0]: k for k, v in env_observation.players[i][2].items()})
# Loop over the camping ships and exclude the ones from the available mask
# that have flagged they are not available for boxing in
camping_ships_strategy = history['camping_ships_strategy']
for ship_k in camping_ships_strategy:
if not camping_ships_strategy[ship_k][3]:
camping_row, camping_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[camping_row, camping_col] = 0
# Loop over the ships that attack opponent campers and exclude them from the
# available mask
attack_opponent_campers = history['attack_opponent_campers']
for ship_k in attack_opponent_campers:
attacking_camper_row, attacking_camper_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[attacking_camper_row, attacking_camper_col] = 0
# Loop over the ships that are stuck in a loop and mark them as unavailable
for ship_k in history['avoid_cycle_actions']:
cycle_row, cycle_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ships_available[cycle_row, cycle_col] = 0
original_ships_available = np.copy(ships_available)
my_ship_density = smooth2d(ships_available, smooth_kernel_dim=2)
# Compute the priorities of attacking each ship
# Compute the minimum opponent halite in the neighborhood of each square
# by looping over all opponent ships
attack_ship_priorities = np.zeros(num_opponent_ships)
near_opponent_min_halite = np.ones((grid_size, grid_size))*1e6
near_opponent_2_min_halite = np.ones((grid_size, grid_size))*1e6
near_opponent_specific_2_min_halite = [
np.ones((grid_size, grid_size))*1e6 for _ in range(num_players)]
should_attack = np.zeros(num_opponent_ships, dtype=np.bool)
for i in range(num_opponent_ships):
row = opponent_positions[0][i]
col = opponent_positions[1][i]
opponent_ship_k = ship_pos_to_key[row*grid_size+col]
boxing_in_prev_step = opponent_ship_k in prev_step_boxing_in_ships
opponent_halite = halite_ships[row, col]
clipped_opponent_halite = min(spawn_cost, opponent_halite)
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
attack_ship_priorities[i] = 1e5*boxing_in_prev_step + (
clipped_opponent_halite) + 1000*(
opponent_ships_scaled[opponent_id-1]) + 1000*my_ship_density[row, col]
near_opp_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, box_in_window)]
near_opp_2_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)]
near_opponent_min_halite[near_opp_mask] = np.minimum(
opponent_halite, near_opponent_min_halite[near_opp_mask])
near_opponent_2_min_halite[near_opp_2_mask] = np.minimum(
opponent_halite, near_opponent_2_min_halite[near_opp_2_mask])
near_opponent_specific_2_min_halite[opponent_id][near_opp_2_mask] = (
np.minimum(opponent_halite,
near_opponent_specific_2_min_halite[opponent_id][
near_opp_2_mask]))
# if observation['step'] == 163 and row == 2:
# import pdb; pdb.set_trace()
should_attack[i] = (main_base_distances[row, col] >= (9-(
observation['relative_step']*6 + 3*num_non_abandoned_bases)) or (
opponent_halite < history[
'inferred_boxed_in_conv_threshold'][opponent_id][0]) or (
always_attack_opponent_id == opponent_id)) and not (
(row, col) in ignore_convert_positions)
box_opponent_positions = []
boxing_in_ships = []
ships_on_box_mission = {}
opponent_ship_order = np.argsort(-attack_ship_priorities)
for i in range(num_opponent_ships):
opponent_ship_id = opponent_ship_order[i]
row = opponent_positions[0][opponent_ship_id]
col = opponent_positions[1][opponent_ship_id]
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
opponent_ship_k = ship_pos_to_key[row*grid_size+col]
sensible_target_actions = opponent_ships_sensible_actions[row, col]
target_halite = halite_ships[row, col]
my_less_halite_mask = np.logical_and(
halite_ships < target_halite, ships_available)
# if observation['step'] == 210 and row == 1 and col == 8:
# import pdb; pdb.set_trace()
# Drop non zero halite ships towards the end of a game (they should return)
my_less_halite_mask = np.logical_and(
my_less_halite_mask, np.logical_or(
halite_ships == 0, steps_remaining > 20))
max_dist_mask = ROW_COL_MAX_DISTANCE_MASKS[(row, col, double_window)]
my_less_halite_mask &= max_dist_mask
box_pos = ROW_COL_BOX_MAX_DISTANCE_MASKS[row, col, double_window]
# if observation['step'] == 157 and row == 13 and col == 1:
# import pdb; pdb.set_trace()
if my_less_halite_mask.sum() >= min_attackers_to_box and should_attack[
opponent_ship_id]:
# Look up the minimum nearby opponent halite at the square midway between
# each of my attackers and the target - don't attack when there is a lower
# halite opponent ship near that midpoint, or when there is an equal halite
# ship near that midpoint and close to the target
my_considered_pos = np.where(my_less_halite_mask)
if my_considered_pos[0].size:
considered_rows = my_considered_pos[0]
considered_cols = my_considered_pos[1]
mid_rows = np.where(
np.abs(considered_rows-row) <= (grid_size // 2),
np.round((considered_rows*(1-1e-9)+row*(1+1e-9))/2),
np.where(considered_rows*(1-1e-9)+row*(1+1e-9) >= grid_size,
np.round(
(considered_rows*(1-1e-9)+row*(1+1e-9)-grid_size)/2),
np.mod(np.round(
(considered_rows*(1-1e-9)+row*(1+1e-9)+grid_size)/2),
grid_size))
).astype(np.int)
mid_cols = np.where(
np.abs(considered_cols-col) <= (grid_size // 2),
np.round((considered_cols*(1-1e-9)+col*(1+1e-9))/2),
np.where(considered_cols*(1-1e-9)+col*(1+1e-9) >= grid_size,
np.round(
(considered_cols*(1-1e-9)+col*(1+1e-9)-grid_size)/2),
np.mod(np.round(
(considered_cols*(1-1e-9)+col*(1+1e-9)+grid_size)/2),
grid_size))
).astype(np.int)
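      # mid_rows/mid_cols hold the wrap-aware midpoint between each candidate
      # attacker and the target; the tiny 1e-9 factors bias the rounding of
      # exact .5 midpoints towards the target square.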
# Only box in with ships that can safely do so without becoming a
# target themselves. Take more risk when the halite on board is equal
# to that of the other ships surrounding the target (typically 0 halite)
considered_to_target_distances = DISTANCES[(row, col)][
(considered_rows, considered_cols)]
considered_min_halite_limits = np.where(
considered_to_target_distances < 3, near_opponent_2_min_halite[
(mid_rows, mid_cols)], near_opponent_min_halite[
(mid_rows, mid_cols)])
drop_ids = (considered_min_halite_limits < (
halite_ships[(considered_rows, considered_cols)])) | (
(considered_min_halite_limits == (
halite_ships[(considered_rows, considered_cols)])) & (
near_opponent_specific_2_min_halite[opponent_id][
(row, col)] <= (
halite_ships[(considered_rows, considered_cols)])))
if np.any(drop_ids):
drop_row_ids = considered_rows[drop_ids]
drop_col_ids = considered_cols[drop_ids]
my_less_halite_mask[(drop_row_ids, drop_col_ids)] = 0
my_less_halite_mask_box = edge_aware_square_subset_mask(
my_less_halite_mask, row, col, double_window, box_pos,
grid_size)
nearby_less_halite_mask = my_less_halite_mask_box.reshape(
(dist_mask_dim, dist_mask_dim))
# if observation['step'] == 32:
# import pdb; pdb.set_trace()
my_num_nearby = nearby_less_halite_mask.sum()
else:
my_num_nearby = 0
if my_num_nearby >= min_attackers_to_box:
# Check all directions to make sure I can box the opponent in
can_box_in = True
box_in_mask_dirs = np.zeros(
(4, dist_mask_dim, dist_mask_dim), dtype=np.bool)
for dim_id, d in enumerate(NOT_NONE_DIRECTIONS):
dir_and_ships = BOX_DIRECTION_MASKS[(double_window, d)] & (
nearby_less_halite_mask)
if not np.any(dir_and_ships):
can_box_in = False
break
else:
box_in_mask_dirs[dim_id] = dir_and_ships
if can_box_in:
# Sketch out the escape squares for the target ship
opponent_distances = np.abs(nearby_rows-double_window) + np.abs(
nearby_cols-double_window)
opponent_euclid_distances = np.sqrt(
(nearby_rows-double_window)**2 + (
nearby_cols-double_window)**2)
nearby_mask_pos = np.where(nearby_less_halite_mask)
my_nearest_distances = np.stack([np.abs(
nearby_rows-nearby_mask_pos[0][j]) + np.abs(
nearby_cols-nearby_mask_pos[1][j]) for j in range(
my_num_nearby)])
my_nearest_euclid_distances = np.stack([np.sqrt((
nearby_rows-nearby_mask_pos[0][j])**2 + (
nearby_cols-nearby_mask_pos[1][j])**2) for j in range(
my_num_nearby)])
# No boxing in if the opponent has a base in one of the escape squares
escape_squares = opponent_distances <= my_nearest_distances.min(0)
cropped_distances = OTHER_DISTANCES[
(double_window, double_window, dist_mask_dim)]
for dim_id, d in enumerate(NOT_NONE_DIRECTIONS):
box_dir_mask = BOX_DIRECTION_MASKS[(double_window, d)]
closest_dim_distance = cropped_distances[
box_in_mask_dirs[dim_id]].min()
escape_squares[box_dir_mask] &= (
cropped_distances[box_dir_mask] <= closest_dim_distance)
if not np.any(observation['rewards_bases_ships'][opponent_id][1][
box_pos][escape_squares.flatten()]):
# Let's box the opponent in!
# We should move towards the opponent if we can do so without opening
# up an escape direction
# if observation['step'] == 32:
# import pdb; pdb.set_trace()
# Order the planning by priority of direction and distance to the
# opponent
# Reasoning: mid-distance ships plan first since that allows fast
# boxing in - the nearby ships then just have to cover the remaining
# directions.
# Ships which cover hard to cover directions plan later.
box_in_mask_dirs_sum = box_in_mask_dirs.sum((1, 2))
ship_priorities = np.zeros(my_num_nearby)
must_attack_converting_square = ((row, col) in (
likely_convert_opponent_positions)) and not (
(row, col) in ignore_convert_positions) and ((
always_attack_opponent_id == opponent_id) or (
my_current_base_distances[:, row, col].min() < 5))
threatened_one_step = set()
for j in range(my_num_nearby):
my_row = nearby_mask_pos[0][j]
my_col = nearby_mask_pos[1][j]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
ship_priorities[j] = 20/(
box_in_mask_dirs_sum[box_directions].prod())+np.abs(
opponent_distance**0.9-box_in_window**0.9)
if opponent_distance == 2 and box_directions.sum() == 2 and np.all(
box_in_mask_dirs_sum[box_directions] == 1):
two_step_dirs = [MOVE_DIRECTIONS[move_id+1] for move_id in (
np.where(box_directions)[0])]
threatened_one_step.update(two_step_dirs)
# I can always attack all escape squares if I have at least 5 ships
# at a maximum distance of two with at least one attacker on each
# half plane
vert_diff = double_window-nearby_mask_pos[0]
horiz_diff = double_window-nearby_mask_pos[1]
distances = np.abs(vert_diff) + np.abs(horiz_diff)
is_near = distances <= 2
near_vert_diff = vert_diff[is_near]
near_horiz_diff = horiz_diff[is_near]
i_can_attack_all_escape_squares = distances.min() == 1 and (
is_near.sum() >= 5) and np.sign(near_vert_diff).ptp() == 2 and (
np.sign(near_horiz_diff).ptp() == 2)
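        # np.sign(...).ptp() == 2 means the nearby attackers include ships
        # both above and below (respectively left and right of) the target,
        # i.e. there is at least one attacker on each half plane.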
if i_can_attack_all_escape_squares and (distances == 1).sum() == 1:
# I can only attack all escape squares if my attacker can be
# replaced
one_step_diff_id = np.argmin(distances)
single_attack_row = nearby_mask_pos[0][one_step_diff_id]
single_attack_col = nearby_mask_pos[1][one_step_diff_id]
can_replace = False
for row_offset in [-1, 1]:
for col_offset in [-1, 1]:
if nearby_less_halite_mask[single_attack_row + row_offset,
single_attack_col + col_offset]:
can_replace = True
break
i_can_attack_all_escape_squares = can_replace
# Decide whether we are just chasing or actually attacking the ship in
# the next move - simple rule: require at least K neighboring ships
# before attacking the position of the targeted ship, which makes it
# hard for the opponent to guess the escape direction
ship_target_1_distances = my_nearest_distances[
:, double_window, double_window] == 1
next_step_attack = (len(sensible_target_actions) == 0 and (
ship_target_1_distances.sum() > 2)) or (
i_can_attack_all_escape_squares) or (
must_attack_converting_square and np.any(
ship_target_1_distances))
# if next_step_attack and not (
# (len(sensible_target_actions) == 0 and (
# ship_target_1_distances.sum() > 2)) or (
# i_can_attack_all_escape_squares)):
# import pdb; pdb.set_trace()
opponent_boxed_bases = edge_aware_square_subset_mask(
opponent_bases, row, col, double_window, box_pos,
grid_size).reshape((dist_mask_dim, dist_mask_dim))
pos_taken = np.copy(opponent_boxed_bases)
box_override_assignment_not_next_attack = {}
if next_step_attack:
# If there is a ship that can take the position of my attacker:
# attack with that ship and replace its position.
# Otherwise pick a random attacker and keep the others in place.
# Initial approach: don't move with ships at distance 1.
ship_target_2_distance_ids = np.where(my_nearest_distances[
:, double_window, double_window] == 2)[0].tolist()
move_ids_directions_next_attack = {}
# Reorder ship_target_2_distance_ids so that the ones that can
# replace a 1-step threat are considered last, except when there is
# only a single 1-step threat (it would always move to the target).
# Also prefer to first consider ships that have only a single option
# to move towards the target
two_step_distance_scores = np.zeros(
len(ship_target_2_distance_ids))
for two_step_id, two_step_diff_id in enumerate(
ship_target_2_distance_ids):
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
mask_between = get_mask_between_exclude_ends(
my_row, my_col, double_window, double_window, dist_mask_dim)
two_step_distance_scores[two_step_id] = mask_between.sum() + 10*(
nearby_less_halite_mask[mask_between].sum())*(
ship_target_1_distances.sum() > 1)
# if observation['step'] == 134:
# import pdb; pdb.set_trace()
ship_target_2_distance_ids = np.array(
ship_target_2_distance_ids)[
np.argsort(two_step_distance_scores)].tolist()
# Add the positions of the one step attackers
for one_step_diff_id in np.where(ship_target_1_distances)[0]:
my_row = nearby_mask_pos[0][one_step_diff_id]
my_col = nearby_mask_pos[1][one_step_diff_id]
# If I only have one ship that can attack the target: attack with
# that ship!
if ship_target_1_distances.sum() == 1:
attack_direction = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)[0]
pos_taken[double_window, double_window] = True
move_ids_directions_next_attack[one_step_diff_id] = (
attack_direction)
else:
pos_taken[my_row, my_col] = 1
# if observation['step'] == 176:
# import pdb; pdb.set_trace()
two_step_pos_taken = []
while ship_target_2_distance_ids:
two_step_diff_id = ship_target_2_distance_ids.pop(0)
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
# Consider the shortest directions towards the target
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window, grid_size=1000)
has_selected_action = False
for d in shortest_directions:
# Prefer empty squares one step from the target over replacing a
# one-step threat
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
if not pos_taken[move_row, move_col] and (not (
(move_row, move_col) in two_step_pos_taken)):
two_step_pos_taken.append((move_row, move_col))
move_ids_directions_next_attack[two_step_diff_id] = d
has_selected_action = True
break
if not has_selected_action:
# Replace a 1-step threatening ship
for d in shortest_directions:
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
if pos_taken[move_row, move_col] and not pos_taken[
double_window, double_window] and not opponent_boxed_bases[
move_row, move_col]:
move_ids_directions_next_attack[two_step_diff_id] = d
# Find the ids of the 1-step ship and make sure that ship
# attacks
replaced_id = np.where(my_nearest_distances[
:, move_row, move_col] == 0)[0][0]
one_step_attack_dir = get_dir_from_target(
move_row, move_col, double_window, double_window,
grid_size=1000)[0]
move_ids_directions_next_attack[replaced_id] = (
one_step_attack_dir)
pos_taken[double_window, double_window] = True
            # Recompute the priority of the remaining two step ships:
            # process the ships with the fewest free shortest moves first
two_step_distance_scores = np.zeros(
len(ship_target_2_distance_ids))
for two_step_id, two_step_diff_id in enumerate(
ship_target_2_distance_ids):
my_row = nearby_mask_pos[0][two_step_diff_id]
my_col = nearby_mask_pos[1][two_step_diff_id]
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)
for d in shortest_directions:
move_row, move_col = move_ship_row_col(
my_row, my_col, d, size=1000)
two_step_distance_scores[two_step_id] += int(
not (pos_taken[move_row, move_col] or (
(move_row, move_col) in two_step_pos_taken)))
ship_target_2_distance_ids = np.array(
ship_target_2_distance_ids)[
np.argsort(two_step_distance_scores)].tolist()
one_step_diff_ids = np.where(ship_target_1_distances)[0]
if pos_taken[double_window, double_window]:
# Add the remaining one step attackers with stay in place actions
for one_step_diff_id in one_step_diff_ids:
if not one_step_diff_id in move_ids_directions_next_attack:
move_ids_directions_next_attack[one_step_diff_id] = None
else:
# Prefer to avoid stay in place actions with zero halite ships
real_mask_pos = (
np.mod(nearby_mask_pos[0]+row-double_window, grid_size),
np.mod(nearby_mask_pos[1]+col-double_window, grid_size)
)
one_step_halite_on_board = halite_ships[real_mask_pos][
one_step_diff_ids]
one_step_halite_on_square = obs_halite[real_mask_pos][
one_step_diff_ids]
prefers_box_in = (one_step_halite_on_board == 0) & (
one_step_halite_on_square > 0)
if np.all(~prefers_box_in):
one_step_diff_ids_attack = one_step_diff_ids
else:
one_step_diff_ids_attack = one_step_diff_ids[
prefers_box_in]
# Of the remaining attack options: prefer an attacker from the
# direction where we have the highest influence, relative to the
# targeted opponent
# one_step_attacker_id = np_rng.choice(one_step_diff_ids_attack)
my_influences = player_influence_maps[0][real_mask_pos][
one_step_diff_ids_attack]
opponent_influences = player_influence_maps[opponent_id][
real_mask_pos][one_step_diff_ids_attack]
influence_differences = my_influences - opponent_influences
one_step_attacker_id = one_step_diff_ids_attack[
np.argmax(influence_differences)]
        # Let the selected one step attacker attack the target and make
        # sure the remaining 1-step ships stay in place
for one_step_diff_id in one_step_diff_ids:
if one_step_diff_id == one_step_attacker_id:
my_row = nearby_mask_pos[0][one_step_diff_id]
my_col = nearby_mask_pos[1][one_step_diff_id]
attack_dir = get_dir_from_target(
my_row, my_col, double_window, double_window,
grid_size=1000)[0]
else:
attack_dir = None
move_ids_directions_next_attack[one_step_diff_id] = attack_dir
elif len(sensible_target_actions) == 0 or (
len(sensible_target_actions) == 1 and (
sensible_target_actions[0] == (0, 0))):
# Inspect what directions I can move right next to when the
# opponent has no valid escape actions. Use a greedy search to
# determine the action selection order
can_box_immediately = []
can_box_immediately_counts = np.zeros(4)
for j in range(my_num_nearby):
my_row = nearby_mask_pos[0][j]
my_col = nearby_mask_pos[1][j]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
if opponent_distance <= 2:
immediate_box_dirs = np.where(box_directions)[0]
can_box_immediately.append((
j, immediate_box_dirs, box_directions, my_row, my_col))
can_box_immediately_counts[box_directions] += 1
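        # can_box_immediately lists, for every ship within distance 2 of the
        # target, the directions of the target it can cover with a single move.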
can_box_progress = [list(cb) for cb in can_box_immediately]
can_box_immediately_counts_progress = np.copy(
can_box_immediately_counts)
not_boxed_dirs = np.ones(4, dtype=np.bool)
# if observation['step'] == 97:
# import pdb; pdb.set_trace()
      # Iteratively look for directions that I can box in with a single move,
      # starting with the directions the fewest of my ships can cover, while
      # the remaining directions can still be covered by my other ships
box_in_mask_rem_dirs_sum = np.copy(box_in_mask_dirs_sum)
while len(can_box_progress) > 0 and np.any(not_boxed_dirs) and (
can_box_immediately_counts_progress.sum() > 0):
considered_dir = np.argmin(
can_box_immediately_counts_progress + 100*(
can_box_immediately_counts_progress <= 0) + 1e-2*(
box_in_mask_rem_dirs_sum))
considered_dir_ids = [(
j, cb[0], box_in_mask_rem_dirs_sum[cb[1]], cb[1], cb[3],
cb[4]) for j, cb in enumerate(can_box_progress) if (
considered_dir in cb[1] and np.all(
box_in_mask_rem_dirs_sum[cb[1]] >= 1))]
num_considered_dir_ids = len(considered_dir_ids)
if num_considered_dir_ids > 0:
          # Tie breaker: prefer the ship with the fewest immediate box
          # directions and the most remaining support in those directions
if num_considered_dir_ids > 1:
scores = np.zeros(num_considered_dir_ids)
for k in range(num_considered_dir_ids):
scores[k] = 100*len(considered_dir_ids[k][2]) - (
considered_dir_ids[k][2].sum())
picked_dir_id = np.argmin(scores)
else:
picked_dir_id = 0
picked = considered_dir_ids[picked_dir_id]
box_override_assignment_not_next_attack[picked[1]] = (
considered_dir, picked[4], picked[5])
# If I move closer with a diagonal ship: subtract the
# immediate box counter for the other direction
picked_other_immediate_box_dirs = picked[3][
picked[3] != considered_dir]
can_box_immediately_counts_progress[considered_dir] = 0
can_box_immediately_counts_progress[
picked_other_immediate_box_dirs] -= 1
not_boxed_dirs[considered_dir] = 0
box_in_mask_rem_dirs_sum[picked[3]] -= 1
ship_priorities[picked[1]] -= 1e6
del can_box_progress[picked[0]]
else:
break
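      # Assign an attack square to each nearby ship in priority order while
      # keeping track of which directions around the target are already
      # covered and which are one step away from being covered.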
num_covered_directions = np.zeros(4, dtype=np.int)
num_one_step_from_covered = np.zeros(4, dtype=np.bool)
ship_order = np.argsort(ship_priorities)
box_in_mask_rem_dirs_sum = np.copy(box_in_mask_dirs_sum)
update_ship_scores = []
one_square_threats = []
almost_covered_dirs = []
for j in range(my_num_nearby):
attack_id = ship_order[j]
my_row = nearby_mask_pos[0][attack_id]
my_col = nearby_mask_pos[1][attack_id]
my_abs_row = (row+my_row-double_window) % grid_size
my_abs_col = (col+my_col-double_window) % grid_size
ship_pos = my_abs_row*grid_size+my_abs_col
ship_k = ship_pos_to_key[ship_pos]
box_directions = box_in_mask_dirs[:, my_row, my_col]
opponent_distance = np.abs(my_row-double_window) + np.abs(
my_col-double_window)
box_in_mask_rem_dirs_sum[box_directions] -= 1
# if observation['step'] == 341:
# import pdb; pdb.set_trace()
if next_step_attack:
# Increase the ship scores for the planned actions
if attack_id in move_ids_directions_next_attack:
move_dir = move_ids_directions_next_attack[attack_id]
move_row, move_col = move_ship_row_col(
my_abs_row, my_abs_col, move_dir, grid_size)
# if observation['step'] == 204:
# import pdb; pdb.set_trace()
update_ship_scores.append(
(ship_k, move_row, move_col, 2e6, opponent_distance, None,
my_abs_row, my_abs_col))
else:
# Figure out if we should use this ship to attack the target -
# there is no point in using too many ships!
# if observation['step'] == 201 and my_row == 6 and my_col == 7:
# import pdb; pdb.set_trace()
if (opponent_distance > 2) and (
(num_covered_directions[box_directions] + 0.5*(
box_in_mask_rem_dirs_sum[box_directions])).min() >= 2 and (
np.all(num_covered_directions[box_directions] > 0)) or (
box_in_mask_rem_dirs_sum[box_directions].min() > 2) and (
opponent_distance > box_in_window)):
# print("Dropping ship", my_abs_row, my_abs_col, "from attack")
continue
rel_pos_diff = (my_row-double_window, my_col-double_window)
num_covered_attacker = num_covered_directions[box_directions]
# Logic to cover a direction that is almost covered
almost_covered_override = False
if np.all((num_covered_attacker > 0) | (
box_in_mask_rem_dirs_sum[box_directions] >= 1)) & np.any(
num_one_step_from_covered) and (
box_directions.sum() == 1) and len(
threatened_one_step) > 0 and ((
np.abs(my_row - my_col) == 1) or (my_row + my_col in [
double_window-1, double_window+1])):
move_dir = None
if my_row-my_col == -1:
if WEST in threatened_one_step and my_row < double_window:
almost_covered_dir = WEST
move_dir = SOUTH
elif SOUTH in threatened_one_step and my_row > double_window:
almost_covered_dir = SOUTH
move_dir = WEST
elif my_row-my_col == 1:
if NORTH in threatened_one_step and my_row < double_window:
almost_covered_dir = NORTH
move_dir = EAST
elif EAST in threatened_one_step and my_row > double_window:
almost_covered_dir = EAST
move_dir = NORTH
elif my_row+my_col == double_window-1:
if EAST in threatened_one_step and my_row < double_window:
almost_covered_dir = EAST
move_dir = SOUTH
elif SOUTH in threatened_one_step and my_row > double_window:
almost_covered_dir = SOUTH
move_dir = EAST
elif my_row+my_col == double_window+1:
if NORTH in threatened_one_step and my_row < double_window:
almost_covered_dir = NORTH
move_dir = WEST
elif WEST in threatened_one_step and my_row > double_window:
almost_covered_dir = WEST
move_dir = NORTH
if move_dir is not None:
move_row, move_col = move_ship_row_col(
my_row, my_col, move_dir, grid_size)
if not pos_taken[move_row, move_col]:
# Override: when we are next to the target: expect opponent
# to move
almost_covered_override = True
if opponent_distance == 1:
threat_dir = OPPOSITE_MAPPING[get_dir_from_target(
my_row, my_col, double_window, double_window, 1000)[0]]
one_square_threats.append(threat_dir)
move_dir = None
else:
# Make sure that the square we want to move to is
# available
almost_covered_dirs.append(almost_covered_dir)
if not almost_covered_override:
if attack_id in box_override_assignment_not_next_attack:
attack_move_id = box_override_assignment_not_next_attack[
attack_id][0]
assert box_directions[attack_move_id]
else:
attack_dir_scores = num_covered_attacker + 0.1*(
box_in_mask_rem_dirs_sum[box_directions])
attack_dir_id = np.argmin(attack_dir_scores)
attack_move_id = np.where(box_directions)[0][attack_dir_id]
rel_pos_diff = (my_row-double_window, my_col-double_window)
attack_cover_dir = np.array(NOT_NONE_DIRECTIONS)[
attack_move_id]
one_hot_cover_dirs = np.zeros(4, dtype=bool)
one_hot_cover_dirs[attack_move_id] = 1
other_dirs_covered = one_hot_cover_dirs | (
num_covered_directions > 0) | (box_in_mask_rem_dirs_sum >= 1)
wait_reinforcements = not np.all(other_dirs_covered) or (
opponent_distance == 1)
# if observation['step'] == 357:
# import pdb; pdb.set_trace()
# print(my_row, my_col, threatened_one_step,
# num_covered_directions, num_one_step_from_covered)
if wait_reinforcements:
# Move away from the target if staying would mean having more
# halite than the target
my_next_halite = halite_ships[my_abs_row, my_abs_col] + int(
collect_rate*obs_halite[my_abs_row, my_abs_col])
if my_next_halite > target_halite:
move_away_dirs = get_dir_from_target(
double_window, double_window, my_row, my_col,
grid_size=1000)
# import pdb; pdb.set_trace()
move_dir = np_rng.choice(move_away_dirs)
else:
move_dir = None
else:
if num_covered_directions[attack_move_id] > 0:
# Move towards the target on the diagonal (empowerment)
move_penalties = 0.001*opponent_euclid_distances**4 + (
my_nearest_euclid_distances[attack_id]**4) + 1e3*(
pos_taken)
move_penalties[my_row, my_col] += 1e3
best_penalty_pos = np.where(
move_penalties == move_penalties.min())
target_move_row = best_penalty_pos[0][0]
target_move_col = best_penalty_pos[1][0]
move_dir = get_dir_from_target(
my_row, my_col, target_move_row, target_move_col,
grid_size=1000)[0]
if attack_cover_dir == NORTH:
if np.abs(rel_pos_diff[1]) < (np.abs(rel_pos_diff[0])-1):
move_dir = SOUTH
elif rel_pos_diff[1] < 0:
move_dir = EAST
else:
move_dir = WEST
elif attack_cover_dir == SOUTH:
if np.abs(rel_pos_diff[1]) < (np.abs(rel_pos_diff[0])-1):
move_dir = NORTH
elif rel_pos_diff[1] < 0:
move_dir = EAST
else:
move_dir = WEST
elif attack_cover_dir == EAST:
if np.abs(rel_pos_diff[0]) < (np.abs(rel_pos_diff[1])-1):
move_dir = WEST
elif rel_pos_diff[0] < 0:
move_dir = SOUTH
else:
move_dir = NORTH
elif attack_cover_dir == WEST:
if np.abs(rel_pos_diff[0]) < (np.abs(rel_pos_diff[1])-1):
move_dir = EAST
elif rel_pos_diff[0] < 0:
move_dir = SOUTH
else:
move_dir = NORTH
# Increase the ship scores for the planned actions
moved_rel_dir = RELATIVE_DIR_MAPPING[move_dir]
new_rel_pos = (rel_pos_diff[0] + moved_rel_dir[0],
rel_pos_diff[1] + moved_rel_dir[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if new_grid_pos[0] < 0 or new_grid_pos[1] < 0 or new_grid_pos[
0] > 2*double_window or new_grid_pos[1] > 2*double_window:
new_rel_pos = (rel_pos_diff[0], rel_pos_diff[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if pos_taken[new_grid_pos] and opponent_distance == 2:
# Override - if I can move right next to the target: do it.
shortest_directions = get_dir_from_target(
my_row, my_col, double_window, double_window, grid_size=1000)
for move_dir in shortest_directions:
moved_rel_dir = RELATIVE_DIR_MAPPING[move_dir]
new_rel_pos = (rel_pos_diff[0] + moved_rel_dir[0],
rel_pos_diff[1] + moved_rel_dir[1])
new_grid_pos = (double_window + new_rel_pos[0],
double_window + new_rel_pos[1])
if not pos_taken[new_grid_pos]:
break
move_row, move_col = move_ship_row_col(
my_abs_row, my_abs_col, move_dir, grid_size)
if not pos_taken[new_grid_pos] and not new_rel_pos == (0, 0):
# Update the covered attack directions
ship_covered_directions = np.zeros(4, dtype=np.bool)
ship_one_step_from_covered_directions = np.zeros(
4, dtype=np.bool)
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
nz_dim = int(threat_dir[0] == 0)
dir_offset = new_rel_pos[nz_dim]*threat_dir[nz_dim]
other_dir_abs_offset = np.abs(new_rel_pos[1-nz_dim])
if dir_offset > 0 and other_dir_abs_offset <= dir_offset:
covered_id = np.where(
RELATIVE_DIR_TO_DIRECTION_MAPPING[threat_dir] == (
np.array(NOT_NONE_DIRECTIONS)))[0][0]
ship_one_step_from_covered_directions[covered_id] = 1
if other_dir_abs_offset < dir_offset:
ship_covered_directions[covered_id] = 1
# if observation['step'] == 210 and row == 1 and col == 8:
# import pdb; pdb.set_trace()
# Join the attack - add actions to the list
num_covered_directions[ship_covered_directions] += 1
num_one_step_from_covered[
ship_one_step_from_covered_directions] = 1
update_ship_scores.append(
(ship_k, move_row, move_col, 2e6, opponent_distance,
np.where(ship_covered_directions)[0], my_abs_row,
my_abs_col))
pos_taken[new_grid_pos] = 1
# We can almost box the opponent in and rely on the opponent not
# taking risky actions to escape
almost_attack_nearby_blockers = False
if len(threatened_one_step) > 0 and (
len(one_square_threats+almost_covered_dirs) > 0) and not np.all(
num_covered_directions > 0) and not next_step_attack:
not_covered_dirs = [MOVE_DIRECTIONS[i+1] for i in np.where(
num_covered_directions == 0)[0]]
if len(one_square_threats) > 0 and np.all(
[d in threatened_one_step for d in not_covered_dirs]):
almost_attack_nearby_blockers = True
else:
almost_attack_nearby_blockers = len(
threatened_one_step.intersection(almost_covered_dirs)) > 0
# if observation['step'] == 87:
# import pdb; pdb.set_trace()
if next_step_attack or np.all(num_covered_directions > 0) or (
almost_attack_nearby_blockers and np.any(
num_covered_directions > 0)):
# Prune the attackers: only keep the closest two in each direction
if not next_step_attack:
drop_rows = []
distance_dir = np.array([[u[4], u[5][0]] for u in (
update_ship_scores) if u[5].size > 0])
for d_id in np.arange(4):
if (distance_dir[:, 1] == d_id).sum() > 2:
dir_rows = np.where(distance_dir[:, 1] == d_id)[0]
drop_ids = np.argsort(distance_dir[dir_rows, 0])[2:]
drop_rows.extend(dir_rows[drop_ids].tolist())
for dr in np.sort(drop_rows)[::-1]:
del update_ship_scores[dr]
# if observation['step'] == 237:
# import pdb; pdb.set_trace()
box_opponent_positions.append((row, col))
boxing_in_ships.append(opponent_ship_k)
for (ship_k, move_row, move_col, new_collect_score,
distance_to_target, _, my_abs_row, my_abs_col) in (
update_ship_scores):
# Only update the ship scores if the box in action is in my one
# step valid actions
box_dir = get_dir_from_target(
my_abs_row, my_abs_col, move_row, move_col, grid_size)[0]
if box_dir in all_ship_scores[ship_k][9]:
all_ship_scores[ship_k][0][move_row, move_col] = (
new_collect_score)
# Flag the boxing in ships as unavailable for other hunts
ships_available[my_abs_row, my_abs_col] = 0
boxing_in[my_abs_row, my_abs_col] = 1
ships_on_box_mission[ship_k] = distance_to_target
override_move_squares_taken[move_row, move_col] = 1
  # Make sure that I attack all squares where an opponent is converting
  # when I can not allow that conversion to happen
for (row, col) in possible_convert_opponent_positions:
if not (row, col) in ignore_convert_positions:
my_base_distances = my_current_base_distances[:, row, col]
must_attack_converting_square = my_base_distances.min() < (3.5 - (
observation['relative_step']))
if must_attack_converting_square and not override_move_squares_taken[
row, col]:
# Look for nearby ships of mine that can attack the converting ship
for d in NOT_NONE_DIRECTIONS:
my_row, my_col = move_ship_row_col(row, col, d, grid_size)
if original_ships_available[my_row, my_col] or (
my_defend_base_ship_positions[my_row, my_col]):
to_target_dir = OPPOSITE_MAPPING[d]
ship_pos = my_row*grid_size+my_col
ship_k = ship_pos_to_key[ship_pos]
if to_target_dir in all_ship_scores[ship_k][6]:
all_ship_scores[ship_k][0][row, col] = 1e9
boxing_in[my_row, my_col] = 1
print("ATTACKING POSSIBLY CONVERTING SHIP", observation['step'],
row, col, my_row, my_col)
break
history['prev_step_boxing_in_ships'] = boxing_in_ships
return (all_ship_scores, boxing_in, box_opponent_positions,
override_move_squares_taken, ships_on_box_mission)
def update_scores_pack_hunt(
all_ship_scores, config, stacked_ships, observation,
opponent_ships_sensible_actions, halite_ships, steps_remaining,
player_obs, np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission, boxing_in_mission,
my_defend_base_ship_positions, env_observation, box_opponent_positions,
override_move_squares_taken, player_influence_maps,
ignore_convert_positions, convert_unavailable_positions,
early_hunting_season, late_hunting_season, safe_collect_margin, spawn_cost,
change_standard_consecutive_steps=5):
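  # Coordinate the zero halite ships into a pack hunt: keep a limited number
  # of 'standard' ships for gathering and base attacks, and steer the
  # remaining zero halite ships towards opponent ships that carry halite,
  # preferring targets with few remaining escape directions.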
available_pack_hunt_ships = np.copy(stacked_ships[0])
grid_size = available_pack_hunt_ships.shape[0]
hunting_season_started = history['hunting_season_started']
prev_standard_ships = history['hunting_season_standard_ships']
# # FUTURE WORK: Make the number of standard ships a function of the hunt
# # success?
# # FUTURE WORK: Make the number of standard ships a function of ship losses?
if early_hunting_season:
max_standard_ships_hunting_season = config[
'max_standard_ships_early_hunting_season']
elif late_hunting_season:
max_standard_ships_hunting_season = max(config[
'max_standard_ships_late_hunting_season'], int(len(player_obs[2])*(
config['late_hunting_season_standard_min_fraction'])))
else:
max_standard_ships_hunting_season = max(10, int(len(player_obs[2])/2.5))
# print(observation['step'], len(player_obs[2]),
# max_standard_ships_hunting_season)
# print(observation['step'], opponent_hunt_fraction, num_my_ships,
# my_target_standard_ships, max_standard_ships_hunting_season)
  # Determine if I should preferably target a specific opponent.
  # In games with a clear gap between the top two agents and the rest,
  # where I am one of those two: mostly harass/hoard the other top
  # agent
current_scores = history['current_scores']
spawn_diffs = (current_scores[0] - current_scores[1:])/spawn_cost
first_opponent_id = np.argsort(spawn_diffs)[0]
second_opponent_id = np.argsort(spawn_diffs)[1]
my_agent_in_top_two = (spawn_diffs < 0).sum() <= 1
spawn_diff_first = np.abs(spawn_diffs[first_opponent_id])
spawn_diff_second = np.abs(spawn_diffs[second_opponent_id])
prev_targeted_hoard_mode = history['targeted_hoard_mode']
should_start_targeted_hoard_mode = my_agent_in_top_two and (
spawn_diff_second > 2*spawn_diff_first) and (spawn_diff_second > 6)
should_continue_targeted_hoard_mode = my_agent_in_top_two and (
spawn_diff_second > spawn_diff_first) and (spawn_diff_second > 4)
targeted_hoard_mode = should_start_targeted_hoard_mode or (
prev_targeted_hoard_mode and should_continue_targeted_hoard_mode)
history['targeted_hoard_mode'] = targeted_hoard_mode
preferred_victim = None
if targeted_hoard_mode:
preferred_victim = first_opponent_id+1
if should_start_targeted_hoard_mode and not prev_targeted_hoard_mode:
print(observation['step'], "Start selective hoarding of opponent",
preferred_victim)
prev_step_opponent_ship_moves = history['prev_step_opponent_ship_moves']
num_players = stacked_ships.shape[0]
ship_pos_to_key = {}
for i in range(num_players):
ship_pos_to_key.update({
v[0]: k for k, v in env_observation.players[i][2].items()})
ship_key_to_pos = {v: k for k, v in ship_pos_to_key.items()}
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
not_available_due_to_camping = np.zeros_like(available_pack_hunt_ships)
  # Loop over the camping ships and exclude from the available mask the
  # ones that have flagged they are not available for boxing in
camping_ships_strategy = history['camping_ships_strategy']
for ship_k in camping_ships_strategy:
if not camping_ships_strategy[ship_k][3]:
camping_row, camping_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_camping[camping_row, camping_col] = 1
  # Loop over the ships that attack opponent campers and exclude them from
  # the available mask
attack_opponent_campers = history['attack_opponent_campers']
for ship_k in attack_opponent_campers:
attacking_camper_row, attacking_camper_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_camping[
attacking_camper_row, attacking_camper_col] = 1
# Loop over the ships that are stuck in a loop and mark them as unavailable
not_available_due_to_cycle = np.zeros_like(available_pack_hunt_ships)
for ship_k in history['avoid_cycle_actions']:
cycle_row, cycle_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_cycle[cycle_row, cycle_col] = 1
# Loop over the ships that are temporarily assigned a collect task
not_available_due_to_temp_collect = np.zeros_like(available_pack_hunt_ships)
delete_keys = []
for ship_k in history['temporary_hoarding_collect_ships']:
if ship_k in player_obs[2]:
collect_row, collect_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
not_available_due_to_temp_collect[collect_row, collect_col] = 1
else:
delete_keys.append(ship_k)
for k in delete_keys:
history['temporary_hoarding_collect_ships'].remove(k)
# List the ships that are definitely not available for the pack hunt
# In this group:
# - Opponent base camping
# - Attack opponent base campers
  # - Ships that are on a rescue mission (rescuer and rescued)
# - Base defense emergency ships
# - Boxing in other ships
available_pack_hunt_ships &= (~not_available_due_to_camping)
available_pack_hunt_ships &= (~on_rescue_mission)
available_pack_hunt_ships &= (~my_defend_base_ship_positions)
available_pack_hunt_ships &= (~boxing_in_mission)
available_pack_hunt_ships &= (~convert_unavailable_positions)
available_pack_hunt_ships &= (~not_available_due_to_cycle)
available_pack_hunt_ships &= (~not_available_due_to_temp_collect)
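  # available_pack_hunt_ships now only contains ships that are free this step
  # to either act as standard gather/attack ships or join the pack hunt.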
# Of the remaining list: identify 'max_standard_ships_hunting_season' ships
# that are available to gather halite/attack bases.
# Preferably select ships that were also selected for these modes in the
# previous step and have halite on board.
  # Only change the gather/attack ships if one of my gatherers was destroyed.
  # Assign a new gatherer if my gatherer is assigned to the base camping
  # attack or defense (these ships tend to be indefinitely unavailable), or if
  # the ship was destroyed.
# Prefer non-zero halite ships for the initial gathering ships.
my_ship_pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
available_positions = np.where(available_pack_hunt_ships)
num_available_ships = available_pack_hunt_ships.sum()
standard_ships = []
if num_available_ships > 0:
best_standard_scores = np.zeros(num_available_ships)
pos_keys = []
for i in range(num_available_ships):
row = available_positions[0][i]
col = available_positions[1][i]
pos_key = my_ship_pos_to_k[row*grid_size+col]
best_standard_scores[i] = all_ship_scores[pos_key][0].max() - 1e6*(
halite_ships[row, col] == 0)
pos_keys.append(pos_key)
if hunting_season_started:
already_included_ids = np.zeros(num_available_ships, dtype=np.bool)
for ship_k in prev_standard_ships:
if ship_k in player_obs[2]:
# The ship still exists and was a standard ship in the previous step
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
if my_defend_base_ship_positions[row, col] or boxing_in_mission[
row, col] or on_rescue_mission[row, col] or (
not_available_due_to_cycle[row, col]):
# We can use the ship for collecting soon (now it is rescuing or
# boxing in or defending the base)
standard_ships.append(ship_k)
elif available_pack_hunt_ships[row, col]:
# The ship is available now. Flag it for exclusion so it doesn't
# get added twice
standard_ships.append(ship_k)
match_id = np.where((available_positions[0] == row) & (
available_positions[1] == col))[0][0]
already_included_ids[match_id] = True
else:
# The ship is now used for base camping or base conversion or
# additional collection
# Exclude it from the standard ships group
assert not_available_due_to_camping[row, col] or (
convert_unavailable_positions[row, col]) or (
not_available_due_to_temp_collect[row, col])
best_standard_scores = best_standard_scores[~already_included_ids]
available_positions = (available_positions[0][~already_included_ids],
available_positions[1][~already_included_ids])
pos_keys = np.array(pos_keys)[~already_included_ids].tolist()
num_unassigned = max_standard_ships_hunting_season - len(standard_ships)
# if num_unassigned > 0:
# import pdb; pdb.set_trace()
num_to_assign = min(num_unassigned, num_available_ships)
num_to_assign_phase_1 = max(0, num_to_assign - config[
'max_standard_ships_decided_end_pack_hunting'])
num_to_assign_phase_2 = num_to_assign-num_to_assign_phase_1
num_available_ships = best_standard_scores.size
else:
num_to_assign = max_standard_ships_hunting_season
num_to_assign_phase_1 = min(num_to_assign, num_available_ships)
num_to_assign_phase_2 = 0
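    # Phase 1 standard ships are assigned right away; phase 2 ships are only
    # assigned after the pack hunt coordination below, so ships that receive
    # a coordinated hunting task this step are not pulled back for collecting.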
# Assign the remaining standard ships
# Assign the available ships with the highest collect score for collecting
# (preferably non zero halite ships)
best_standard_ids = np.argsort(-best_standard_scores)[
:num_to_assign_phase_1]
for standard_id in best_standard_ids:
standard_row = available_positions[0][standard_id]
standard_col = available_positions[1][standard_id]
standard_key = pos_keys[standard_id]
assert not standard_key in standard_ships
standard_ships.append(standard_key)
# Mark the standard ships as unavailable for pack hunting
for standard_key in standard_ships:
standard_row, standard_col = row_col_from_square_grid_pos(
player_obs[2][standard_key][0], grid_size)
available_pack_hunt_ships[standard_row, standard_col] = 0
# The remaining ships are considered for pack hunting. Send the ones with
# halite on board to a base.
considered_hunting_ships_pos = np.where(available_pack_hunt_ships)
num_available_ships = available_pack_hunt_ships.sum()
for i in range(num_available_ships):
row = considered_hunting_ships_pos[0][i]
col = considered_hunting_ships_pos[1][i]
if halite_ships[row, col] > 0:
available_pack_hunt_ships[row, col] = 0
ship_k = my_ship_pos_to_k[row*grid_size+col]
# Let the ship collect at will (but prefer to go to a base sooner rather
# than later) before joining the pack hunt (after touching base)
for j in [2, 3]:
all_ship_scores[ship_k][j][:] = -1e6
# Let a hoarding ship gather when it is safe to do so
if (obs_halite[row, col] < 100 or safe_collect_margin[
row, col] <= 0) and (
not ship_k in history['temporary_hoarding_collect_ships']):
# FUTURE WORK: Make the multiplier a function of the opponent
# aggression level?
all_ship_scores[ship_k][1][:] *= 4
elif not ship_k in history['temporary_hoarding_collect_ships'] and (
safe_collect_margin[row, col] > 0):
history['temporary_hoarding_collect_ships'].append(ship_k)
# print(observation['step'], row, col, ship_k,
# "Temporarily collecting")
# Ignore ships for hunting that are already being boxed in with my box-in
# ships
box_opponent_mask = np.zeros((grid_size, grid_size), dtype=np.bool)
for boxed_target_row, boxed_target_col in box_opponent_positions:
box_opponent_mask[boxed_target_row, boxed_target_col] = 1
# Consider what to do with the zero halite ships that are available for
# hunting.
  # First attempt: do something similar to mzotkiew
# Main idea: move to the nearest non zero halite opponent if that direction
# is valid.
opponent_ships = stacked_ships[1:].sum(0) > 0
potential_targets = opponent_ships & (halite_ships > 0) & (
~box_opponent_mask)
hunting_ships_pos = np.where(available_pack_hunt_ships)
num_hunting_ships = available_pack_hunt_ships.sum()
# Exclude targets that I am willfully letting convert
for (ignore_convert_row, ignore_convert_col) in ignore_convert_positions:
potential_targets[ignore_convert_row, ignore_convert_col] = 0
# Exclude targets that have a safe path to one of their bases
if num_hunting_ships > 0:
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
nearest_opponent_base_distances = [None]
for opponent_id in range(1, num_players):
num_bases = stacked_bases[opponent_id].sum()
opponent_base_locations = np.where(stacked_bases[opponent_id])
all_opponent_base_distances = [DISTANCES[
opponent_base_locations[0][i], opponent_base_locations[1][i]] for i in (
range(num_bases))] + [99*np.ones((grid_size, grid_size))]
nearest_opponent_base_distances.append(np.stack(
all_opponent_base_distances).min(0))
considered_targets_pos = np.where(potential_targets)
for j in range(potential_targets.sum()):
target_row = considered_targets_pos[0][j]
target_col = considered_targets_pos[1][j]
opponent_id = np.where(stacked_ships[:, target_row, target_col])[0][0]
opp_nearest_base_distances = nearest_opponent_base_distances[opponent_id]
target_distance_to_nearest_base = opp_nearest_base_distances[
target_row, target_col]
my_min_distance_to_opp_nearest_base = opp_nearest_base_distances[
hunting_ships_pos].min()
# if target_row == 20 and target_col == 12 and observation['step'] == 160:
# import pdb; pdb.set_trace()
if target_distance_to_nearest_base < my_min_distance_to_opp_nearest_base:
potential_targets[target_row, target_col] = 0
potential_targets_pos = np.where(potential_targets)
num_potential_targets = potential_targets.sum()
# print(observation['step'], num_hunting_ships, num_potential_targets)
hoarded_one_step_opponent_keys = []
if num_potential_targets > 0 and num_hunting_ships > 0:
# print(observation['step'])
ordered_ship_keys = []
all_target_distances = np.zeros((num_hunting_ships, num_potential_targets))
for i in range(num_hunting_ships):
row = hunting_ships_pos[0][i]
col = hunting_ships_pos[1][i]
ship_k = my_ship_pos_to_k[row*grid_size+col]
ordered_ship_keys.append(ship_k)
potential_target_distances = DISTANCES[row, col][potential_targets]
# Update the target distances to only include potential targets that
# correspond with valid actions
potential_targets_rows = potential_targets_pos[0]
potential_targets_cols = potential_targets_pos[1]
south_dist = np.where(
potential_targets_rows >= row, potential_targets_rows-row,
potential_targets_rows-row+grid_size)
east_dist = np.where(
potential_targets_cols >= col, potential_targets_cols-col,
potential_targets_cols-col+grid_size)
valid_directions = all_ship_scores[ship_k][6]
valid_move_counts = 2*np.ones(num_potential_targets)
for d in NOT_NONE_DIRECTIONS:
if not d in valid_directions:
if d == NORTH:
decrement_ids = south_dist >= grid_size/2
elif d == SOUTH:
decrement_ids = (south_dist <= grid_size/2) & (south_dist > 0)
elif d == EAST:
decrement_ids = (east_dist <= grid_size/2) & (east_dist > 0)
elif d == WEST:
decrement_ids = east_dist >= grid_size/2
valid_move_counts[decrement_ids] -= 1
# Handle the case of being in the same row or column
valid_move_counts[south_dist == 0] -= 1
valid_move_counts[east_dist == 0] -= 1
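      # valid_move_counts now holds, per target, how many of the shortest path
      # directions towards it are valid moves for this ship; targets that can
      # not be approached with a valid move get a large distance penalty.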
# if observation['step'] == 91 and row == 6 and col == 3:
# import pdb; pdb.set_trace()
assert np.all(valid_move_counts >= 0)
potential_target_distances[valid_move_counts == 0] += 100
all_target_distances[i] = potential_target_distances
opponent_num_escape_directions = np.zeros(num_potential_targets)
for j in range(num_potential_targets):
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
opponent_num_escape_directions[j] = len(opponent_ships_sensible_actions[
target_row, target_col])
# First coordinate my hunters to ships that have no valid escape directions
hunting_ships_available = np.ones(num_hunting_ships, dtype=np.bool)
for j in range(num_potential_targets):
num_escape_dirs = opponent_num_escape_directions[j]
if num_escape_dirs == 0:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
# Loop over my hunting ships at a distance of max two and take as many
# of the potential escape squares as possible
my_near_ships = np.where((all_target_distances[:, j] <= 2) & (
hunting_ships_available))[0]
num_my_near_ships = my_near_ships.size
if num_my_near_ships > 0:
# You can always attack at most 2 escape squares. Keep track of what
# escape squares each of my ships can attack without collecting
# halite on the next turn (ignoring ship collision halite gain)
my_target_relative_attack_dirs = np.zeros((num_my_near_ships, 5))
for loop_id, my_ship_id in enumerate(my_near_ships):
row = hunting_ships_pos[0][my_ship_id]
col = hunting_ships_pos[1][my_ship_id]
ship_k = my_ship_pos_to_k[row*grid_size+col]
valid_attack_dirs = all_ship_scores[ship_k][6]
considered_attack_dirs = get_dir_from_target(
row, col, target_row, target_col, grid_size)
if all_target_distances[my_ship_id, j] == 1 and (
obs_halite[row, col] == 0):
considered_attack_dirs.append(None)
attack_dirs = list(set(considered_attack_dirs) & set(
valid_attack_dirs))
# Get the relative directions from the target that I can attack
for d in attack_dirs:
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
relative_covered_dir = MOVE_DIRECTIONS_TO_ID[get_dir_from_target(
target_row, target_col, move_row, move_col, grid_size)[0]]
my_target_relative_attack_dirs[loop_id, relative_covered_dir] = 1
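          # my_target_relative_attack_dirs[i, d] is 1 when nearby hunter i can
          # end its move on the square at MOVE_DIRECTIONS[d] relative to the
          # target; squares that are already claimed elsewhere are cleared
          # below.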
direction_covered = np.zeros(len(MOVE_DIRECTIONS), dtype=np.bool)
for dir_id, d in enumerate(MOVE_DIRECTIONS):
rel_target_row, rel_target_col = move_ship_row_col(
target_row, target_col, d, grid_size)
if override_move_squares_taken[rel_target_row, rel_target_col]:
direction_covered[dir_id] = 1
# First, handle the ships that can only attack a single square that
# is not covered yet
my_target_relative_attack_dirs[:, direction_covered] = 0
# if observation['step'] == 149:
# import pdb; pdb.set_trace()
          # Greedily loop over directions, ordered by the number of my ships
          # that can cover each one. Prefer low but strictly positive counts.
while my_target_relative_attack_dirs.sum() > 0:
ship_num_possible_attacks = my_target_relative_attack_dirs.sum(1)
dir_num_possible_attacks = my_target_relative_attack_dirs.sum(0)
# The None action is slightly preferred since that guarantees a max
# distance of 1 on the next turn
dir_num_possible_attacks[0] -= 0.1
non_zero_min_count = dir_num_possible_attacks[
dir_num_possible_attacks > 0].min()
best_dir_ids = np.where(dir_num_possible_attacks == (
non_zero_min_count))[0]
dir_id = np_rng.choice(best_dir_ids)
considered_ships = np.where(
my_target_relative_attack_dirs[:, dir_id])[0]
# Break ties with the number of directions each ship covers
cover_ship_scores = ship_num_possible_attacks[considered_ships]
considered_ships_attacker_id = considered_ships[
np.argmin(cover_ship_scores)]
attacker_id = my_near_ships[considered_ships_attacker_id]
# Move my ship to the relative position of the target
rel_target_row, rel_target_col = move_ship_row_col(
target_row, target_col, MOVE_DIRECTIONS[dir_id], grid_size)
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][rel_target_row, rel_target_col] = 2e5
override_move_squares_taken[rel_target_row, rel_target_col] = 1
hunting_ships_available[attacker_id] = 0
# Update the attack dir counts
my_target_relative_attack_dirs[considered_ships_attacker_id] = 0
my_target_relative_attack_dirs[:, dir_id] = 0
# if observation['step'] == 188:
# import pdb; pdb.set_trace()
# Next, coordinate my hunters to ships that have a single moving escape
# direction.
    # These ships are preferred targets since it is likely that I can soon box
# them in, especially if it is me who cuts off three of the move directions
# Order the ships so that the ships that had a single escape direction in
# the previous step are handled first, so we can coordinate the
# interception
one_step_opponent_ids = np.arange(num_potential_targets).tolist()
priority_ids = []
for opponent_ship_k in history['prev_step_hoarded_one_step_opponent_keys']:
if opponent_ship_k in ship_key_to_pos:
opponent_pos = ship_key_to_pos[opponent_ship_k]
target_row, target_col = row_col_from_square_grid_pos(
opponent_pos, grid_size)
if potential_targets[target_row, target_col]:
# We need to check because the target may no longer be available for
# pack hunting due to boxing in or getting close to a friendly base
opponent_priority_id = np.where(
(potential_targets_pos[0] == target_row) & (
potential_targets_pos[1] == target_col))[0][0]
priority_ids.append(opponent_priority_id)
remaining_ids = list(set(one_step_opponent_ids) - set(priority_ids))
remaining_ids.sort() # Set intersect can be flaky
one_step_opponent_ids = priority_ids + remaining_ids
one_step_opponent_positions_directions = []
for j in one_step_opponent_ids:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
target_escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
move_escape_directions = copy.copy(target_escape_directions)
if (0, 0) in move_escape_directions:
move_escape_directions.remove((0, 0))
num_move_escape_dirs = len(move_escape_directions)
# nearest_target_distances = np.tile(
# all_target_distances.min(1)[:, None], [1, num_potential_targets])
if num_move_escape_dirs == 1:
      # The <= ensures we consider piling up on individual ships
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
if potential_nearby_attackers.size >= 2:
# Figure out if I have at least one available ship at a max distance
# of 2 that can push the opponent in one direction
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[
move_escape_directions[0]]
potential_nearby_distances = all_target_distances[
potential_nearby_attackers, j]
if potential_nearby_distances.min() == 1:
# The None direction is covered - verify the other directions
uncovered_dirs = copy.copy(NOT_NONE_DIRECTIONS)
uncovered_dirs.remove(escape_dir)
ignore_attacker_ids = []
d1_hunters = []
for attacker_id in potential_nearby_attackers:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
valid_directions = all_ship_scores[ship_k][6]
if escape_dir in valid_directions:
threat_dirs = get_dir_from_target(
target_row, target_col, attacker_row, attacker_col,
grid_size)
uncovered_dirs = list(set(uncovered_dirs) - set(threat_dirs))
if DISTANCES[target_row, target_col][
attacker_row, attacker_col] == 1:
d1_hunters.append(attacker_id)
else:
ignore_attacker_ids.append(attacker_id)
if len(uncovered_dirs) == 0 or (
len(uncovered_dirs) == 1 and len(d1_hunters) > 1):
one_step_opponent_positions_directions.append((
target_row, target_col, escape_dir))
opponent_ship_k = ship_pos_to_key[
target_row*grid_size+target_col]
hoarded_one_step_opponent_keys.append(opponent_ship_k)
if len(uncovered_dirs) > 0:
potential_nearby_attackers = d1_hunters
# Move the attackers in the single escape direction
# import pdb; pdb.set_trace()
for attacker_id in potential_nearby_attackers:
if not attacker_id in ignore_attacker_ids:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
move_row, move_col = move_ship_row_col(
attacker_row, attacker_col, escape_dir, grid_size)
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][move_row, move_col] = 2e5
override_move_squares_taken[move_row, move_col] = 1
hunting_ships_available[attacker_id] = 0
# Try to get into a position where the opponent can only move in one
# direction (from two to one escape direction)
for j in range(num_potential_targets):
num_escape_dirs = opponent_num_escape_directions[j]
if num_escape_dirs == 2:
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] == 1))[0]
attack_selected = False
if potential_nearby_attackers.size == 2:
escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if (escape_directions[0][0] == 0 and (
escape_directions[1][0] == 0)) or (
escape_directions[0][1] == 0 and (
escape_directions[1][1] == 0)):
# Scenario: ME | OPPONENT | ME - guess the action and then chase
# Guess the opponent's next action
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
escape_dir_scores = np.zeros(2)
for escape_id, escape_dir in enumerate(escape_directions):
move_row, move_col = move_ship_row_col(
target_row, target_col, RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_dir], grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
my_influence = player_influence_maps[0][move_row, move_col]
escape_dir_scores[escape_id] = opponent_influence-my_influence
likely_opponent_move = RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_directions[np.argmax(escape_dir_scores)]]
          # Only continue if both my ships can move in the selected
          # direction
both_can_move = True
can_stay = np.zeros(2, dtype=np.bool)
for attacker_0_or_1, attacker_id in enumerate(
potential_nearby_attackers):
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
both_can_move = both_can_move and likely_opponent_move in (
all_ship_scores[ship_k][6])
can_stay[attacker_0_or_1] = obs_halite[
attacker_row, attacker_col] == 0
if both_can_move:
            # If both sit on non zero halite squares: move both in the likely
            # escape direction. Otherwise keep one randomly selected ship
            # that sits on a zero halite square in place and move the other
            # in the escape direction
if not np.any(can_stay):
stay_in_place_ids = []
else:
stay_in_place_ids = [np_rng.choice(potential_nearby_attackers[
can_stay])]
for attacker_id in potential_nearby_attackers:
# import pdb; pdb.set_trace()
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
move_dir = None if attacker_id in stay_in_place_ids else (
likely_opponent_move)
move_row, move_col = move_ship_row_col(
attacker_row, attacker_col, move_dir, grid_size)
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
all_ship_scores[ship_k][0][move_row, move_col] = 2e5
override_move_squares_taken[move_row, move_col] = 1
hunting_ships_available[attacker_id] = 0
attack_selected = True
escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if not attack_selected and not (0, 0) in escape_directions and len(
escape_directions) == 2:
# Scenario: ME | OPPONENT | | ME - guess the action and then chase
available_nearby = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
if available_nearby.size >= 2:
attacker_rows = hunting_ships_pos[0][available_nearby]
attacker_cols = hunting_ships_pos[1][available_nearby]
north_dist = np.where(
target_row >= attacker_rows, target_row-attacker_rows,
target_row-attacker_rows+grid_size)
vert_rel_pos = np.where(
north_dist < 3, north_dist, north_dist-grid_size)
west_dist = np.where(
target_col >= attacker_cols, target_col-attacker_cols,
target_col-attacker_cols+grid_size)
horiz_rel_pos = np.where(
west_dist < 3, west_dist, west_dist-grid_size)
same_row_ids = (vert_rel_pos == 0)
same_col_ids = (horiz_rel_pos == 0)
consider_attack = False
if np.any(horiz_rel_pos[same_row_ids] < 0) and np.any(
horiz_rel_pos[same_row_ids] > 0):
if np.any(horiz_rel_pos[same_row_ids] == 1) and np.any(
horiz_rel_pos[same_row_ids] == -2):
move_to_target_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == -2)[0][0]]
move_escape_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == 1)[0][0]]
consider_attack = True
elif np.any(horiz_rel_pos[same_row_ids] == -1) and np.any(
horiz_rel_pos[same_row_ids] == 2):
move_to_target_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == 2)[0][0]]
move_escape_id = available_nearby[same_row_ids][np.where(
horiz_rel_pos[same_row_ids] == -1)[0][0]]
consider_attack = True
elif np.any(vert_rel_pos[same_col_ids] < 0) and np.any(
vert_rel_pos[same_col_ids] > 0):
if np.any(vert_rel_pos[same_col_ids] == 1) and np.any(
vert_rel_pos[same_col_ids] == -2):
move_to_target_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == -2)[0][0]]
move_escape_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == 1)[0][0]]
consider_attack = True
elif np.any(vert_rel_pos[same_col_ids] == -1) and np.any(
vert_rel_pos[same_col_ids] == 2):
move_to_target_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == 2)[0][0]]
move_escape_id = available_nearby[same_col_ids][np.where(
vert_rel_pos[same_col_ids] == -1)[0][0]]
consider_attack = True
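          # consider_attack is set when two of my hunters share the target's
          # row or column on opposite sides, one adjacent and one two squares
          # away: the adjacent ship then shadows the likely escape move while
          # the far ship closes in on the target.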
if consider_attack:
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
escape_dir_scores = np.zeros(2)
for escape_id, escape_dir in enumerate(escape_directions):
move_row, move_col = move_ship_row_col(
target_row, target_col, RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_dir], grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
my_influence = player_influence_maps[0][move_row, move_col]
escape_dir_scores[escape_id] = opponent_influence-my_influence
likely_opponent_move = RELATIVE_DIR_TO_DIRECTION_MAPPING[
escape_directions[np.argmax(escape_dir_scores)]]
# print(observation['step'], target_row, target_col)
attacker_escape_row = hunting_ships_pos[0][move_escape_id]
attacker_escape_col = hunting_ships_pos[1][move_escape_id]
attacker_to_target_row = hunting_ships_pos[0][move_to_target_id]
attacker_to_target_col = hunting_ships_pos[1][move_to_target_id]
move_escape_row, move_escape_col = move_ship_row_col(
attacker_escape_row, attacker_escape_col, likely_opponent_move,
grid_size)
to_target_dir = get_dir_from_target(
attacker_to_target_row, attacker_to_target_col,
target_row, target_col, grid_size)[0]
move_to_target_row, move_to_target_col = move_ship_row_col(
attacker_to_target_row, attacker_to_target_col,
to_target_dir, grid_size)
ship_escape_k = my_ship_pos_to_k[
attacker_escape_row*grid_size+attacker_escape_col]
ship_to_target_k = my_ship_pos_to_k[
attacker_to_target_row*grid_size+attacker_to_target_col]
if likely_opponent_move in all_ship_scores[ship_escape_k][6] and(
to_target_dir in all_ship_scores[ship_to_target_k][6]) and(
not override_move_squares_taken[
move_escape_row, move_escape_col]) and not (
override_move_squares_taken[
move_to_target_row, move_to_target_col]):
all_ship_scores[ship_escape_k][0][
move_escape_row, move_escape_col] = 2e5
all_ship_scores[ship_to_target_k][0][
move_to_target_row, move_to_target_col] = 2e5
override_move_squares_taken[
move_escape_row, move_escape_col] = 1
override_move_squares_taken[
move_to_target_row, move_to_target_col] = 1
hunting_ships_available[move_escape_id] = 0
hunting_ships_available[move_to_target_id] = 0
# Intercept ships that are pushed in one direction to avoid chasing forever
for target_row, target_col, escape_dir in (
one_step_opponent_positions_directions):
# Try to move perpendicular to the escaping ship if I can catch it in
# time
attacker_rows = hunting_ships_pos[0]
attacker_cols = hunting_ships_pos[1]
north_dist = np.where(
target_row >= attacker_rows, target_row-attacker_rows,
target_row-attacker_rows+grid_size)
west_dist = np.where(
target_col >= attacker_cols, target_col-attacker_cols,
target_col-attacker_cols+grid_size)
if escape_dir in [NORTH, SOUTH]:
perpendicular_distances = np.minimum(west_dist, grid_size-west_dist)
if escape_dir == SOUTH:
direction_distances = grid_size-north_dist
else:
direction_distances = north_dist
else:
perpendicular_distances = np.minimum(north_dist, grid_size-north_dist)
if escape_dir == EAST:
direction_distances = grid_size-west_dist
else:
direction_distances = west_dist
potential_nearby_attackers = np.where(hunting_ships_available & (
direction_distances >= perpendicular_distances))[0]
if potential_nearby_attackers.size > 0:
potential_crossing_min_steps = np.ceil((
direction_distances[potential_nearby_attackers]+(
perpendicular_distances[potential_nearby_attackers]))/2)
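        # The attacker is ahead of the fleeing ship along its escape
        # direction, so it can reach the square ceil((parallel +
        # perpendicular) / 2) steps ahead of the target no later than the
        # target itself does.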
min_crossing_distance = potential_crossing_min_steps.min().astype(np.int)
# FUTURE WORK: discard if there is a base on the escape track
if min_crossing_distance <= 6:
attacker_id = potential_nearby_attackers[
np.argmin(potential_crossing_min_steps)]
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
if escape_dir == NORTH:
intersect_row = (target_row-min_crossing_distance) % grid_size
intersect_col = target_col
elif escape_dir == SOUTH:
intersect_row = (target_row+min_crossing_distance) % grid_size
intersect_col = target_col
elif escape_dir == EAST:
intersect_row = target_row
intersect_col = (target_col+min_crossing_distance) % grid_size
elif escape_dir == WEST:
intersect_row = target_row
intersect_col = (target_col-min_crossing_distance) % grid_size
ship_k = my_ship_pos_to_k[
attacker_row*grid_size+attacker_col]
intersect_bonus_mask = get_mask_between_exclude_ends(
attacker_row, attacker_col, intersect_row, intersect_col,
grid_size)
intersect_bonus = 1e5*intersect_bonus_mask
if intersect_bonus_mask.sum() > 1:
# Prefer to move to low halite squares in order to avoid conflicts
# with collect ships
intersect_bonus[intersect_bonus_mask] -= 10*(np.minimum(
1000, 10*obs_halite[intersect_bonus_mask])+obs_halite[
intersect_bonus_mask]/10)
# Give a small penalty to same rows or columns in order to allow more
# move options downstream
intersect_bonus[row] -= 1
intersect_bonus[:, col] -= 1
all_ship_scores[ship_k][0][:] += intersect_bonus
# override_move_squares_taken[move_row, move_col] = 1
# import pdb; pdb.set_trace()
hunting_ships_available[attacker_id] = 0
# Try to cut off the preferred escape direction for opponent ships that
# only have a stand still valid action
for j in range(num_potential_targets):
target_row = potential_targets_pos[0][j]
target_col = potential_targets_pos[1][j]
target_escape_directions = opponent_ships_sensible_actions[
target_row, target_col]
if (0, 0) in target_escape_directions and len(
target_escape_directions) == 1:
potential_nearby_attackers = np.where(hunting_ships_available & (
all_target_distances[:, j] <= 2))[0]
num_potential_attackers = potential_nearby_attackers.size
if num_potential_attackers > 0:
can_cover_dirs = {d: [] for d in NOT_NONE_DIRECTIONS}
for attacker_id in potential_nearby_attackers:
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
for d in NOT_NONE_DIRECTIONS:
if d in all_ship_scores[ship_k][6]:
move_attacker_row, move_attacker_col = move_ship_row_col(
attacker_row, attacker_col, d, grid_size)
if DISTANCES[target_row, target_col][
move_attacker_row, move_attacker_col] == 1:
covered_dir = get_dir_from_target(
target_row, target_col, move_attacker_row,
move_attacker_col, grid_size)[0]
can_cover_dirs[covered_dir].append(attacker_id)
cover_dir_scores = np.zeros(len(NOT_NONE_DIRECTIONS))
opponent_id = np.where(
stacked_ships[:, target_row, target_col])[0][0]
for dir_id, d in enumerate(NOT_NONE_DIRECTIONS):
move_row, move_col = move_ship_row_col(
target_row, target_col, d, grid_size)
opponent_influence = player_influence_maps[opponent_id][
move_row, move_col]
not_opponent_influence = player_influence_maps[
:, move_row, move_col].sum() - opponent_influence
cover_dir_scores[dir_id] = (
opponent_influence-not_opponent_influence)
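          # Higher scores mark escape squares where the target's own influence
          # dominates everyone else's, i.e. the squares it most likely flees
          # to; those are covered first below.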
          # Greedily cover escape directions in the target ship's order of
          # preference
cover_dir_argsort = np.argsort(-cover_dir_scores)
for dir_id in cover_dir_argsort:
d = NOT_NONE_DIRECTIONS[dir_id]
covered_row, covered_col = move_ship_row_col(
target_row, target_col, d, grid_size)
considered_attacker_ids = can_cover_dirs[d]
# if considered_attacker_ids and override_move_squares_taken[
# covered_row, covered_col]:
# import pdb; pdb.set_trace()
if considered_attacker_ids and not override_move_squares_taken[
covered_row, covered_col]:
            # Prefer my attackers that can not attack other escape squares
            # (those on the same row or col as the target at a distance of 2)
num_considered_attackers = len(considered_attacker_ids)
considered_attack_scores = np.zeros(num_considered_attackers)
for cons_attack_id in range(num_considered_attackers):
attacker_id = considered_attacker_ids[cons_attack_id]
attacker_row = hunting_ships_pos[0][attacker_id]
attacker_col = hunting_ships_pos[1][attacker_id]
considered_attack_scores[cons_attack_id] = int(
(attacker_row != target_row) and (
attacker_col != target_col))
selected_attacker_id = considered_attacker_ids[
np.argmin(considered_attack_scores)]
hunting_ships_available[selected_attacker_id] = 0
attacker_row = hunting_ships_pos[0][selected_attacker_id]
attacker_col = hunting_ships_pos[1][selected_attacker_id]
ship_k = my_ship_pos_to_k[attacker_row*grid_size+attacker_col]
# import pdb; pdb.set_trace()
all_ship_scores[ship_k][0][covered_row, covered_col] = 2e5
override_move_squares_taken[covered_row, covered_col] = 1
for d_ in NOT_NONE_DIRECTIONS:
if selected_attacker_id in can_cover_dirs[d_]:
can_cover_dirs[d_].remove(selected_attacker_id)
# FUTURE WORK: Try to cut off the preferred escape direction for opponent
# ships that only have a single non stand still valid action
# Assign the remaining standard ships
if num_to_assign_phase_2 > 0 and hunting_ships_available.sum() > 0:
available_hunting_ids = np.where(hunting_ships_available)[0]
num_remaining_available_ships = available_hunting_ids.size
best_standard_scores = np.zeros(num_remaining_available_ships)
pos_keys = []
for i in range(num_remaining_available_ships):
row = hunting_ships_pos[0][available_hunting_ids[i]]
col = hunting_ships_pos[1][available_hunting_ids[i]]
pos_key = my_ship_pos_to_k[row*grid_size+col]
best_standard_scores[i] = all_ship_scores[pos_key][0].max() - 1e6*(
halite_ships[row, col] == 0)
pos_keys.append(pos_key)
# Assign the remaining collect ships
# Assign the available ships with the highest collect score for collecting
# (preferably non zero halite ships)
best_standard_ids = np.argsort(
-best_standard_scores)[:num_to_assign_phase_2]
for standard_id in best_standard_ids:
standard_row = hunting_ships_pos[0][available_hunting_ids[standard_id]]
standard_col = hunting_ships_pos[1][available_hunting_ids[standard_id]]
# print("Newly assigned standard ship", standard_row, standard_col)
standard_key = pos_keys[standard_id]
assert not standard_key in standard_ships
standard_ships.append(standard_key)
hunting_ships_available[available_hunting_ids[standard_id]] = False
# Coordinate the remaining hunting actions based on the potential target
# distances. Balance with staying closer to my center of mass of ships
# with halite and my bases
# FUTURE WORK: make this work
# my_vulnerable_score = (
# observation['rewards_bases_ships'][0][2] & (halite_ships > 0)) + 3*(
# observation['rewards_bases_ships'][0][1])
# Average the vulnerable map over time so that the center of mass is more
# stable
# my_time_averaged_vulnerable_score = 0.8*history[
# 'my_time_averaged_vulnerable_score'] + 0.2*my_vulnerable_score
# history['my_time_averaged_vulnerable_score'] = (
# my_time_averaged_vulnerable_score)
# smoothed_vulnerable_score = smooth2d(my_time_averaged_vulnerable_score)
# center_of_vulnerable_mass = np.unravel_index(
# smoothed_vulnerable_score.argmax(), smoothed_vulnerable_score.shape)
# print(observation['step'], center_of_vulnerable_mass)
# FUTURE WORK: don't break ranks when hoarding
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
unavailable_hunt_positions = (stacked_bases[1:].sum(0) > 0) | (
override_move_squares_taken)
for i in range(num_hunting_ships):
if hunting_ships_available[i]:
row = hunting_ships_pos[0][i]
col = hunting_ships_pos[1][i]
ship_k = my_ship_pos_to_k[row*grid_size+col]
potential_target_distances_ship = all_target_distances[i]
potential_target_distances_ship_weighted = (
potential_target_distances_ship)
# potential_target_distances_ship_weighted = (
# potential_target_distances_ship) + DISTANCES[
# center_of_vulnerable_mass][potential_targets_pos]/3
if preferred_victim is not None and (
potential_target_distances_ship.min() > 2):
preferred_victim_ship_ids = player_ids[
potential_targets_pos] == preferred_victim
potential_target_distances_ship_weighted[
preferred_victim_ship_ids] -= 10
selected_target_id = np.argmin(
potential_target_distances_ship_weighted)
target_distance = potential_target_distances_ship[selected_target_id]
if target_distance < 3 or obs_halite[row, col] < 100 or (
safe_collect_margin[row, col] <= 0):
target_row = potential_targets_rows[selected_target_id]
target_col = potential_targets_cols[selected_target_id]
hunting_bonus = 1e5*get_mask_between_exclude_ends(
row, col, target_row, target_col, grid_size)
if target_distance == 1 and not unavailable_hunt_positions[
target_row, target_col]:
hunting_bonus[target_row, target_col] = 1e5
elif target_distance == 1:
          # Move in one of the (at most two) likely opponent next-action
          # directions if that direction is still available.
          # This means another of my hunters/boxers at distance one has
          # already claimed the target square
sensible_opponent_dirs = opponent_ships_sensible_actions[
(target_row, target_col)]
for d in NOT_NONE_DIRECTIONS:
if RELATIVE_DIR_MAPPING[d] in sensible_opponent_dirs:
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
if not unavailable_hunt_positions[move_row, move_col]:
hunting_bonus[move_row, move_col] = 1e5
# Prefer to move in the same direction as the target when I am
# tracking the target closely
opponent_ship_k = ship_pos_to_key[
target_row*grid_size+target_col]
if opponent_ship_k in prev_step_opponent_ship_moves and (
target_distance <= 2):
target_prev_move = prev_step_opponent_ship_moves[opponent_ship_k]
bonus_rel_dir = RELATIVE_DIR_MAPPING[target_prev_move]
bonus_rows = np.mod(row + bonus_rel_dir[0]*(
1+np.arange(half_distance_mask_dim)), grid_size)
bonus_cols = np.mod(col + bonus_rel_dir[1]*(
1+np.arange(half_distance_mask_dim)), grid_size)
hunting_bonus[(bonus_rows, bonus_cols)] *= 1.5
if (hunting_bonus > 0).sum() > 1:
# Prefer to move to low halite squares in order to avoid conflicts
# with collect ships
hunting_bonus[hunting_bonus > 0] -= 10*(np.minimum(
1000, 10*obs_halite[hunting_bonus > 0]) + obs_halite[
hunting_bonus > 0]/10)
# Give a small penalty to same rows or columns in order to allow
# more move options downstream
hunting_bonus[row] -= 1
hunting_bonus[:, col] -= 1
else:
# Override the pack hunt and collect at the current square
hunting_bonus = np.zeros((grid_size, grid_size))
if ship_k in history['temporary_hoarding_collect_ships']:
print(observation['step'], row, col, ship_k,
"INVESTIGATE: ship in hoarding collect but also hoarding??")
else:
history['temporary_hoarding_collect_ships'].append(ship_k)
# print(observation['step'], row, col, ship_k,
# "Temporarily collecting")
hunting_bonus[row, col] = 11e4 # Consider
# import pdb; pd.set_trace()
# x=1
all_ship_scores[ship_k][0][:] += hunting_bonus
# print(standard_ships, available_pack_hunt_ships.sum(),
# stacked_ships[0].sum())
# print(observation['step'], history['temporary_hoarding_collect_ships'])
history['hunting_season_standard_ships'] = standard_ships
history['hunting_season_started'] = True
history['prev_step_hoarded_one_step_opponent_keys'] = (
hoarded_one_step_opponent_keys)
# if observation['step'] == 192:
# import pdb; pdb.set_trace()
return all_ship_scores, history, override_move_squares_taken
def get_no_zero_halite_neighbors(halite):
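  """Return a boolean map that is True for squares whose four neighbors
  (with wrap-around) all contain nonzero halite."""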
no_zero_halite_neighbors = np.ones_like(halite, dtype=np.bool)
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
shifted = np.concatenate([halite[None, -1], halite[:-1]])
elif d == SOUTH:
shifted = np.concatenate([halite[1:], halite[None, 0]])
elif d == EAST:
shifted = np.concatenate([halite[:, 1:], halite[:, 0, None]], 1)
elif d == WEST:
shifted = np.concatenate([halite[:, -1, None], halite[:, :-1]], 1)
no_zero_halite_neighbors &= (shifted > 0)
return no_zero_halite_neighbors
def get_my_guaranteed_safe_collect_squares(
opponent_ships, grid_size, my_bases, obs_halite, collect_rate,
halite_ships, observation, halite_on_board_mult=1e-6):
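  """Identify squares where I can collect halite without risk.

  A square is considered safe when one of my bases is at distance <= d from
  it while the nearest opponent ship is at a distance of at least d+2 from
  that base (distances are tie-broken by halite on board). Also returns the
  safety margin in steps, a per-square estimate of the maximum cargo that
  can still be returned safely, and the per-base return thresholds together
  with the base positions.
  """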
opp_ship_locations = np.where(opponent_ships)
nearest_opponent_stacked_distances = [
99*np.ones((grid_size, grid_size))]
for i in range(opponent_ships.sum()):
opponent_row = opp_ship_locations[0][i]
opponent_col = opp_ship_locations[1][i]
opponent_ship_halite = max(0, halite_ships[opponent_row, opponent_col])
opponent_distances = DISTANCES[opponent_row, opponent_col]
nearest_opponent_stacked_distances.append(
opponent_distances + halite_on_board_mult*opponent_ship_halite)
nearest_opponent_distances = np.stack(
nearest_opponent_stacked_distances).min(0)
my_base_locations = np.where(my_bases)
my_nearest_base_distances = [DISTANCES[
my_base_locations[0][i], my_base_locations[1][i]] for i in range(
my_bases.sum())]
safe_to_collect = np.zeros((grid_size, grid_size), dtype=np.bool)
safe_to_collect_margin = -1*np.ones((grid_size, grid_size), dtype=np.int)
safe_to_return_halites = -1/halite_on_board_mult*np.ones(
(grid_size, grid_size), dtype=np.int)
safe_to_return_base_halites = []
for i in range(my_bases.sum()):
considered_base = my_base_locations[0][i], my_base_locations[1][i]
margin = np.floor((
nearest_opponent_distances[considered_base]-1) - (
my_nearest_base_distances[i] + halite_on_board_mult*(
np.maximum(0, halite_ships)+(
collect_rate*obs_halite).astype(np.int))+1e-12)).astype(np.int)
safe_base_reach = (my_nearest_base_distances[i] + halite_on_board_mult*(
np.maximum(0, halite_ships)+(
collect_rate*obs_halite).astype(np.int))) < (
nearest_opponent_distances[considered_base]-1)
safe_to_collect |= safe_base_reach
safe_to_collect_margin[safe_base_reach] = np.maximum(
safe_to_collect_margin[safe_base_reach], margin[safe_base_reach]+1)
base_safe_return_thresholds = 1/halite_on_board_mult*(
nearest_opponent_distances[considered_base] - (
my_nearest_base_distances[i]))
safe_to_return_halites = np.maximum(
safe_to_return_halites, base_safe_return_thresholds)
safe_to_return_base_halites.append(
(base_safe_return_thresholds, considered_base))
# if observation['step'] == 78:
# import pdb; pdb.set_trace()
# nearest_opponent_stacked_distances_old = [DISTANCES[
# opp_ship_locations[0][i], opp_ship_locations[1][i]] for i in range(
# opponent_ships.sum())] + [99*np.ones((grid_size, grid_size))]
# nearest_opponent_distances_old = np.stack(
# nearest_opponent_stacked_distances_old).min(0)
# my_nearest_base_distances_old = np.stack(my_nearest_base_distances + [
# 99*np.ones((grid_size, grid_size))]).min(0)
# safe_to_collect_old = my_nearest_base_distances_old <= (
# nearest_opponent_distances_old-2)
return (safe_to_collect, safe_to_collect_margin, safe_to_return_halites,
safe_to_return_base_halites)
def get_ignored_convert_positions(
likely_convert_opponent_positions, main_base_distances, stacked_ships,
abs_rel_opponent_scores, observation, my_base_distances, opponent_bases,
boxed_in_attack_squares):
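  """Decide which likely opponent base conversions to let happen peacefully.

  Conversions by opponents with a zero relative attack score that take place
  far from my main base and my other bases are ignored: the square is
  treated as an opponent base and nearby boxed-in attack squares are
  cleared.
  """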
ignore_convert_positions = []
for (row, col) in likely_convert_opponent_positions:
main_base_distance = main_base_distances[row, col]
opponent_id = np.where(stacked_ships[:, row, col])[0][0]
if (abs_rel_opponent_scores[opponent_id-1] == 0) and (
main_base_distance >= 9-(observation['relative_step']*6)) and (
my_base_distances[:, row, col].min() >= 5-(
observation['relative_step']*3)):
ignore_convert_positions.append((row, col))
opponent_bases[row, col] = True
boxed_in_attack_squares[ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]] = 0
# if observation['step'] == 84:
# import pdb; pdb.set_trace()
return ignore_convert_positions, opponent_bases, boxed_in_attack_squares
def get_avoid_attack_squares(
boxed_in_attack_squares, approximate_score_diff, currently_winning,
abs_rel_opponent_scores, my_zero_halite_ships, opponent_ships,
influence_map, influence_map_unweighted, my_base_distances,
boxed_in_opponent_ids, observation):
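  """Flag boxed-in attack squares my lone zero halite ships should avoid.

  A square is avoided when I am outnumbered and have little influence there
  and no likely escape path after the attack. Also selects an opponent id to
  always attack regardless of the risk of ship loss (when I am clearly
  winning or a close second).
  """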
grid_size = opponent_ships.shape[0]
avoid_attack_squares_zero_halite = np.zeros(
(grid_size, grid_size), dtype=np.bool)
# Decide what opponent to attack regardless of the risk of ship loss
# Policy: I am a close second or I am winning and attacking the second
always_attack_opponent_id = None
best_opponent_id = 1+np.argmin(approximate_score_diff)
if np.all(currently_winning) or (
(~currently_winning).sum() == 1 and abs_rel_opponent_scores[
best_opponent_id-1] > 0):
always_attack_opponent_id = best_opponent_id
if np.any(boxed_in_attack_squares):
# Count nearby zero halite and opponent ships
all_boxed_squares = np.where(boxed_in_attack_squares)
for i in range(all_boxed_squares[0].size):
boxed_row = all_boxed_squares[0][i]
boxed_col = all_boxed_squares[1][i]
num_my_nearby_zero_halite = my_zero_halite_ships[
ROW_COL_MAX_DISTANCE_MASKS[boxed_row, boxed_col, 3]].sum()
num_opponent_nearby = opponent_ships[
ROW_COL_MAX_DISTANCE_MASKS[boxed_row, boxed_col, 5]].sum()
if ((influence_map[boxed_row, boxed_col] < 0.5) and (
influence_map_unweighted[boxed_row, boxed_col] < -2) and (
num_my_nearby_zero_halite == 1) and (
num_opponent_nearby > 4) and (
my_base_distances[:, boxed_row, boxed_col].min() >= 5)) and (
always_attack_opponent_id is None or (
boxed_in_opponent_ids[boxed_row, boxed_col] != (
always_attack_opponent_id))):
# Flag the square as bad if I don't have a likely escape path
can_escape = False
avoid_attack_escape_distance = 4
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
considered_row = (boxed_row - avoid_attack_escape_distance) % (
grid_size)
considered_col = boxed_col
elif d == SOUTH:
considered_row = (boxed_row + avoid_attack_escape_distance) % (
grid_size)
considered_col = boxed_col
elif d == EAST:
considered_row = boxed_row
considered_col = (boxed_col + avoid_attack_escape_distance) % (
grid_size)
elif d == WEST:
considered_row = boxed_row
considered_col = (boxed_col - avoid_attack_escape_distance) % (
grid_size)
if influence_map[considered_row, considered_col] > 0.5:
can_escape = True
break
if not can_escape:
avoid_attack_squares_zero_halite[boxed_row, boxed_col] = 1
# if np.any(avoid_attack_squares_zero_halite):
# print(observation['step'], np.where(avoid_attack_squares_zero_halite))
# import pdb; pdb.set_trace()
# x=1
return avoid_attack_squares_zero_halite, always_attack_opponent_id
def override_initial_collect(
config, all_ship_scores, obs_halite, halite_ships, stacked_ships,
stacked_bases, player_influence_maps, player_obs, observation,
history):
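  """Assign early-game collect targets (orphaned logic, kept for reference).

  Returns the updated all_ship_scores and history.
  """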
# ORPHANED LOGIC, this was not going anywhere
# Initial collect override logic. Ships should initially aim for halite
# squares at the boundaries of their influence sphere - that way opponents
# don't get to mine it and I can then later focus on halite near my base
grid_size = stacked_ships.shape[1]
# Stack the collect scores for all my ships
ship_rows = []
ship_cols = []
ship_keys = []
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_rows.append(row)
ship_cols.append(col)
ship_keys.append(ship_k)
my_num_ships = len(ship_rows)
if my_num_ships > 0:
my_ship_positions = (np.array(ship_rows), np.array(ship_cols))
opponent_ship_positions = np.where(stacked_ships[1:].sum(0) > 0)
my_ship_halites = halite_ships[my_ship_positions]
# Obtain the nearest distances for my and opponent ships
my_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
for ship_id in range(my_num_ships):
# FUTURE WORK: Should I exclude my returning to base ships? Probably not.
row = my_ship_positions[0][ship_id]
col = my_ship_positions[1][ship_id]
my_nearest_ship_distances = np.minimum(
my_nearest_ship_distances, DISTANCES[row, col])
my_nearest_base_distances = 99*np.ones((grid_size, grid_size))
my_num_bases = stacked_bases[0].sum()
my_base_positions = np.where(stacked_bases[0])
for base_id in range(my_num_bases):
row = my_base_positions[0][base_id]
col = my_base_positions[1][base_id]
my_nearest_base_distances = np.minimum(
my_nearest_base_distances, DISTANCES[row, col])
opponent_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
opponent_ships = stacked_ships[1:].sum(0) > 0
num_opponent_ships = opponent_ships.sum()
for ship_id in range(num_opponent_ships):
row = opponent_ship_positions[0][ship_id]
col = opponent_ship_positions[1][ship_id]
opponent_nearest_ship_distances = np.minimum(
opponent_nearest_ship_distances, DISTANCES[row, col])
opponent_nearest_base_distances = 99*np.ones((grid_size, grid_size))
opponent_bases = stacked_bases[1:].sum(0) > 0
num_opponent_bases = opponent_bases.sum()
opponent_base_positions = np.where(opponent_bases)
for base_id in range(num_opponent_bases):
row = opponent_base_positions[0][base_id]
col = opponent_base_positions[1][base_id]
opponent_nearest_base_distances = np.minimum(
opponent_nearest_base_distances, DISTANCES[row, col])
nearest_ship_distance_difference = (
my_nearest_ship_distances - opponent_nearest_ship_distances)
if observation['step'] == 0:
original_position_multiplier = 1.5**(3-np.abs(np.maximum(
-3, nearest_ship_distance_difference)))
history['original_position_multiplier'] = original_position_multiplier
else:
original_position_multiplier = history['original_position_multiplier']
# Use the opponent influence to determine the value of gather squares -
# squares where the nearest distance to one of my ships is equal have the
# highest value since these will likely be where the competition happens
smooth_multiplier = smooth2d(obs_halite)
smooth_multiplier /= smooth_multiplier.mean()
collect_values = np.copy(obs_halite) * smooth_multiplier
collect_values *= original_position_multiplier
smooth_collect_values = smooth2d(collect_values, 5)
# Compute ship specific collect scores for all considered collect values
# This factors in the distance to each square and the halite on board
# FUTURE WORK: factor in halite on board
all_ship_collect_scores = np.zeros((my_num_ships, grid_size, grid_size))
my_assigned_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
ships_assigned = np.zeros(my_num_ships, dtype=np.bool)
assigned_id = 0
initial_collect_zero_halite_targets = history[
'initial_collect_zero_halite_targets']
for ship_id, ship_k in enumerate(ship_keys):
row = my_ship_positions[0][ship_id]
col = my_ship_positions[1][ship_id]
valid_considered_mask = np.ones((grid_size, grid_size), dtype=np.bool)
bad_directions = list(set(MOVE_DIRECTIONS) - set(all_ship_scores[ship_k][6]))
for d in bad_directions:
if d is None:
valid_considered_mask[row, col] = 0
else:
valid_considered_mask[HALF_PLANES_CATCH[row, col][d]] = 0
dm = DISTANCE_MASKS[(row, col)]
ship_collect_scores = dm*collect_values*valid_considered_mask
ship_collect_scores[row, col] *= int(my_num_bases > 0)*(
config['relative_stand_still_collect_boost'])
all_ship_collect_scores[ship_id] = ship_collect_scores
if my_ship_halites[ship_id] > 0 and not None in bad_directions and (
ship_collect_scores[row, col] == ship_collect_scores.max()):
# import pdb; pdb.set_trace()
my_assigned_squares[row, col] = True
all_ship_scores[ship_k][0][row, col] = 1e5 - assigned_id
ships_assigned[ship_id] = True
assigned_id += 1
elif my_ship_halites[ship_id] == 0 and ship_k in (
initial_collect_zero_halite_targets):
# Prefer consistent target selection - only reevaluate after reaching the
# target
all_ship_collect_scores[ship_id][initial_collect_zero_halite_targets[
ship_k]] *= 2
# if observation['step'] == 13:
# import pdb; pdb.set_trace()
lowest_collect_score = all_ship_collect_scores.min()
# original_ship_collect_scores = np.copy(all_ship_collect_scores)
all_ship_collect_scores[:, my_assigned_squares] = lowest_collect_score
all_ship_collect_scores[ships_assigned] = lowest_collect_score
# First assign the zero halite ships - Ideally, they should spread out and
# target high value halite squares at the boundary of the influence sphere
num_zero_halite_ships = (my_ship_halites == 0).sum()
zero_halite_ids = np.where(my_ship_halites == 0)
zero_halite_collect_scores = all_ship_collect_scores[zero_halite_ids]
zero_halite_targets = {}
for _ in range(num_zero_halite_ships):
(best_ship_id, best_row, best_col) = np.unravel_index(
zero_halite_collect_scores.argmax(), zero_halite_collect_scores.shape)
# import pdb; pdb.set_trace()
ship_k = ship_keys[best_ship_id]
my_assigned_squares[best_row, best_col] = True
# Create a mask between the current and target positions, where we
# encourage the ships to prefer squares with a higher smooth collect value
row = my_ship_positions[0][best_ship_id]
col = my_ship_positions[1][best_ship_id]
collect_ship_score_mask = get_mask_between_exclude_ends(
row, col, best_row, best_col, grid_size)
collect_ship_score_mask[best_row, best_col] = 1
collect_ship_scores = (1e5-assigned_id)*collect_ship_score_mask-(
10*obs_halite)
to_target_dirs = get_dir_from_target(
row, col, best_row, best_col, grid_size)
if len(to_target_dirs) == 2:
first_move_pos = move_ship_row_col(
row, col, to_target_dirs[0], grid_size)
second_move_pos = move_ship_row_col(
row, col, to_target_dirs[1], grid_size)
if smooth_collect_values[first_move_pos] > smooth_collect_values[
second_move_pos]:
avoid_dir = to_target_dirs[1]
else:
avoid_dir = to_target_dirs[0]
collect_ship_scores[HALF_PLANES_CATCH[row, col][avoid_dir]] -= 1
# import pdb; pdb.set_trace()
all_ship_scores[ship_k][0][:] = collect_ship_scores
zero_halite_targets[ship_k] = (best_row, best_col)
assigned_id += 1
ships_assigned[best_ship_id] = True
      all_ship_collect_scores[:, my_assigned_squares] = lowest_collect_score
zero_halite_collect_scores[:, best_row, best_col] = lowest_collect_score
zero_halite_collect_scores[best_ship_id] = lowest_collect_score
history['initial_collect_zero_halite_targets'] = zero_halite_targets
print(observation['step'], zero_halite_targets)
return all_ship_scores, history
def get_ship_scores(config, observation, player_obs, env_config, np_rng,
ignore_bad_attack_directions, history,
env_obs_ids, env_observation, verbose):
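  """Compute per-ship score maps for all high level ship objectives.

  For every one of my ships this builds grid score maps for collecting
  halite, returning to a base, establishing a new base and attacking an
  opponent base, together with valid/bad direction bookkeeping. The maps are
  stored per ship key in all_ship_scores and returned along with the
  auxiliary maps (opponent sensible actions, influence maps, base distances,
  ...) that the downstream coordination logic needs.
  """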
ship_scores_start_time = time.time()
convert_cost = env_config.convertCost
spawn_cost = env_config.spawnCost
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
all_my_bases = copy.copy(stacked_bases[0])
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
num_my_bases_with_excluded = my_bases.sum()
base_locations_with_excluded = np.where(my_bases)
excluded_base_distances = []
for base_pos in history['my_base_not_attacked_positions']:
# Note: stacking ensures we are working on a copy of the original base
# observation!
my_bases[base_pos] = 0
excluded_base_distances.append(DISTANCES[base_pos])
obs_halite = np.maximum(0, observation['halite'])
  # Clip obs_halite to zero where gathering would not add to the score
# code: delta_halite = int(cell.halite * configuration.collect_rate)
collect_rate = env_config.collectRate
obs_halite[obs_halite < 1/collect_rate] = 0
obs_halite_sum = obs_halite.sum()
my_ship_count = len(player_obs[2])
num_my_bases = my_bases.sum()
first_base = my_ship_count == 1 and num_my_bases == 0 and observation[
'step'] <= 10
max_ships = config['max_initial_ships']
early_game_return_boost_step = config['early_game_return_boost_step']
step = observation['step']
early_game_not_max_ships = (my_ship_count < max_ships) and (
step < early_game_return_boost_step)
early_game_return_boost = (early_game_return_boost_step-step)/(
early_game_return_boost_step)*config[
'early_game_return_base_additional_multiplier']*early_game_not_max_ships
steps_remaining = env_config.episodeSteps-1-observation['step']
# Override the maximum number of conversions on the last episode turn
last_episode_turn = observation['relative_step'] == 1
grid_size = obs_halite.shape[0]
half_dim_grid_mask = np.ones((grid_size, grid_size))*half_distance_mask_dim
# smoothed_friendly_ship_halite = smooth2d(
# observation['rewards_bases_ships'][0][3])
smoothed_halite = smooth2d(obs_halite)
can_deposit_halite = num_my_bases > 0
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
my_ships = stacked_ships[0]
opponent_ships = stacked_ships[1:].sum(0) > 0
all_ship_count = opponent_ships.sum() + my_ship_count
my_ship_fraction = my_ship_count/(1e-9+all_ship_count)
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
my_zero_halite_ships = my_ships & (halite_ships == 0)
last_ship_standing_no_collect = observation[
'relative_step'] > 1/4 and (
stacked_ships[0] & (halite_ships == 0)).sum() == 1
opponent_bases = stacked_bases[1:].sum(0)
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
camping_ships_strategy = history['camping_ships_strategy']
# Get the distance to the nearest base for all squares
all_bases = stacked_bases.sum(0) > 0
base_locations = np.where(all_bases)
num_bases = all_bases.sum()
all_base_distances = [DISTANCES[
base_locations[0][i], base_locations[1][i]] for i in range(num_bases)] + [
99*np.ones((grid_size, grid_size))]
nearest_base_distances = np.stack(all_base_distances).min(0)
if num_my_bases_with_excluded > 0:
all_base_distances_with_excluded = np.stack([DISTANCES[
base_locations_with_excluded[0][i],
base_locations_with_excluded[1][i]] for i in range(
num_my_bases_with_excluded)])
nearest_base_distances_with_my_excluded = (
all_base_distances_with_excluded.min(0))
else:
all_base_distances_with_excluded = np.zeros((0, grid_size, grid_size))
nearest_base_distances_with_my_excluded = 99*np.ones(
(grid_size, grid_size), dtype=np.int)
# Flag to indicate I should not occupy/flood my base with early ships
my_halite = observation['rewards_bases_ships'][0][0]
avoid_base_early_game = my_halite >= spawn_cost and (
observation['step'] < 20) and num_my_bases == 1 and (
my_halite % spawn_cost) == 0 and my_ship_count < 9
# if observation['step'] in [160, 242]:
# import pdb; pdb.set_trace()
# Distance to nearest base mask - gathering closer to my base is better
(base_nearest_distance_scores, my_base_distances,
my_nearest_base_distances) = get_nearest_base_distances(
grid_size, history['my_base_not_attacked_positions'], observation)
# Get opponent ship actions that avoid collisions with less halite ships
(opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
boxed_in_attack_squares, boxed_in_opponent_ids,
boxed_in_zero_halite_opponents, likely_convert_opponent_positions,
possible_convert_opponent_positions) = get_valid_opponent_ship_actions(
config, observation['rewards_bases_ships'], halite_ships, grid_size,
history, nearest_base_distances_with_my_excluded, observation,
env_config)
# Get the weighted base mask
(weighted_base_mask, main_base_distances, non_abandoned_base_distances,
ship_diff_smoothed) = get_weighted_base_mask(
stacked_bases, stacked_ships, observation, history)
# Get the influence map
(influence_map, influence_map_unweighted, player_influence_maps,
priority_scores, ship_priority_weights,
escape_influence_probs) = get_influence_map(
config, stacked_bases, stacked_ships, halite_ships, observation,
player_obs)
# Scale the opponent bases as a function of attack desirability
(opponent_bases_scaled, opponent_ships_scaled, abs_rel_opponent_scores,
currently_winning, approximate_score_diff, history,
ballistic_attack_base_targets) = scale_attack_scores_bases_ships(
config, observation, player_obs, spawn_cost, non_abandoned_base_distances,
weighted_base_mask, steps_remaining, obs_halite, halite_ships, history,
smoothed_halite, player_influence_maps,
nearest_base_distances_with_my_excluded, player_ids)
ignore_bad_attack_directions = ignore_bad_attack_directions or len(
ballistic_attack_base_targets) > 0
# Decide what converting ships to let convert peacefully
(ignore_convert_positions, opponent_bases,
boxed_in_attack_squares) = get_ignored_convert_positions(
likely_convert_opponent_positions, main_base_distances, stacked_ships,
abs_rel_opponent_scores, observation, my_base_distances, opponent_bases,
boxed_in_attack_squares)
# Decide what boxed in escape squares to avoid - if I use a lonely zero
# halite ship to destroy an opponent's ship, I am likely to lose my ship in
# one of the subsequent turns
(avoid_attack_squares_zero_halite,
always_attack_opponent_id) = get_avoid_attack_squares(
boxed_in_attack_squares, approximate_score_diff, currently_winning,
abs_rel_opponent_scores, my_zero_halite_ships, opponent_ships,
influence_map, influence_map_unweighted, my_base_distances,
boxed_in_opponent_ids, observation)
# Get the squares that have no zero halite neighbors - this makes it hard
# to successfully camp out next to the base
no_zero_halite_neighbors = get_no_zero_halite_neighbors(
observation['halite'])
# Only conditionally attack the bases where I have a camper that is active
my_prev_step_base_attacker_ships = history[
'my_prev_step_base_attacker_ships']
camp_attack_mask = np.ones((grid_size, grid_size), dtype=np.bool)
for ship_k in camping_ships_strategy:
base_location = camping_ships_strategy[ship_k][5]
consider_base_attack = camping_ships_strategy[ship_k][4]
camp_attack_mask[base_location] = consider_base_attack
# Attack opponent ships that camp out next to my base
attack_opponent_campers = history['attack_opponent_campers']
# Don't worry about collecting if I have a base at distance <= d and the
# nearest opponent is at a distance of at least d+2
(safe_to_collect, safe_to_collect_margin, safe_to_return_halites,
safe_to_return_base_halites) = get_my_guaranteed_safe_collect_squares(
opponent_ships, grid_size, all_my_bases, obs_halite, collect_rate,
halite_ships, observation)
# Early on, the collect boost is high as the distance from the nearest base
# grows. This effect rapidly decays once the hunting season starts
stand_still_collect_boost = config['relative_stand_still_collect_boost']
# print(observation['step'], my_ship_count, (stacked_ships[0] & (
# halite_ships == 0)).sum())
n_step_avoid_min_die_prob_cutoff = config[
'n_step_avoid_min_die_prob_cutoff']
if history['num_destroyed_ships'] == 0:
low_risk_limit = 0.1
early_game_risk_limit = max(
low_risk_limit, config['max_risk_n_step_risky'] - (
config['max_risk_n_step_risky']-low_risk_limit)*observation[
'step']/config['max_steps_n_step_risky'])
n_step_avoid_min_die_prob_cutoff = max(
early_game_risk_limit, n_step_avoid_min_die_prob_cutoff)
all_ship_scores = {}
for i, ship_k in enumerate(player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
dm = DISTANCE_MASKS[(row, col)]
ship_halite = player_obs[2][ship_k][1]
opponent_less_halite_ships = np.logical_and(
opponent_ships, halite_ships <= ship_halite)
opponent_smoother_less_halite_ships = smooth2d(
opponent_less_halite_ships, smooth_kernel_dim=5)
# Scores 1: collecting halite at row, col
# Multiply the smoothed halite, added with the obs_halite with a distance
# mask, specific for the current row and column
ship_influence_priority_multipliers = (
1+config['influence_weights_additional_multiplier']*(
ship_priority_weights[ship_k])**config[
'influence_weights_exponent']) ** priority_scores
collect_grid_scores = dm*(
smoothed_halite * config['collect_smoothed_multiplier'] +
obs_halite * config['collect_actual_multiplier']) * (
config['collect_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) * (
base_nearest_distance_scores ** config[
'collect_base_nearest_distance_exponent'])*(
ship_influence_priority_multipliers)
if observation['step'] < 50:
# import pdb; pdb.set_trace()
collect_grid_scores *= (history['original_position_multiplier']**(
observation['step']/50))
base_distance = nearest_base_distances_with_my_excluded[row, col]
collect_grid_scores[row, col] *= int(num_my_bases_with_excluded > 0)*(
stand_still_collect_boost * (1+config[
'initial_collect_boost_away_from_base'] * max(0, base_distance-5)/7)**(
(1-observation['relative_step'])**14))
if ship_k in history['initial_not_collect_near_base_ships']:
collect_grid_scores[history['initial_not_collect_near_base_mask']] = 0
# if observation['step'] == 233:
# import pdb; pdb.set_trace()
if last_ship_standing_no_collect and ship_halite == 0:
collect_grid_scores[row, col] = -1e13
# if observation['step'] >= 14 and row == 2 and col in [9]:
# import pdb; pdb.set_trace()
# Override the collect score to 0 to avoid blocking the base early on in
# the game: All squares right next to the initial base are set to 0
if avoid_base_early_game:
collect_grid_scores, early_next_base_dir, drop_None_valid = (
set_scores_single_nearby_zero(
collect_grid_scores, my_bases, grid_size, row, col))
else:
early_next_base_dir = None
drop_None_valid = False
    # At the end of the game, disincentivize all collect squares that are too
    # far away from the nearest base to be able to return before the game is
    # over
if steps_remaining < grid_size and nearest_base_distances.min() == 0:
trajectory_lengths = DISTANCES[row, col] + my_nearest_base_distances
collect_grid_scores[trajectory_lengths > (steps_remaining-1-int(
steps_remaining > grid_size//2))] = 0
# Scores 2: returning to any of my bases - delay the return when it is
# safe to collect
safe_collect_ship_margin = safe_to_collect_margin[row, col]
if ship_k in history['returning_to_base_ships'] or (
safe_collect_ship_margin <= 0) or num_my_bases < 2:
delay_return_divisor = 1
else:
# delay_return_divisor = 1
delay_return_divisor = 2**(safe_collect_ship_margin/2)
# Always use the maximum value for a base return if I can safely move there
# (regardless of my influence in that area)
weighted_base_mask_ship_return = np.copy(weighted_base_mask)
for base_safe_return_halite, base_location in safe_to_return_base_halites:
if ship_halite < base_safe_return_halite[row, col]:
weighted_base_mask_ship_return[base_location] = max(
1.0, weighted_base_mask_ship_return[base_location])
base_return_grid_multiplier = dm*min(ship_halite, 2*convert_cost)/(
delay_return_divisor)*(config['return_base_multiplier'] * (
config['return_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) + early_game_return_boost)*(
weighted_base_mask_ship_return)
# Further incentivize moving onto a base after a return has started when I
# am close to a base since that means I can count on some of the best
# collect score for future steps
if ship_k in history['returning_to_base_ships'] and observation[
'relative_step'] > config['start_hunting_season_relative_step']:
base_return_grid_multiplier += (dm**2)*collect_grid_scores.max()/1.5
chase_details = history['chase_counter'][0].get(ship_k, None)
if chase_details is not None:
# Keep the relative order using the minimum in case the return to base
# pull is big
base_return_grid_multiplier = np.minimum(
base_return_grid_multiplier+5e4, base_return_grid_multiplier*(config[
'chase_return_base_exponential_bonus']**chase_details[1]))
# Force returning to a base when the episode is almost over and I
# have halite on board
if ship_halite > 0 and steps_remaining < grid_size:
base_return_grid_multiplier, end_game_base_return = (
force_return_base_end_episode(
my_bases, base_return_grid_multiplier, main_base_distances, row, col,
steps_remaining, opponent_less_halite_ships, weighted_base_mask,
safe_to_collect))
else:
end_game_base_return = False
# Override the return base score to 0 to avoid blocking the base early on
# in the game.
if avoid_base_early_game:
base_return_grid_multiplier = override_early_return_base_scores(
base_return_grid_multiplier, my_bases, row, col, my_ship_count)
# if observation['step'] == 247 and row == 15 and col == 4:
# import pdb; pdb.set_trace()
# Scores 3: establish a new base
first_base_or_can_spawn = my_ship_count == 1 and num_my_bases == 0 and (
observation['step'] <= 10 or (player_obs[0]+ship_halite) >= (
2*spawn_cost))
establish_base_scores = dm**(config['establish_base_dm_exponent'])*(
smoothed_halite-obs_halite) * (config[
'establish_base_smoothed_multiplier'] + first_base*config[
'establish_first_base_smoothed_multiplier_correction'])*(
1-((my_bases*dm).max()))*(1-my_bases) * (
config['establish_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships)) - (
convert_cost*can_deposit_halite) + min(
ship_halite, convert_cost)*(
config['establish_base_deposit_multiplier']) + first_base*(
config['first_base_no_4_way_camping_spot_bonus']*(
no_zero_halite_neighbors)) - 1e5*int(not (
first_base_or_can_spawn))
# if observation['step'] == 391 and ship_k == '58-1':
# import pdb; pdb.set_trace()
# Scores 4: attack an opponent base at row, col
attack_step_multiplier = min(5, max(1, 1/(
2*(1-observation['relative_step']+1e-9))))
if ship_k in my_prev_step_base_attacker_ships:
# Encourage the base attack of a ship to be persistent
attack_step_multiplier *= 5
attack_base_scores = dm*np.minimum(15e5, camp_attack_mask*(
attack_step_multiplier)*config['attack_base_multiplier']*(
opponent_bases_scaled)*(config[
'attack_base_less_halite_ships_multiplier_base'] ** (
opponent_smoother_less_halite_ships))) - (config[
'attack_base_halite_sum_multiplier'] * obs_halite_sum**0.8 / (
all_ship_count))*int(my_ship_fraction < 0.5) - 1e12*(
ship_halite > 0)
    # Keep the preference order in ballistic mode without abandoning rescue or
    # base defense ships
attack_base_scores = np.minimum(15e5, attack_base_scores) + 1e-10*(
attack_base_scores) * (attack_base_scores > 15e5)
    # Update the scores as a function of nearby opponent ships to avoid
    # collisions with opponent ships that carry less halite and encourage
    # collisions with opponent ships that carry more halite
# Also incorporate the camping score override behavior here
camping_override_strategy = camping_ships_strategy.get(ship_k, ())
attack_campers_override_strategy = attack_opponent_campers.get(ship_k, ())
(collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, preferred_directions, valid_directions,
agent_surrounded, two_step_bad_directions, n_step_bad_directions,
one_step_valid_directions, n_step_bad_directions_die_probs,
original_n_step_bad_directions,
original_n_step_bad_directions_die_probs) = update_scores_opponent_ships(
config, collect_grid_scores, base_return_grid_multiplier,
establish_base_scores, attack_base_scores, opponent_ships,
opponent_bases, halite_ships, row, col, grid_size, spawn_cost,
drop_None_valid, obs_halite, collect_rate, np_rng,
opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, ignore_bad_attack_directions,
observation, ship_k, all_my_bases, my_ships, steps_remaining, history,
escape_influence_probs, player_ids, env_obs_ids, env_observation,
main_base_distances, nearest_base_distances, end_game_base_return,
camping_override_strategy, attack_campers_override_strategy,
boxed_in_attack_squares, safe_to_collect,
boxed_in_zero_halite_opponents, ignore_convert_positions,
avoid_attack_squares_zero_halite, n_step_avoid_min_die_prob_cutoff,
safe_to_return_halites, safe_to_return_base_halites,
my_nearest_base_distances)
# if observation['step'] == 169 and ship_k == '65-2':
# import pdb; pdb.set_trace()
# Update the scores as a function of blocking opponent bases and my early
# game initial base
(collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, valid_directions, one_step_valid_directions,
opponent_base_directions) = update_scores_blockers(
collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, row, col, grid_size, opponent_bases,
half_dim_grid_mask, valid_directions, one_step_valid_directions,
early_next_base_dir, update_attack_base=False)
if last_episode_turn:
      # Convert all ships with at least the convert cost in halite on the
      # last episode step
last_episode_step_convert = ship_halite >= convert_cost
if last_episode_step_convert and num_my_bases_with_excluded > 0:
# Don't convert if I can safely move to a base next to my square.
min_base_distance = all_base_distances_with_excluded[:, row, col].min()
if min_base_distance == 1:
if opponent_less_halite_ships.sum() == 0:
last_episode_step_convert = False
else:
for base_id in range(num_my_bases_with_excluded):
base_row = base_locations_with_excluded[0][base_id]
base_col = base_locations_with_excluded[1][base_id]
if all_base_distances_with_excluded[base_id, row, col] == 1:
if DISTANCES[base_row, base_col][
opponent_less_halite_ships].min() > 1:
last_episode_step_convert = False
break
if last_episode_step_convert:
establish_base_scores[row, col] = 1e12
base_locations_with_excluded = (
np.append(base_locations_with_excluded[0], row),
np.append(base_locations_with_excluded[1], col))
all_base_distances_with_excluded = np.concatenate(
[all_base_distances_with_excluded,
np.expand_dims(DISTANCES[row, col], 0)])
num_my_bases_with_excluded += 1
elif ship_halite > 0:
base_return_grid_multiplier[DISTANCES[row, col] == 1] += 1e5
end_game_base_return = True
else:
last_episode_step_convert = False
all_ship_scores[ship_k] = (
collect_grid_scores, base_return_grid_multiplier, establish_base_scores,
attack_base_scores, preferred_directions, agent_surrounded,
valid_directions, two_step_bad_directions, n_step_bad_directions,
one_step_valid_directions, opponent_base_directions, 0,
end_game_base_return, last_episode_step_convert,
n_step_bad_directions_die_probs, opponent_smoother_less_halite_ships,
ship_influence_priority_multipliers, original_n_step_bad_directions,
original_n_step_bad_directions_die_probs)
# if observation['relative_step'] < config[
# 'initial_collect_override_relative_step']:
# all_ship_scores, history = override_initial_collect(
# config, all_ship_scores, obs_halite, halite_ships, stacked_ships,
# stacked_bases, player_influence_maps, player_obs, observation,
# history)
ship_scores_duration = time.time() - ship_scores_start_time
return (all_ship_scores, opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, weighted_base_mask,
opponent_ships_scaled, main_base_distances, ship_scores_duration,
halite_ships, player_influence_maps, boxed_in_zero_halite_opponents,
ignore_convert_positions, ship_diff_smoothed,
ballistic_attack_base_targets, safe_to_return_halites,
safe_to_collect_margin, always_attack_opponent_id,
likely_convert_opponent_positions,
possible_convert_opponent_positions, my_base_distances,
nearest_base_distances, history)
def get_mask_between_exclude_ends(r1, c1, r2, c2, grid_size):
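  """Boolean mask of the wrap-aware bounding rectangle spanned by (r1, c1)
  and (r2, c2), with the two end squares themselves excluded."""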
rel_pos = get_relative_position(r1, c1, r2, c2, grid_size)
start_row = r2 if rel_pos[0] < 0 else r1
rows = np.mod(
np.arange(start_row, start_row+np.abs(rel_pos[0])+1), grid_size)
start_col = c2 if rel_pos[1] < 0 else c1
cols = np.mod(
np.arange(start_col, start_col+np.abs(rel_pos[1])+1), grid_size)
mask = np.zeros((grid_size, grid_size), dtype=np.bool)
mask[rows[:, None], cols] = 1
mask[r1, c1] = 0
mask[r2, c2] = 0
return mask
def consider_restoring_base(
observation, env_config, all_ship_scores, player_obs, convert_cost, np_rng,
history, max_considered_attackers=3, halite_on_board_mult=1e-6):
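  """Decide whether and where to convert a ship to restore a lost base.

  Conversion is only considered when the expected payoff exceeds the convert
  cost and the new base can be defended; in that case the best placed ship is
  flagged for conversion and the closest other ship is sent towards it.
  Otherwise ships are steered towards a promising gathering region so a
  future conversion can be defended.
  """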
obs_halite = np.maximum(0, observation['halite'])
grid_size = obs_halite.shape[0]
collect_rate = env_config.collectRate
obs_halite[obs_halite < 1/collect_rate] = 0
my_ships = observation['rewards_bases_ships'][0][2]
my_ship_count = my_ships.sum()
my_bases = observation['rewards_bases_ships'][0][1]
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
opponent_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])[1:].sum(0)
opponent_ships = np.stack([
rbs[2] for rbs in observation['rewards_bases_ships'][1:]]).sum(0) > 0
opponent_ship_count = opponent_ships.sum()
all_ship_count = opponent_ship_count + my_ship_count
my_ship_fraction = my_ship_count/(1e-9+all_ship_count)
remaining_halite = obs_halite.sum()
steps_remaining = env_config.episodeSteps-1-observation['step']
ship_cargo = (np.minimum(convert_cost, halite_ships)*my_ships).sum()
expected_payoff_conversion = ship_cargo*0.5 + (max(
0, steps_remaining-20)**0.6)*(remaining_halite**0.9)*my_ship_fraction
last_ship_standing_no_collect = observation[
'relative_step'] > 1/4 and (
stacked_ships[0] & (halite_ships == 0)).sum() == 1
halite_density = smooth2d(obs_halite, smooth_kernel_dim=10)
my_base_density = smooth2d(my_bases, smooth_kernel_dim=10)
can_deposit_halite = expected_payoff_conversion > convert_cost
restored_base_pos = None
can_defend_converted = False
if can_deposit_halite:
# Decide what ship to convert - it should be relatively central, have high
# ship halite on board and be far away from opponent ships and bases
# Also, don't build a new base next to a base where there is a camper
# that I am not attacking
# Preferably restore a base close to other halite and close to my other
# bases
next_to_my_camped_not_attacked = np.zeros(
(grid_size, grid_size), dtype=np.bool)
for base_pos in history['my_base_not_attacked_positions']:
next_to_my_camped_not_attacked[ROW_COL_BOX_MAX_DISTANCE_MASKS[
base_pos[0], base_pos[1], 2]] = 1
my_ship_density = smooth2d(my_ships, smooth_kernel_dim=10)
opponent_base_density = smooth2d(opponent_bases, smooth_kernel_dim=5)
opponent_ship_density = smooth2d(opponent_ships, smooth_kernel_dim=5)
convert_priority_scores = np.zeros(my_ship_count)
for i, ship_k in enumerate(player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = min(convert_cost, player_obs[2][ship_k][1])
# Compute if the base can be defended after conversion
my_ship_distances = np.sort(DISTANCES[row, col][my_ships]+(
halite_on_board_mult*halite_ships[my_ships]))[1:]
opponent_ship_distances = np.sort(DISTANCES[row, col][opponent_ships])+(
halite_on_board_mult*halite_ships[opponent_ships])
num_considered_distances = min([
max_considered_attackers, my_ship_count-1, opponent_ship_count])
can_defend = np.all(my_ship_distances[:num_considered_distances] <= (
opponent_ship_distances[:num_considered_distances]))
if num_considered_distances >= 1 and can_defend:
can_defend = my_ship_distances[0] < opponent_ship_distances[0]
can_afford = (halite_ships[row, col] + player_obs[0]) >= convert_cost*(
1+1e10*int(last_ship_standing_no_collect and ship_halite == 0))
convert_priority_scores[i] = ship_halite + halite_density[
row, col]/10 + (500*(
max(0, 0.3-np.abs(0.3-my_base_density[row, col])))) - 100*(
my_ship_density-opponent_ship_density)[row, col] - 200*(
opponent_base_density[row, col]) - 1e12*int(
not can_defend or not can_afford or next_to_my_camped_not_attacked[
row, col])
can_defend_converted = convert_priority_scores.max() > -1e11
# if observation['step'] == 153:
# import pdb; pdb.set_trace()
if can_defend_converted:
convert_k = list(player_obs[2].keys())[np.argmax(convert_priority_scores)]
convert_row, convert_col = row_col_from_square_grid_pos(
player_obs[2][convert_k][0], grid_size)
restored_base_pos = (convert_row, convert_col)
all_ship_scores[convert_k][2][convert_row, convert_col] = 1e12
# Send the closest ship towards the base
convert_distances = DISTANCES[convert_row, convert_col][my_ships]
convert_distances[convert_distances == 0] = 100
closest_id = np.argmin(convert_distances)
my_ship_positions = np.where(my_ships)
ship_to_base_row = my_ship_positions[0][closest_id]
ship_to_base_col = my_ship_positions[1][closest_id]
my_ship_pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
ship_to_base_k = my_ship_pos_to_k[
ship_to_base_row*grid_size + ship_to_base_col]
to_base_directions = get_dir_from_target(
ship_to_base_row, ship_to_base_col, convert_row, convert_col,
grid_size)
for d in to_base_directions:
      if d not in all_ship_scores[ship_to_base_k][6]:
all_ship_scores[ship_to_base_k][6].append(d)
print(observation['step'], "Moving ship to restored base",
ship_to_base_row, ship_to_base_col, convert_row, convert_col)
all_ship_scores[ship_to_base_k][1][convert_row, convert_col] = 1e9
else:
# Don't gather
# Add some small positive noise to the establish base score, away from
# the current square - this ensures ships keep moving around when I don't
# plan on restoring my last destroyed base
# Move ships closer to each other if we want to convert a base but they are
# not able to defend it
# Send all to the least dense opponent point that is still close to halite
# and my other bases
opponent_density = smooth2d(opponent_ships+opponent_bases,
smooth_kernel_dim=5)
desirability_score = 500*np.maximum(
0, 0.3-np.abs(0.3-my_base_density))+(halite_density/10)-100*(
opponent_density)
best_gather_locations = np.where(
desirability_score == desirability_score.max())
gather_row = best_gather_locations[0][0]
gather_col = best_gather_locations[1][0]
# lowest_densities = np.where(opponent_density == opponent_density.min())
# halite_density = smooth2d(obs_halite)
# target_id = np.argmax(halite_density[lowest_densities])
# gather_row = lowest_densities[0][target_id]
# gather_col = lowest_densities[1][target_id]
num_zero_halite_ships = ((halite_ships == 0) & my_ships).sum()
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
all_ship_scores[ship_k][0][:] *= 0
if can_deposit_halite:
# Gather with some low probability since we may not have enough halite
# to convert a ship (except when it is the last remaining zero halite
# ship)
if obs_halite[row, col] > 0 and np_rng.uniform() < 0.2 and (
halite_ships[row, col] > 0 or num_zero_halite_ships > 1):
all_ship_scores[ship_k][0][row, col] = 2e6
ensure_move_mask = 1e6*DISTANCE_MASKS[(gather_row, gather_col)]
else:
ensure_move_mask = np_rng.uniform(1e5, 1e9, (grid_size, grid_size))
ensure_move_mask[row, col] = 0
all_ship_scores[ship_k][2][:] += ensure_move_mask
can_deposit_halite = False
return all_ship_scores, can_deposit_halite, restored_base_pos
def edge_aware_3_avg(a, b, c, grid_size):
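  """Average three coordinates on the wrapping grid, shifting values near the
  opposite edge by grid_size so the mean is not pulled across the wrap."""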
# Decide if we should be edge aware near the high or low end
vals = np.array([a, b, c])
num_lower_half = (vals <= grid_size//2).sum()
if num_lower_half == 1:
near_low_edge = vals <= grid_size//4
if np.any(near_low_edge):
low_edge_id = np.where(near_low_edge)[0][0]
vals[low_edge_id] += grid_size
elif num_lower_half == 2:
near_high_edge = vals >= 3*grid_size//4
if np.any(near_high_edge):
high_edge_id = np.where(near_high_edge)[0][0]
vals[high_edge_id] -= grid_size
return int(np.round(vals.sum())/3) % grid_size
def consider_adding_strategic_bases(
config, observation, env_config, all_ship_scores, player_obs, convert_cost,
np_rng, history, player_influence_maps, obs_halite,
non_abandoned_base_pos, all_base_pos, halite_ships,
my_nearest_ship_distances, my_nearest_ship_distances_raw,
opponent_nearest_ship_distances, evaluation_add_interval=15):
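  """Decide whether, where and how to add an additional strategic base.

  Square desirability combines distance to opponent bases and ships, current
  and potential halite, the target spacing to my existing bases (including a
  triangle criterion when I have two or more bases) and whether the square
  can be defended after conversion. A base is only added at fixed evaluation
  intervals when my ship count supports more bases.
  """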
opponent_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']][1:]).sum(0) > 0
my_ships = observation['rewards_bases_ships'][0][2]
my_ship_halite_on_board = halite_ships[my_ships]
my_ship_positions = np.where(my_ships)
my_ship_pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
num_bases = all_base_pos[0].size
num_non_abandoned_bases = non_abandoned_base_pos[0].size
num_abandoned_bases = num_bases - num_non_abandoned_bases
grid_size = opponent_bases.shape[0]
convert_unavailable_positions = np.zeros(
(grid_size, grid_size), dtype=np.bool)
target_strategic_base_distance = config['target_strategic_base_distance']
my_stacked_ship_distances = np.stack(my_nearest_ship_distances_raw)
spawn_cost = env_config.spawnCost
base_added = False
added_base_pos = None
# Decide *WHERE* to construct a new strategic base
# Determine if a converted square can be defended by the second closest of
# my ships (the closest would be converted)
second_closest_ids = np.argsort(my_stacked_ship_distances, 0)[1].flatten()
subset_ids = (second_closest_ids, np.repeat(np.arange(grid_size), grid_size),
np.tile(np.arange(grid_size), grid_size))
my_second_closest_ship_distances = my_stacked_ship_distances[
subset_ids].reshape((grid_size, grid_size))
can_defend_desirability = my_second_closest_ship_distances < (
opponent_nearest_ship_distances)
# Compute the desirability for each square to establish a new base.
# We want bases that are far away from opponent bases (critical), close to
# our other bases (but not too close), close to current and future potential
# halite, far away from opponent ships and boxing in a large number of
# future farming halite.
influence_desirability = player_influence_maps[0]-player_influence_maps[
1:].sum(0)
opponent_near_base_desirability = -smooth2d(
opponent_bases, smooth_kernel_dim=6)
opponent_distant_base_desirability = -smooth2d(
opponent_bases, smooth_kernel_dim=10)
near_halite_desirability = smooth2d(obs_halite)-obs_halite
near_halite_desirability /= max(1e3, near_halite_desirability.max())
near_potential_halite_desirability = smooth2d(observation['halite'] > 0)-(
observation['halite'] > 0)
independent_base_distance_desirability = np.zeros((grid_size, grid_size))
independent_base_distance_desirabilities = []
for base_id in range(num_non_abandoned_bases):
base_row = non_abandoned_base_pos[0][base_id]
base_col = non_abandoned_base_pos[1][base_id]
target_distance_scores = (1-np.abs(
target_strategic_base_distance - DISTANCES[base_row, base_col])/(
target_strategic_base_distance))**2
independent_base_distance_desirabilities.append(target_distance_scores)
if num_non_abandoned_bases == 1:
independent_base_distance_desirability += target_distance_scores
near_base_desirability = np.zeros((grid_size, grid_size))
for base_id in range(num_bases):
base_row = all_base_pos[0][base_id]
base_col = all_base_pos[1][base_id]
near_base_desirability[ROW_COL_MAX_DISTANCE_MASKS[
base_row, base_col, 6]] -= 1
triangle_base_distance_desirability = np.zeros((grid_size, grid_size))
if num_non_abandoned_bases > 1:
# For each potential triangle (consider all pairs of bases): factor in the
# symmetry and amount of potential enclosed halite
triangle_desirabilities = []
for i in range(num_non_abandoned_bases-1):
first_row, first_col = (non_abandoned_base_pos[0][i],
non_abandoned_base_pos[1][i])
for j in range(i+1, num_non_abandoned_bases):
second_row, second_col = (non_abandoned_base_pos[0][j],
non_abandoned_base_pos[1][j])
col_diff = second_col-first_col
row_diff = second_row-first_row
col_distance = min(np.abs(col_diff), grid_size - np.abs(col_diff))
row_distance = min(np.abs(row_diff), grid_size - np.abs(row_diff))
distance = col_distance+row_distance
target_height = np.sqrt(target_strategic_base_distance**2-(
(target_strategic_base_distance/2)**2))*(np.sqrt(2)/2)
combined_distance_scores = independent_base_distance_desirabilities[
i]*independent_base_distance_desirabilities[j]
# Additionally, aim for triangles with equal angles - boost for min
# distance from the two optimal locations.
col_base_diff = np.abs(col_diff) if (
np.abs(col_diff) <= grid_size//2) else (grid_size-np.abs(col_diff))
if (col_diff < 0) != (np.abs(col_diff) <= grid_size//2):
left_vertex = (first_row, first_col)
if first_row < second_row:
if row_diff <= grid_size//2:
row_base_diff = row_diff
else:
row_base_diff = row_diff-grid_size
else:
# This only happens with equal rows because of the np.where row
# order
if (-row_diff) <= grid_size//2:
row_base_diff = row_diff
else:
row_base_diff = row_diff+grid_size
else:
left_vertex = (second_row, second_col)
          if second_row < first_row:
# This never happens because of the np.where row order
if (-row_diff) <= grid_size//2:
row_base_diff = (-row_diff)
else:
row_base_diff = -row_diff-grid_size
else:
if row_diff <= grid_size//2:
row_base_diff = -row_diff
else:
row_base_diff = -row_diff+grid_size
base_vector = np.array([row_base_diff, col_base_diff])
found_it = False
correction = 0
correction_range = [-5, 5]
iteration = 0
# Use binary search to get the optimal triangle points
while not found_it and iteration < 10:
orthogonal_vector = np.array([col_base_diff, -row_base_diff])
orthogonal_vector = orthogonal_vector/np.linalg.norm(
orthogonal_vector)
dot_product = np.dot(orthogonal_vector, np.array([1, 0]))
angle = np.arccos(dot_product)
height_divisor = max([
np.cos(np.abs(angle)), np.sin(angle), np.sin(angle+np.pi)])
orthogonal_vector = orthogonal_vector*(
target_height/height_divisor+correction)
mid_point = (left_vertex[0]+base_vector[0]/2,
left_vertex[1]+base_vector[1]/2)
first_optimal = (
int(np.round(mid_point[0]+orthogonal_vector[0]) % grid_size),
int(np.round(mid_point[1]+orthogonal_vector[1]) % grid_size))
second_optimal = (
int(np.round(mid_point[0]-orthogonal_vector[0]) % grid_size),
int(np.round(mid_point[1]-orthogonal_vector[1]) % grid_size))
distance_to_first = DISTANCES[first_optimal][first_row, first_col]
# print(observation['step'], distance_to_first, correction, iteration)
if distance_to_first > target_strategic_base_distance:
correction_range[1] = correction
elif distance_to_first < target_strategic_base_distance:
correction_range[0] = correction
else:
found_it = True
iteration += 1
correction = (correction_range[0]+correction_range[1])/2
first_optimal_center = (
edge_aware_3_avg(first_optimal[0], first_row, second_row, grid_size),
edge_aware_3_avg(first_optimal[1], first_col, second_col, grid_size))
second_optimal_center = (
edge_aware_3_avg(
second_optimal[0], first_row, second_row, grid_size),
edge_aware_3_avg(
second_optimal[1], first_col, second_col, grid_size))
# Give a penalty to the first or second optimal point as a function of
# the current halite and halite potential of the centres of the
# triangles
first_center_score = near_halite_desirability[first_optimal_center] + (
near_potential_halite_desirability[first_optimal_center])/15*(
1-observation['relative_step'])+10*near_base_desirability[
first_optimal]
second_center_score = near_halite_desirability[second_optimal_center] + (
near_potential_halite_desirability[second_optimal_center])/15*(
1-observation['relative_step'])+10*near_base_desirability[
second_optimal]
first_optimal_addition = 5*min(0.2, max(
0, second_center_score-first_center_score))
second_optimal_addition = 5*min(0.2, max(
0, first_center_score-second_center_score))
optimal_min_distances = np.minimum(
DISTANCES[first_optimal]+first_optimal_addition,
DISTANCES[second_optimal]+second_optimal_addition)
optimal_mask_scores = np.exp(-optimal_min_distances)
triangle_desirability = combined_distance_scores*optimal_mask_scores
triangle_desirabilities.append((
first_row, first_col, second_row, second_col, distance,
triangle_desirability))
# if observation['step'] == 47:
# import pdb; pdb.set_trace()
# x=1
if distance <= target_strategic_base_distance+2:
triangle_base_distance_desirability += triangle_desirability
new_base_desirability = 100*near_base_desirability + config[
'target_strategic_influence_desirability_multiplier']*(
influence_desirability) + 2*opponent_near_base_desirability + (
opponent_distant_base_desirability) + near_halite_desirability + (
near_potential_halite_desirability/config[
'target_strategic_potential_divisor']*(
1-observation['relative_step'])) + (config[
'target_strategic_independent_base_distance_multiplier']*(
independent_base_distance_desirability) + config[
'target_strategic_triangle_weight']*(
triangle_base_distance_desirability))*(
opponent_near_base_desirability > -0.2) + ( # Min D 5
0.5*can_defend_desirability)
bad_positions = (near_base_desirability < 0) | (
opponent_near_base_desirability < -0.2)
any_valid_locations = np.any(~bad_positions)
if any_valid_locations:
new_base_desirability[bad_positions] = new_base_desirability[
~bad_positions].min()
else:
# print(observation['step'],
# "Not constructing a new base since there are no valid locations")
new_base_desirability *= 0
# if observation['step'] == 211:
# import pdb; pdb.set_trace()
# x=1
# if triangle_base_distance_desirability.max() > 0:
# import pdb; pdb.set_trace()
# x=1
  # Give a bonus to the previously selected best square if we already decided
  # to create a base at that location
if history['construct_strategic_base_position']:
new_base_desirability[history['construct_strategic_base_position']] += 1
# Decide *IF* we should add a strategic base
  # Always add a strategic base early on in the game
# Later on, only decide to add a base at fixed intervals if
# - There is significant halite left to be mined AND
# - I have plenty of ships relative to my number of bases - give a boost to
# the target number of bases if I am currently winning
num_my_ships = observation['rewards_bases_ships'][0][2].sum()
current_scores = history['current_scores']
ahead_in_score = (current_scores[0] == current_scores.max()) and np.all((
current_scores[0]-3*spawn_cost) >= current_scores[1:])
current_halite_sum = history['current_halite_sum']
winning_clearly = ahead_in_score and (
current_halite_sum[0] >= (current_halite_sum.max()-2*spawn_cost))
game_almost_over = observation['relative_step'] >= 0.8
strategic_additional_bases = num_my_ships/config[
'target_strategic_num_bases_ship_divisor'] - 0.5*history[
'num_destroyed_bases']
if not ahead_in_score and strategic_additional_bases > 2/0.8:
# Don't create too many bases if I am not winning clearly (relates to
    # a fourth base and beyond in the case of no base losses)
strategic_additional_bases *= 0.8
my_target_num_non_abandoned_bases = int(1+strategic_additional_bases)
if winning_clearly and not game_almost_over:
if obs_halite.sum() > config['min_halite_to_stop_early_hunt']:
my_target_num_non_abandoned_bases += 1
else:
# Don't reconstruct strategic bases towards the end of the game as they
# get destroyed
if observation['relative_step'] >= 0.8:
my_target_num_non_abandoned_bases -= 1
if observation['relative_step'] >= 0.9:
my_target_num_non_abandoned_bases -= 1
if observation['relative_step'] >= 0.95:
my_target_num_non_abandoned_bases -= 1
if observation['step'] % evaluation_add_interval == 0:
# print(observation['step'], my_target_num_non_abandoned_bases,
# num_non_abandoned_bases)
add_base = my_target_num_non_abandoned_bases > (
num_non_abandoned_bases + 0.5*(1+num_abandoned_bases))
history['add_strategic_base'] = add_base
else:
consider_adding_base = my_target_num_non_abandoned_bases > (
num_non_abandoned_bases)
add_base = history['add_strategic_base']
add_base = add_base and consider_adding_base
history['add_strategic_base'] = add_base
# if observation['step'] == 97:
# import pdb; pdb.set_trace()
# Decide *HOW* to add a strategic base
should_spawn_base_next_step = None
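  # If we add a base: pick the square with the highest desirability, send the
  # nearest suitable ship on a convert mission (preferring ships whose cargo
  # helps cover the conversion cost) and, if needed, move the second closest
  # ship towards the new base so that it can be defended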
if add_base and any_valid_locations:
best_positions = np.where(
new_base_desirability == new_base_desirability.max())
add_strategic_base_position = (
best_positions[0][0], best_positions[1][0])
history['construct_strategic_base_position'] = (
add_strategic_base_position)
# print(observation['step'], add_strategic_base_position)
# If we can afford to create and defend: go ahead and do so!
# Otherwise, start saving up
near_new_base_ship_distances = DISTANCES[add_strategic_base_position][
my_ships]
base_position_is_defended = can_defend_desirability[
add_strategic_base_position]
convert_mission_cost = int(
not(base_position_is_defended))*spawn_cost + convert_cost
near_ship_scores = 100*near_new_base_ship_distances-np.maximum(
my_ship_halite_on_board, max(0, convert_mission_cost-player_obs[0]))
nearest_ship_id = np.argmin(near_ship_scores)
near_ship_position = (my_ship_positions[0][nearest_ship_id],
my_ship_positions[1][nearest_ship_id])
near_ship_halite = halite_ships[near_ship_position]
required_halite_to_convert = convert_mission_cost - near_ship_halite
requested_save_conversion_budget = max(0, required_halite_to_convert)
distance_to_conversion_square = DISTANCES[add_strategic_base_position][
near_ship_position]
conversion_ship_pos = near_ship_position[0]*grid_size+near_ship_position[
1]
conversion_ship_k = my_ship_pos_to_k[conversion_ship_pos]
if required_halite_to_convert <= player_obs[0] and not (
distance_to_conversion_square == 0 and not None in all_ship_scores[
conversion_ship_k][9]):
# Issue the conversion ship with a conversion objective
all_ship_scores[conversion_ship_k][2][add_strategic_base_position] = 1e12
convert_unavailable_positions[near_ship_position] = 1
# Decide if the second closest ship should be used to defend the future
# base
second_closest_id = np.argmin(
near_new_base_ship_distances + 100*(
np.arange(num_my_ships) == nearest_ship_id))
second_closest_distance = near_new_base_ship_distances[
second_closest_id]
# import pdb; pdb.set_trace()
move_second_closest_to_base = second_closest_distance + 2 > int(
opponent_nearest_ship_distances[add_strategic_base_position])
if move_second_closest_to_base:
second_closest_row = my_ship_positions[0][second_closest_id]
second_closest_col = my_ship_positions[1][second_closest_id]
towards_base_mask = get_mask_between_exclude_ends(
second_closest_row, second_closest_col,
add_strategic_base_position[0], add_strategic_base_position[1],
grid_size)
second_closest_ship_pos = grid_size*second_closest_row+(
second_closest_col)
second_closest_ship_k = my_ship_pos_to_k[second_closest_ship_pos]
all_ship_scores[second_closest_ship_k][0][towards_base_mask] += 3e6
convert_unavailable_positions[
second_closest_row, second_closest_col] = 1
# if observation['step'] == 169:
# import pdb; pdb.set_trace()
# x=1
# Return the conversion square when I am converting this step
if distance_to_conversion_square == 0:
proceed_conversion = True
if not(base_position_is_defended):
# Make sure the base considers a spawn in the next step so it can be
# defended
# import pdb; pdb.set_trace()
proceed_conversion = opponent_nearest_ship_distances[
add_strategic_base_position] > 2
if proceed_conversion:
should_spawn_base_next_step = add_strategic_base_position
if proceed_conversion:
base_added = True
added_base_pos = add_strategic_base_position
history['add_strategic_base'] = False
requested_save_conversion_budget = max(
0, requested_save_conversion_budget-convert_cost)
if move_second_closest_to_base:
for score_id in range(2):
all_ship_scores[second_closest_ship_k][score_id][
add_strategic_base_position] += 3e6
# Move onto the base if the ship can safely do so
second_to_base_distance = DISTANCES[add_strategic_base_position][
second_closest_row, second_closest_col]
if second_to_base_distance == 1:
to_base_dir = get_dir_from_target(
second_closest_row, second_closest_col,
add_strategic_base_position[0], add_strategic_base_position[1],
grid_size)[0]
if to_base_dir in all_ship_scores[second_closest_ship_k][9] and (
not to_base_dir in all_ship_scores[
second_closest_ship_k][6]):
all_ship_scores[second_closest_ship_k][6].append(to_base_dir)
else:
# If I have a ship that can move towards the desired convert position and
# the target square is currently not defended and we would otherwise
# proceed with the conversion: move the ship closer
if not base_position_is_defended and (
required_halite_to_convert-player_obs[0]) <= spawn_cost:
# import pdb; pdb.set_trace()
second_closest_id = np.argmin(
near_new_base_ship_distances + 100*(
np.arange(num_my_ships) == nearest_ship_id))
second_closest_row = my_ship_positions[0][second_closest_id]
second_closest_col = my_ship_positions[1][second_closest_id]
towards_base_mask = get_mask_between_exclude_ends(
second_closest_row, second_closest_col,
add_strategic_base_position[0], add_strategic_base_position[1],
grid_size)
second_closest_ship_pos = grid_size*second_closest_row+(
second_closest_col)
second_closest_ship_k = my_ship_pos_to_k[second_closest_ship_pos]
all_ship_scores[second_closest_ship_k][0][towards_base_mask] += 3e6
convert_unavailable_positions[
second_closest_row, second_closest_col] = 1
else:
history['construct_strategic_base_position'] = None
requested_save_conversion_budget = 0
# if observation['step'] == 275:
# import pdb; pdb.set_trace()
# x=1
return (all_ship_scores, base_added, added_base_pos,
requested_save_conversion_budget, convert_unavailable_positions,
should_spawn_base_next_step)
def protect_base(observation, env_config, all_ship_scores, player_obs,
defend_base_pos, history, base_override_move_positions,
ignore_defender_positions, max_considered_attackers=3,
halite_on_board_mult=1e-6):
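  # Determine whether the base at defend_base_pos is under threat and, if so,
  # summon my closest suitable (preferably zero halite) ships towards or onto
  # the base by boosting their scores. Returns the updated ship scores plus
  # bookkeeping on which ships defend the base and which ones may ignore base
  # collisions when returning.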
opponent_ships = np.stack([
rbs[2] for rbs in observation['rewards_bases_ships'][1:]]).sum(0) > 0
defend_base_ignore_collision_key = None
base_protected = True
my_defend_base_ship_positions = np.zeros_like(opponent_ships)
ignore_base_collision_ship_keys = []
base_defense_keys = []
if opponent_ships.sum():
opponent_ship_count = opponent_ships.sum()
grid_size = opponent_ships.shape[0]
obs_halite = np.maximum(0, observation['halite'])
collect_rate = env_config.collectRate
obs_halite[obs_halite < 1/collect_rate] = 0
my_ship_count = len(player_obs[2])
base_row, base_col = defend_base_pos
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
opponent_ship_distances = DISTANCES[(base_row, base_col)][opponent_ships]+(
halite_on_board_mult*halite_ships[opponent_ships])
sorted_opp_distance = np.sort(opponent_ship_distances)
ship_keys = list(player_obs[2].keys())
ship_base_distances = np.zeros((my_ship_count, 8))
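    # Columns of ship_base_distances: 0 current distance to the base,
    # 1 approximate distance by the end of the next turn, 2 ship halite,
    # 3 row, 4 col, 5 can not move to the base flag, 6 rather not move to the
    # base flag, 7 previous step box mission target distance (tie-breaker)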
# Go over all my ships and approximately compute how far they are expected
# to be from the base !with no halite on board! by the end of the next turn
# Approximate since returning ships are expected to always move towards the
# base and other ships are assumed to be moving away.
attack_opponent_campers = history['attack_opponent_campers']
for i, ship_k in enumerate(player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_scores = all_ship_scores[ship_k]
ship_halite = player_obs[2][ship_k][1]
current_distance = DISTANCES[(base_row, base_col)][row, col]
is_returning = ship_scores[1][base_row, base_col] > max([
ship_scores[0].max(), ship_scores[2].max(), ship_scores[3].max()])
to_base_directions = get_dir_from_target(
row, col, base_row, base_col, grid_size)
rather_not_move_to_base = len(
set(to_base_directions) & set(ship_scores[9])) == 0
can_not_move_to_base = int(
(ship_halite != 0) and rather_not_move_to_base) or (
ignore_defender_positions[row, col])
prev_step_box_ship_target_distance = 10-history['prev_step'][
'ships_on_box_mission'].get(ship_k, 10)
# Exclude ships that attack opponent campers from the base defense logic
if ship_k in attack_opponent_campers:
current_distance = 1e2
ship_halite = 1e6
can_not_move_to_base = True
ship_base_distances[i, 0] = current_distance
ship_base_distances[i, 1] = current_distance + 1 - int(2*is_returning)
ship_base_distances[i, 2] = ship_halite
ship_base_distances[i, 3] = row
ship_base_distances[i, 4] = col
ship_base_distances[i, 5] = can_not_move_to_base
ship_base_distances[i, 6] = int(rather_not_move_to_base)
ship_base_distances[i, 7] = prev_step_box_ship_target_distance
weighted_distances = ship_base_distances[:, 1] + halite_on_board_mult*(
ship_base_distances[:, 2])
defend_distances = ship_base_distances[:, 0] + halite_on_board_mult*(
ship_base_distances[:, 2]) + 100*ship_base_distances[:, 5]
# Update the defend distances so we allow a ship with halite to move onto
# the base when it is one step away, and the closest opponent is at least
# two steps away, or is one step away with strictly more halite on board.
if sorted_opp_distance[0] > defend_distances.min():
# I have at least one ship that can be used to defend the base
good_defense_ids = np.where(np.logical_and(
np.floor(defend_distances) <= max(1, np.floor(defend_distances.min())),
defend_distances < sorted_opp_distance[0]))[0]
# Pick the maximum distance (max halite) of the min step ids that can
# defend the base
best_good_defense_id = np.argmax(defend_distances[good_defense_ids])
if defend_distances[good_defense_ids[best_good_defense_id]] > 0:
defend_distances[good_defense_ids[best_good_defense_id]] = -0.1
next_ship_distances_ids = np.argsort(weighted_distances)
next_ship_distances_sorted = weighted_distances[next_ship_distances_ids]
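    # Worst case: assume every opponent ship moves one step closer to the
    # base on the next turn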
worst_case_opponent_distances = sorted_opp_distance-1
num_considered_distances = min([
max_considered_attackers, my_ship_count, opponent_ship_count])
opponent_can_attack_sorted = next_ship_distances_sorted[
:num_considered_distances] > worst_case_opponent_distances[
:num_considered_distances]
base_protected = worst_case_opponent_distances[0] > 0
# if observation['step'] == 186:
# import pdb; pdb.set_trace()
if np.any(opponent_can_attack_sorted):
# Update the defend distances to make sure that two zero halite ships
# switch position when one is at the base and the other is at a distance
# of one - that way the second ship has no halite and can defend the base
# on the next step
if num_considered_distances > 1 and opponent_can_attack_sorted[1]:
num_defenders = defend_distances.size
argsort_defend = np.argsort(defend_distances)
sorted_manh_distances = ship_base_distances[argsort_defend, 0]
if (sorted_manh_distances[0] == 0 and sorted_manh_distances[1] == 1):
# Both should not have halite on board
if defend_distances[argsort_defend[1]] == 1 or int(
next_ship_distances_sorted[1] == 2):
defend_distances[argsort_defend[1]] -= 1.2
elif worst_case_opponent_distances[0] == 0 and (
worst_case_opponent_distances[1] in [0, 1]) and (
defend_distances.min() <= 0) and 2 in defend_distances and (
np.logical_and(defend_distances > 1,
defend_distances < 2).sum() > 0):
# FUTURE WORK: remove, this looks like a special case of the next
# elif
print("PROBABLY REDUNDANT - LOOK INTO ME")
defend_distances[np.where(defend_distances==2)[0][0]] = 1
elif num_defenders > 2 and (defend_distances[argsort_defend[0]]-1) <= (
worst_case_opponent_distances[0]) and (
defend_distances[argsort_defend[2]]-1) <= (
worst_case_opponent_distances[1]) and ship_base_distances[
argsort_defend[1], 2] > 0:
# Switch the second and third defenders when the second defender has
# halite on board and the third doesn't but can still defend the base
defend_score_diff = defend_distances[argsort_defend[2]] - (
defend_distances[argsort_defend[1]]) + halite_on_board_mult
defend_distances[argsort_defend[1]] += defend_score_diff
# Resolve ties by picking the ships that can safely move towards the
# base
defend_distances += 1e-9*ship_base_distances[:, 6]
      # Resolve ties by picking the ships that were not on a box-in mission
      # in the past step
defend_distances += 1e-10*ship_base_distances[:, 7]
# Summon the closest K agents towards or onto the base to protect it.
# When the ship halite is zero, we should aggressively attack base
# raiders
num_attackers = 1+np.where(opponent_can_attack_sorted)[0][-1]
defend_distances_ids = np.argsort(defend_distances)
# if observation['step'] == 178:
# import pdb; pdb.set_trace()
for i in range(num_attackers):
defend_id = defend_distances_ids[i]
if opponent_can_attack_sorted[i] or defend_distances[defend_id] < 0:
# Very simple defense strategy for now: prefer returning to the
          # base by increasing the gather score for all squares between the
# current position and the only base. If my ship is currently on the
# base: keep it there
ship_id = defend_distances_ids[i]
distance_to_base = ship_base_distances[ship_id, 0]
ship_k = ship_keys[ship_id]
base_defense_keys.append(ship_k)
ship_scores = list(all_ship_scores[ship_k])
ship_halite = int(ship_base_distances[ship_id, 2])
row = int(ship_base_distances[ship_id, 3])
col = int(ship_base_distances[ship_id, 4])
if distance_to_base > 0 or i == 0:
if distance_to_base <= 1:
# Stay or move to the base; or stay 1 step away
if i == 0:
ship_scores[1][base_row, base_col] += 1e6*(
3+max_considered_attackers-i)
elif obs_halite[row, col] == 0 or (
worst_case_opponent_distances[i] > distance_to_base):
base_override_move_positions[row, col] = 1
ship_scores[0][row, col] += 2e6
if None in ship_scores[9]:
# Stay close to the base when defending
ship_scores[6].append(None)
if halite_ships[row, col] == 0 or (i == 0 and (
worst_case_opponent_distances[0] > halite_on_board_mult*(
ship_base_distances[defend_id, 2]))):
ship_scores[11] = max_considered_attackers-i
if defend_base_ignore_collision_key is None:
defend_base_ignore_collision_key = ship_k
else:
# Set the base as the target and override the base return
# synchronization
towards_base_mask = get_mask_between_exclude_ends(
row, col, base_row, base_col, grid_size)
ship_scores[0][towards_base_mask] += 9e5*(
1+max_considered_attackers-i)
ship_scores[1][base_row, base_col] += 1e6*(
1+max_considered_attackers-i)
ignore_base_collision_ship_keys.append(ship_k)
# Defend the base without fear if I have no halite on board
# Only consider staying at the current position or moving towards
# the base in order to increase the action execution priority
if ship_halite == 0:
ship_scores[6] = copy.copy(MOVE_DIRECTIONS)
ship_scores[7] = []
ship_scores[8] = []
ship_scores[9] = copy.copy(MOVE_DIRECTIONS)
ship_scores[11] = max_considered_attackers-i
else:
# Still move towards the base to defend it when there is halite
# on board as long as it does not mean selecting a 1-step bad
# action
base_defend_dirs = get_dir_from_target(
row, col, base_row, base_col, grid_size)
not_bad_defend_dirs = list(set(ship_scores[9]) & set(
base_defend_dirs))
ship_scores[6] = list(
set(ship_scores[6] + not_bad_defend_dirs))
my_defend_base_ship_positions[row, col] = 1
all_ship_scores[ship_k] = tuple(ship_scores)
return (all_ship_scores, defend_base_ignore_collision_key,
base_protected, ignore_base_collision_ship_keys,
my_defend_base_ship_positions, base_override_move_positions,
base_defense_keys)
def update_occupied_count(
row, col, occupied_target_squares, occupied_squares_count):
occupied_target_squares[row, col] = 1
occupied_squares_count[row, col] += 1
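# Rescue missions: identify my non zero halite ships that are chased or boxed
# in by opponents with less halite on board, pair them with a nearby zero
# halite "rescuer" ship and escort them back to the most attractive base by
# boosting the relevant ship scores. Ongoing escorts are tracked in
# history['escort_to_base_list'].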
def update_scores_rescue_missions(
config, all_ship_scores, stacked_ships, observation, halite_ships,
steps_remaining, player_obs, obs_halite, history,
opponent_ships_sensible_actions, weighted_base_mask, my_bases, np_rng,
main_base_distances, my_defend_base_ship_positions, safe_to_return_halites,
player_influence_maps, nearest_base_distances, max_box_distance=5,
max_1_step_rescue_risk=0.02):
grid_size = stacked_ships.shape[1]
opponent_ships = stacked_ships[1:].sum(0) > 0
my_zero_halite_ships = stacked_ships[0] & (halite_ships == 0) & (
~my_defend_base_ship_positions)
  player_ids = -1*np.ones((grid_size, grid_size), dtype=int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
# Exclude my campers that are not available for rescuing
camping_ships_strategy = history['camping_ships_strategy']
for ship_k in camping_ships_strategy:
if not camping_ships_strategy[ship_k][3]:
camping_row, camping_col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
my_zero_halite_ships[camping_row, camping_col] = 0
opponent_zero_halite_ships = opponent_ships & (halite_ships == 0)
opponent_zero_halite_ship_density = smooth2d(
opponent_zero_halite_ships, smooth_kernel_dim=4)
zero_halite_pos = np.where(my_zero_halite_ships)
  on_rescue_mission = np.zeros((grid_size, grid_size), dtype=bool)
  rescue_move_positions_taken = np.zeros((grid_size, grid_size), dtype=bool)
pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
_, base_distances, _ = get_nearest_base_distances(
grid_size, history['my_base_not_attacked_positions'], observation)
my_ships = observation['rewards_bases_ships'][0][2]
my_bases = np.copy(observation['rewards_bases_ships'][0][1])
for base_pos in history['my_base_not_attacked_positions']:
# Note: Make sure to work on a copy of the original base observation!
my_bases[base_pos] = 0
my_base_locations = np.where(my_bases)
# main_base_location = np.where(main_base_distances == 0)
  # Identify the squares that are surrounded by an opponent by computing,
  # for each opponent and each direction, the minimum halite over that
  # opponent's ships whose box in that direction covers the square
num_players = stacked_ships.shape[0]
opponent_min_halite_box_dirs = 1e6*np.ones(
(num_players, 4, grid_size, grid_size))
opponents_num_nearby = np.zeros(
(num_players, grid_size, grid_size))
for i in range(1, num_players):
opponent_ship_pos = np.where(stacked_ships[i])
for j in range(stacked_ships[i].sum()):
row = opponent_ship_pos[0][j]
col = opponent_ship_pos[1][j]
opponents_num_nearby[i][DISTANCES[row, col] <= 7] += 1
opponent_ship_halite = halite_ships[row, col]
for dir_id, d in enumerate(NOT_NONE_DIRECTIONS):
mask = ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS[row, col, d]
opponent_min_halite_box_dirs[i, dir_id][mask] = np.minimum(
opponent_min_halite_box_dirs[i, dir_id][mask],
opponent_ship_halite)
opponent_min_halite_box_all_dirs = np.max(opponent_min_halite_box_dirs, 1)
opponent_min_halite_box_all_dirs[opponents_num_nearby < 4] = 1e6
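  # Only treat an opponent as a potential boxer when it has at least 4 ships
  # within a distance of 7 of the considered square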
any_opponent_min_halite_box_all_dirs = np.min(
opponent_min_halite_box_all_dirs, 0)
my_boxed_ships = my_ships & (
halite_ships > any_opponent_min_halite_box_all_dirs)
boxed_ships = []
if np.any(my_boxed_ships):
my_boxed_pos = np.where(my_boxed_ships)
for box_id in range(my_boxed_pos[0].size):
row = my_boxed_pos[0][box_id]
col = my_boxed_pos[1][box_id]
ship_k = pos_to_k[row*grid_size + col]
boxed_ships.append(ship_k)
# if observation['step'] == 36:
# import pdb; pdb.set_trace()
# print("Boxed in ships", observation['step'], my_boxed_pos)
# Consider chased or boxed in ships
chased_ships = list(history['chase_counter'][0].keys())
chased_or_boxed = list(set(chased_ships+boxed_ships))
# Put the ships that are on the escort to base list first
escort_to_base_ships = list(set([
e[0] for e in history['escort_to_base_list'] if (e[0] in player_obs[2])]))
if len(escort_to_base_ships):
chased_or_boxed = list(escort_to_base_ships + list(set(
chased_or_boxed)-set(escort_to_base_ships)))
already_escorted_ships = []
num_considered_bases = my_base_locations[0].size
for ship_k in chased_or_boxed:
recompute_pos = False
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_scores = all_ship_scores[ship_k]
valid_directions = copy.copy(ship_scores[6])
should_rescue_is_chased = ship_k in history['chase_counter'] and len(
set(valid_directions) - set(ship_scores[7]+ship_scores[8])) == 0 and (
history['chase_counter'][0][ship_k][1] > 3)
# Only call for help when the considered ship is nearly boxed in in all
# directions and has been chased for a while
# Flag the ship for rescuing if there is no safe path to my nearest base
base_distances_ship = base_distances[:, row, col]
nearest_base_id = np.argmin(base_distances_ship)
# main_base_row = main_base_location[0][0]
# main_base_col = main_base_location[1][0]
# return_main_base_distance = main_base_distances[row, col]
# Returning to a base that is not the main base is also supported
# Select the target base as a function of the distance and influence score
# on the square halfway between my current square and the considered base
base_return_scores = np.zeros(num_considered_bases)
for base_id in range(num_considered_bases):
base_row = my_base_locations[0][base_id]
base_col = my_base_locations[1][base_id]
base_distance = DISTANCES[row, col][base_row, base_col]
relative_pos = get_relative_position(
row, col, base_row, base_col, grid_size)
mid_row = int(row + relative_pos[0]/2) % grid_size
mid_col = int(col + relative_pos[1]/2) % grid_size
return_base_directions = get_dir_from_target(
row, col, base_row, base_col, grid_size)
can_move_to_base = False
for d in return_base_directions:
if d in valid_directions:
can_move_to_base = True
break
base_return_scores[base_id] = player_influence_maps[
0, mid_row, mid_col] - (player_influence_maps[
1:, mid_row, mid_col].sum() + base_distance/1.5) + 2*int(
can_move_to_base)
best_base_id = np.argmax(base_return_scores)
return_base_row = my_base_locations[0][best_base_id]
return_base_col = my_base_locations[1][best_base_id]
# if base_distances_ship[nearest_base_id] < (
# return_main_base_distance-1):
# return_base_row = my_base_locations[0][nearest_base_id]
# return_base_col = my_base_locations[1][nearest_base_id]
# else:
# return_base_row = main_base_row
# return_base_col = main_base_col
# if observation['step'] == 288:
# import pdb; pdb.set_trace()
return_base_directions = get_dir_from_target(
row, col, return_base_row, return_base_col, grid_size)
one_step_invalid = list(set(NOT_NONE_DIRECTIONS).difference(set(
all_ship_scores[ship_k][9])))
not_good_dirs = list(set(all_ship_scores[ship_k][7] + all_ship_scores[
ship_k][8] + one_step_invalid))
base_return_not_good_dirs = [d in not_good_dirs for d in (
return_base_directions)]
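    # Also flag the ship for rescuing when it carries halite, has no good
    # direction towards the selected return base, and carries at least the
    # safe_to_return threshold (i.e. it can not safely return on its own)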
should_rescue_can_not_return_base = (halite_ships[row, col] > 0) and (
len(set(return_base_directions) & set(
all_ship_scores[ship_k][6])) == 0 or (
np.all(np.array(base_return_not_good_dirs)))) and (
halite_ships[row, col] >= safe_to_return_halites[row, col])
if (should_rescue_is_chased or should_rescue_can_not_return_base) and (
base_distances_ship[nearest_base_id] > 2):
# import pdb; pdb.set_trace()
# print(observation['step'], row, col, return_base_row, return_base_col)
# if observation['step'] == 56 and row == 18:
# import pdb; pdb.set_trace()
# if ship_k in boxed_ships and not ship_k in chased_ships:
# import pdb; pdb.set_trace()
# x=1
nearly_boxed_in = True
if should_rescue_is_chased and not should_rescue_can_not_return_base:
valid_directions = valid_directions if len(ship_scores[8]) == 0 else (
ship_scores[17]) # Also consider bad N-step directions
threat_opponents = opponent_ships & (halite_ships < halite_ships[
row, col])
for d in NOT_NONE_DIRECTIONS:
opposite_d = OPPOSITE_MAPPING[d]
rel_move = RELATIVE_DIR_MAPPING[d]
ref_square = ((row + max_box_distance*rel_move[0]) % grid_size,
(col + max_box_distance*rel_move[1]) % grid_size)
dir_mask = HALF_PLANES_CATCH[ref_square][opposite_d] & (
ROW_COL_MAX_DISTANCE_MASKS[
ref_square[0], ref_square[1], max_box_distance])
num_threats = (dir_mask & threat_opponents).sum()
if num_threats == 0:
nearly_boxed_in = False
break
if nearly_boxed_in and my_zero_halite_ships.sum():
friendly_zero_halite_distances = DISTANCES[row, col][zero_halite_pos]
min_halite_distance = friendly_zero_halite_distances.min()
# if None in valid_directions and None in ship_scores[8] and len(
# valid_directions) == 1 and len(ship_scores[17]) > 1:
# # Edge case - we should consider all n step bad directions rather
# # than only considering staying still when rescuing a ship
# valid_directions = ship_scores[17]
# this_ship_scores = list(all_ship_scores[ship_k])
# this_ship_scores[6] = copy.copy(valid_directions)
# this_ship_scores[8] = copy.copy(valid_directions)
# this_ship_scores[14] = this_ship_scores[18]
# all_ship_scores[ship_k] = tuple(this_ship_scores)
# if num_considered_bases > 1:
# # import pdb; pdb.set_trace()
# print(observation['step'], row, col, return_base_row,
# return_base_col)
# Compute weights for all d1 and d2 friendly zero halite rescuers
num_d1_friends = (friendly_zero_halite_distances == 1).sum()
num_d2_friends = (friendly_zero_halite_distances == 2).sum()
any_good_d1_rescuers = False
any_good_d2_rescuers = False
if num_d1_friends > 0:
# import pdb; pdb.set_trace()
d1_rescuer_ids = np.where(friendly_zero_halite_distances == 1)[0]
d1_scores = np.zeros(num_d1_friends)
for rescuer_id in range(num_d1_friends):
rescuer_row = zero_halite_pos[0][d1_rescuer_ids[rescuer_id]]
rescuer_col = zero_halite_pos[1][d1_rescuer_ids[rescuer_id]]
to_rescuer_dir = get_dir_from_target(
row, col, rescuer_row, rescuer_col, grid_size)[0]
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
ship_k_not_immediate_bad = list(
set(all_ship_scores[ship_k][6]).union(
set(all_ship_scores[ship_k][17])))
aligned_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]) & set(return_base_directions))
good_rescuer = (to_rescuer_dir in valid_directions) or (
len(aligned_dirs) > 0)
d1_scores[rescuer_id] = -50 + 100*int(good_rescuer) - DISTANCES[
rescuer_row, rescuer_col][return_base_row, return_base_col]/1.5+(
player_influence_maps[0, rescuer_row, rescuer_col]) - (
player_influence_maps[1:, rescuer_row, rescuer_col].sum())
any_good_d1_rescuers = np.any(d1_scores > 0)
if num_d2_friends > 0:
# import pdb; pdb.set_trace()
d2_rescuer_ids = np.where(friendly_zero_halite_distances == 2)[0]
d2_scores = np.zeros(num_d2_friends)
for rescuer_id in range(num_d2_friends):
rescuer_row = zero_halite_pos[0][d2_rescuer_ids[rescuer_id]]
rescuer_col = zero_halite_pos[1][d2_rescuer_ids[rescuer_id]]
to_rescuer_dir = get_dir_from_target(
row, col, rescuer_row, rescuer_col, grid_size)
any_rescuer_dir_ind_valid_dir = False
for d in to_rescuer_dir:
any_rescuer_dir_ind_valid_dir = any_rescuer_dir_ind_valid_dir or(
d in valid_directions)
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
ship_k_not_immediate_bad = list(
set(all_ship_scores[ship_k][6]).union(
set(all_ship_scores[ship_k][17])))
aligned_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]) & set(return_base_directions))
good_rescuer = (any_rescuer_dir_ind_valid_dir and None in (
all_ship_scores[rescuer_k][6])) or (len(aligned_dirs) > 0)
d2_scores[rescuer_id] = -50 + 100*int(good_rescuer) - DISTANCES[
rescuer_row, rescuer_col][return_base_row, return_base_col]/1.5+(
player_influence_maps[0, rescuer_row, rescuer_col]) - (
player_influence_maps[1:, rescuer_row, rescuer_col].sum())
any_good_d2_rescuers = np.any(d2_scores > 0)
friendly_zero_halite_distance_d1_scores = np.copy(
            friendly_zero_halite_distances).astype(float)
if any_good_d1_rescuers:
# If there are multiple 1-distance ships: pick the rescuer that is
# closest to the target base which I can move to safely
# import pdb; pdb.set_trace()
friendly_zero_halite_distance_d1_scores[d1_rescuer_ids[
np.argmax(d1_scores)]] -= 1e-6
elif min_halite_distance == 1:
# Go for a 2-distance ship if I can move to it safely and I can't
# move safely to the 1-distance ship.
if any_good_d2_rescuers:
# Use a d2 rescuer instead
# import pdb; pdb.set_trace()
min_halite_distance = 2
else:
# Pick the best bad d1 rescuer
friendly_zero_halite_distance_d1_scores[d1_rescuer_ids[
np.argmax(d1_scores)]] -= 1e-6
# if observation['step'] >= 154:
# import pdb; pdb.set_trace()
        # Follow the nearest zero halite ship home if it is at a distance of
        # one from the ship
is_protected = False
if min_halite_distance == 1:
# Jointly move to the nearest weighted base and assume that the
# former zero halite position won't be attacked
# dm = DISTANCE_MASKS[(row, col)]
# base_scores = dm*weighted_base_mask*my_bases
# target_base = np.where(base_scores == base_scores.max())
# target_base = (target_base[0][0], target_base[1][0])
target_base = (return_base_row, return_base_col)
nearest_halite_id = np.argmin(
friendly_zero_halite_distance_d1_scores)
rescuer_row = zero_halite_pos[0][nearest_halite_id]
rescuer_col = zero_halite_pos[1][nearest_halite_id]
on_rescue_mission[row, col] = 1
on_rescue_mission[rescuer_row, rescuer_col] = 1
recompute_pos = True
to_rescuer_dir = get_dir_from_target(
row, col, rescuer_row, rescuer_col, grid_size)[0]
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
ship_k_not_immediate_bad = list(
set(all_ship_scores[ship_k][6]).union(
set(all_ship_scores[ship_k][17])))
aligned_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]) & set(return_base_directions))
# Don't risk moving onto the previous step zero halite square if a
# potential opponent has a considerable probability of engaging in
# 1-step risky actions
if len(aligned_dirs) == 0:
risk_score = 0
for d in NOT_NONE_DIRECTIONS:
neighbor_rescue_row, neighbor_rescue_col = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
if opponent_zero_halite_ships[
neighbor_rescue_row, neighbor_rescue_col]:
opponent_id = np.where(stacked_ships[
:, neighbor_rescue_row, neighbor_rescue_col])[0][0]
risk_score += history['zero_halite_move_behavior'][opponent_id][
'False_0']
shared_escape_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]))
if None in shared_escape_dirs:
shared_escape_dirs.remove(None)
if risk_score > max_1_step_rescue_risk and len(
shared_escape_dirs) > 0 and len(aligned_dirs) == 0:
# print("Avoiding risky rescue move", observation['step'], row,
# col, risk_score)
aligned_dirs = shared_escape_dirs
# elif risk_score > max_1_step_rescue_risk and OPPOSITE_MAPPING[
# to_rescuer_dir] in all_ship_scores[rescuer_k][6] and len(
# aligned_dirs) == 0 and (
# len(all_ship_scores[ship_k][6]) > 0 and (
# not None in all_ship_scores[ship_k][6])):
# # Also consider having the zero halite ship take the position of
# # the rescued ship if that is safe. This branch is likely
# # impossible
# import pdb; pdb.set_trace()
# x=1
override_override_mask = False
if (to_rescuer_dir not in ship_k_not_immediate_bad) or (
len(aligned_dirs) > 0):
# It is better to take a safe step with both ships if that option
# is available
# import pdb; pdb.set_trace()
if len(aligned_dirs) == 0 and not to_rescuer_dir in (
all_ship_scores[ship_k][6]):
all_ship_scores[ship_k][6].append(to_rescuer_dir)
else:
if len(aligned_dirs) > 1:
str_aligned_dirs = [d if d is not None else "None" for d in (
aligned_dirs)]
str_aligned_dirs.sort() # Set intersect is flaky
aligned_dirs = [d if d != "None" else None for d in (
str_aligned_dirs)]
override_override_mask = True
if override_override_mask:
aligned_dir = np_rng.choice(aligned_dirs)
rescuer_move_row, rescuer_move_col = move_ship_row_col(
rescuer_row, rescuer_col, aligned_dir, grid_size)
rescue_move_positions_taken[rescuer_move_row, rescuer_move_col] = 1
for score_id in range(3):
all_ship_scores[rescuer_k][score_id][
rescuer_move_row, rescuer_move_col] += 1e4
if not aligned_dir in all_ship_scores[ship_k][6]:
all_ship_scores[ship_k][6].append(aligned_dir)
escape_row, escape_col = move_ship_row_col(
row, col, aligned_dir, grid_size)
else:
# Consider escaping along the opposite side of the board if one
# side is much more congested
relative_pos = get_relative_position(
rescuer_row, rescuer_col, target_base[0], target_base[1],
grid_size)
mid_path_row_short = int(
rescuer_row + relative_pos[0]/2) % grid_size
mid_path_col_short = int(
rescuer_col + relative_pos[1]/2) % grid_size
mid_path_row_long = int(
rescuer_row - (grid_size - relative_pos[0])/2) % grid_size
mid_path_col_long = int(
rescuer_col - (grid_size - relative_pos[1])/2) % grid_size
start_rescuer_row = rescuer_row
if np.abs(relative_pos[0]) > 7:
# Determine start_rescuer_row
mid_density_short_row = player_influence_maps[
0, mid_path_row_short, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_short]).sum()
mid_density_long_row = player_influence_maps[
0, mid_path_row_long, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_long, mid_path_col_short]).sum()
distance_diff_short = grid_size-2*np.abs(relative_pos[0])
if mid_density_long_row - mid_density_short_row > (
distance_diff_short/2):
# import pdb; pdb.set_trace()
start_rescuer_row = mid_path_row_long
start_rescuer_col = rescuer_col
if np.abs(relative_pos[1]) > 7:
# Determine start_rescuer_col
mid_density_short_col = player_influence_maps[
0, mid_path_row_short, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_short]).sum()
mid_density_long_col = player_influence_maps[
0, mid_path_row_short, mid_path_col_long] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_long]).sum()
distance_diff_short = grid_size-2*np.abs(relative_pos[1])
if mid_density_long_col - mid_density_short_col > (
distance_diff_short):
# import pdb; pdb.set_trace()
start_rescuer_col = mid_path_col_long
increase_mask = get_mask_between_exclude_ends(
target_base[0], target_base[1], start_rescuer_row,
start_rescuer_col, grid_size)
# Avoid the rescued ship half plane if that leaves the rescuer with
# options
to_base_dirs = get_dir_from_target(
start_rescuer_row, start_rescuer_col, target_base[0],
target_base[1], grid_size)
valid_to_base_dirs = list(set(to_base_dirs) & set(
all_ship_scores[rescuer_k][6]))
if len(valid_to_base_dirs) > 1:
to_rescued_dir = OPPOSITE_MAPPING[to_rescuer_dir]
if to_rescued_dir in all_ship_scores[rescuer_k][6]:
# import pdb; pdb.set_trace()
increase_mask[HALF_PLANES_CATCH[
start_rescuer_row, start_rescuer_col][to_rescued_dir]] = 0
if len(all_ship_scores[rescuer_k][6]) == 1 and (
all_ship_scores[rescuer_k][6][0] is None):
# Take a move risk rather than staying still - otherwise the
# non zero halite ship is likely to get orphaned
# import pdb; pdb.set_trace()
risk_scores = np.zeros(len(to_base_dirs))
for risk_id, d in enumerate(to_base_dirs):
move_row, move_col = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config[
'log_near_base_distance']
distance = int(d is not None) + int(
potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history[
'zero_halite_move_behavior'][opponent_id][
risk_lookup_k])
best_risk_score = risk_scores.min()
if best_risk_score < 0.15:
print("Risky escort move with boxed in zero halite ship",
observation['step'], row, col)
# import pdb; pdb.set_trace()
rescuer_selected_move_dir = to_base_dirs[
np.argmin(risk_scores)]
increase_mask = np.zeros_like(increase_mask)
rescue_move_position = move_ship_row_col(
rescuer_row, rescuer_col, rescuer_selected_move_dir,
grid_size)
all_ship_scores[rescuer_k][0][rescue_move_position] += 3e5
all_ship_scores[rescuer_k][6].remove(None)
all_ship_scores[rescuer_k][6].append(rescuer_selected_move_dir)
for score_id in range(3):
all_ship_scores[rescuer_k][score_id][increase_mask] += 1e4
escape_row = rescuer_row
escape_col = rescuer_col
all_ship_scores[rescuer_k][3][:] -= 1e4 # No attack during rescue
all_ship_scores[ship_k][0][escape_row, escape_col] = 1e8
rescue_move_positions_taken[escape_row, escape_col] = 1
history['escort_to_base_list'].append( # I will not let you go baby
(ship_k, rescuer_k, True, 50, 50))
already_escorted_ships.append(ship_k)
# Add the ship pair to the escort-list for a fixed number of steps
is_protected = True
elif min_halite_distance == 2:
friendly_zero_halite_distance_scores = np.copy(
              friendly_zero_halite_distances).astype(float)
# If there is a single threatening ship: prefer distance 2 ships
# that are less towards that direction and avoid that direction when
# moving to the base
threat_directions = list(
set(NOT_NONE_DIRECTIONS) - set(valid_directions))
avoid_2_step_direction = None
if not None in valid_directions and len(threat_directions) == 1:
single_threat_direction = threat_directions[0]
avoid_2_step_direction = OPPOSITE_MAPPING[single_threat_direction]
for zero_2_id, zero_halite_ship_id in enumerate(
np.argsort(friendly_zero_halite_distances)[:5]):
rescuer_distance = friendly_zero_halite_distances[
zero_halite_ship_id]
if rescuer_distance == 2:
rescuer_row = zero_halite_pos[0][zero_halite_ship_id]
rescuer_col = zero_halite_pos[1][zero_halite_ship_id]
direction_distance = get_directional_distance(
row, col, rescuer_row, rescuer_col, grid_size,
avoid_2_step_direction)
friendly_zero_halite_distance_scores[zero_halite_ship_id] +=(
1e-6*direction_distance)
considered_zero_2_ship_ids = np.argsort(
friendly_zero_halite_distance_scores)[:5]
for zero_halite_ship_id in considered_zero_2_ship_ids:
rescuer_distance = friendly_zero_halite_distances[
zero_halite_ship_id]
if rescuer_distance == 2:
rescuer_row = zero_halite_pos[0][zero_halite_ship_id]
rescuer_col = zero_halite_pos[1][zero_halite_ship_id]
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
# Figure out if the chased ship can move to the rescuer
valid_move_dirs = []
for d in valid_directions:
if d is not None:
if HALF_PLANES_RUN[row, col][d][rescuer_row, rescuer_col]:
valid_move_dirs.append(d)
# Plan A: Let the non zero halite ship wait
# (stupid, this never happens - the non zero halite ship can not
# wait, it is being chased)
# if None in valid_directions:
# rescue_dirs = get_dir_from_target(
# rescuer_row, rescuer_col, row, col, grid_size)
# safe_rescue_dirs = set(rescue_dirs) & set(
# all_ship_scores[rescuer_k][6])
# if len(safe_rescue_dirs) > 0:
# rescue_dirs = list(safe_rescue_dirs)
# rescue_dir = np_rng.choice(rescue_dirs)
# rescuer_move_row, rescuer_move_col = move_ship_row_col(
# rescuer_row, rescuer_col, rescue_dir, grid_size)
# move_row = row
# move_col = col
# is_protected = True
# Plan B: Let the zero halite ship wait if there is no halite at
# the considered square, and the chased ship can move to the
# rescuer.
if not is_protected and obs_halite[
rescuer_row, rescuer_col] == 0:
to_rescuer_dirs = get_dir_from_target(
row, col, rescuer_row, rescuer_col, grid_size)
valid_to_rescuer_dirs = list(set(to_rescuer_dirs) & set(
valid_directions))
if len(valid_to_rescuer_dirs) > 0:
if len(valid_to_rescuer_dirs) > 1 and (
avoid_2_step_direction in valid_to_rescuer_dirs) and (
not avoid_2_step_direction is None):
valid_to_rescuer_dirs.remove(avoid_2_step_direction)
to_rescuer_dir = np_rng.choice(valid_to_rescuer_dirs)
move_row, move_col = move_ship_row_col(
row, col, to_rescuer_dir, grid_size)
rescuer_move_row = rescuer_row
rescuer_move_col = rescuer_col
is_protected = True
# Plan C: move towards the rescuer move square and have the
# rescuer move to another neighboring zero halite square
if not is_protected:
safe_zero_halite_squares_dirs = []
for d in NOT_NONE_DIRECTIONS:
if d in all_ship_scores[rescuer_k][6]:
rescue_r, rescue_c = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
if obs_halite[rescue_r, rescue_c] == 0:
safe_zero_halite_squares_dirs.append((
d, rescue_r, rescue_c, DISTANCES[rescue_r, rescue_c][
return_base_row, return_base_col]))
# Order the zero halite move squares by distance to the target
# base
distance_order = np.argsort(np.array([s[3] for s in (
safe_zero_halite_squares_dirs)])).tolist()
safe_zero_halite_squares_dirs_ordered = []
for ordered_id in distance_order:
safe_zero_halite_squares_dirs_ordered.append(
safe_zero_halite_squares_dirs[ordered_id])
for rescue_d, rescue_r, rescue_c, _ in (
safe_zero_halite_squares_dirs_ordered):
# Figure out if the chased ship can move to the new rescuer
# square
valid_to_rescuer_moved_dirs = []
for d in valid_directions:
if d is not None:
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
if HALF_PLANES_RUN[row, col][d][rescue_r, rescue_c] and (
not (move_row == rescue_r and move_col == rescue_c)):
valid_to_rescuer_moved_dirs.append(d)
if valid_to_rescuer_moved_dirs:
if len(valid_to_rescuer_moved_dirs) > 1 and (
not avoid_2_step_direction is None) and (
avoid_2_step_direction in valid_to_rescuer_moved_dirs):
valid_to_rescuer_moved_dirs.remove(
avoid_2_step_direction)
to_rescuer_dir = np_rng.choice(valid_to_rescuer_moved_dirs)
move_row, move_col = move_ship_row_col(
row, col, to_rescuer_dir, grid_size)
rescuer_move_row = rescue_r
rescuer_move_col = rescue_c
is_protected = True
break
# Plan D: Consider other rescuers up to distance 3 if I
# can't wait at the current/nearby square
# This won't be implemented
if is_protected:
on_rescue_mission[row, col] = 1
on_rescue_mission[rescuer_row, rescuer_col] = 1
recompute_pos = True
rescue_move_positions_taken[
rescuer_move_row, rescuer_move_col] = 1
rescue_move_positions_taken[move_row, move_col] = 1
all_ship_scores[ship_k][0][move_row, move_col] = 1e8
all_ship_scores[rescuer_k][0][
rescuer_move_row, rescuer_move_col] = 1e8
already_escorted_ships.append(ship_k)
break
if not is_protected and len(valid_directions) > 0:
# Only consider zero halite ships in the directions I can move to
# or ships that are about as close as the nearest threatening
# opponent
valid_rescue_mask = np.zeros_like(my_zero_halite_ships)
for d in valid_directions:
valid_rescue_mask[HALF_PLANES_RUN[row, col][d]] = 1
nearest_threat_distance = DISTANCES[row, col][opponent_ships & (
halite_ships < halite_ships[row, col])].min()
nearby_mask = DISTANCES[row, col] <= (nearest_threat_distance+1)
valid_rescue_mask = valid_rescue_mask | nearby_mask
valid_zero_halite_ships = np.copy(my_zero_halite_ships)*(
valid_rescue_mask)
valid_zero_halite_pos = np.where(valid_zero_halite_ships)
valid_friendly_zero_halite_distances = DISTANCES[row, col][
valid_zero_halite_pos]
if valid_zero_halite_pos[0].size:
min_valid_distance = valid_friendly_zero_halite_distances.min()
else:
min_valid_distance = grid_size
if min_valid_distance <= 6:
# Consider rescuing the ship if there is a nearby zero halite ship
# that can move to me and is in a valid direction of the move pos
considered_zero_ship_ids = np.argsort(
valid_friendly_zero_halite_distances)[:5]
for zero_halite_ship_id in considered_zero_ship_ids:
rescuer_distance = valid_friendly_zero_halite_distances[
zero_halite_ship_id]
if rescuer_distance > 2:
rescuer_row = valid_zero_halite_pos[0][zero_halite_ship_id]
rescuer_col = valid_zero_halite_pos[1][zero_halite_ship_id]
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
valid_move_dirs = []
for d in valid_directions:
if d is not None:
if HALF_PLANES_RUN[row, col][d][rescuer_row, rescuer_col]:
valid_move_dirs.append(d)
              # Break valid move ties by preferring to move towards my
              # weighted bases and towards a region where I have more 0
              # halite ships, with a lower die probability, while moving
              # along the diagonal (empowerment)
if len(valid_move_dirs) > 1:
my_valid_zero_halite_ship_density = smooth2d(
valid_zero_halite_ships, smooth_kernel_dim=8)
move_scores = np.zeros(len(valid_move_dirs))
for i, d in enumerate(valid_move_dirs):
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
dm = DISTANCE_MASKS[(row, col)]
dir_penalty = ship_scores[14][d] if (
d in ship_scores[14]) else 1
horiz_diff = move_col-rescuer_col
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-grid_size),
np.abs(horiz_diff+grid_size)))
vert_diff = move_row-rescuer_row
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-grid_size),
np.abs(vert_diff+grid_size)))
empowerment_bonus = min(
vert_distance, horiz_distance)/2
# import pdb; pdb.set_trace()
move_scores[i] = 0.5*my_valid_zero_halite_ship_density[
move_row, move_col] + empowerment_bonus + (
dm*weighted_base_mask*my_bases).sum() - dir_penalty
move_dir = valid_move_dirs[np.argmax(move_scores)]
else:
if valid_move_dirs:
move_dir = valid_move_dirs[0]
if valid_move_dirs:
move_row, move_col = move_ship_row_col(
row, col, move_dir, grid_size)
# Check if the zero halite ship can move to the move position
rescue_dirs = get_dir_from_target(
rescuer_row, rescuer_col, move_row, move_col, grid_size)
valid_rescue_dirs = [d for d in rescue_dirs if (
d in all_ship_scores[rescuer_k][6])]
rescuer_should_wait = rescuer_distance in [4, 6] and (
obs_halite[rescuer_row, rescuer_col] == 0) and not (
my_bases[rescuer_row, rescuer_col])
if valid_rescue_dirs or rescuer_should_wait or (
rescuer_distance == 3):
if rescuer_should_wait:
# The rescuer should wait on a zero halite square when
# the distance is 4 or 6 and there is no halite at the
# waiting square
rescuer_move_row = rescuer_row
rescuer_move_col = rescuer_col
else:
if rescuer_distance == 3 and len(valid_rescue_dirs) == 0:
valid_rescue_dirs = rescue_dirs
# Both ships should move to each other
# Break rescuer ties using the lower 0 halite opponent
# density and moving along the diagonal (empowerment)
# Strongly prefer zero halite squares when the current
# distance is 4
rescuer_move_scores = np.zeros(len(valid_rescue_dirs))
for i, d in enumerate(valid_rescue_dirs):
rescuer_move_row, rescuer_move_col = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
move_zero_halite_bonus = int((rescuer_distance == 4)*(
obs_halite[rescuer_move_row, rescuer_move_col] == 0))
horiz_diff = rescuer_move_col-move_col
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-grid_size),
np.abs(horiz_diff+grid_size)))
vert_diff = rescuer_move_row-move_row
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-grid_size),
np.abs(vert_diff+grid_size)))
empowerment_bonus = min(
vert_distance, horiz_distance)/2
# import pdb; pdb.set_trace()
rescuer_move_scores[i] = (
-opponent_zero_halite_ship_density[
rescuer_move_row, rescuer_move_col]) + (
move_zero_halite_bonus) + empowerment_bonus
rescuer_dir = valid_rescue_dirs[np.argmax(
rescuer_move_scores)]
rescuer_move_row, rescuer_move_col = move_ship_row_col(
rescuer_row, rescuer_col, rescuer_dir, grid_size)
on_rescue_mission[row, col] = 1
on_rescue_mission[rescuer_row, rescuer_col] = 1
recompute_pos = True
rescue_move_positions_taken[move_row, move_col] = 1
all_ship_scores[ship_k][0][move_row, move_col] = 1e8
all_ship_scores[rescuer_k][0][
rescuer_move_row, rescuer_move_col] = 1e8
break
else:
# If I have no valid zero halite ships nearby - prefer moving
# towards my weighted bases and a region where I have more 0
# halite ships and preferably a lower die probability
if len(valid_directions) > 1:
my_valid_zero_halite_ship_density = smooth2d(
valid_zero_halite_ships, smooth_kernel_dim=8)
move_scores = np.zeros(len(valid_directions))
for i, d in enumerate(valid_directions):
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
dm = DISTANCE_MASKS[(row, col)]
dir_penalty = ship_scores[14][d] if d in ship_scores[14] else 1
move_scores[i] = 0.5*my_valid_zero_halite_ship_density[
move_row, move_col] + (dm*weighted_base_mask*my_bases).sum(
) - dir_penalty
move_dir = valid_directions[np.argmax(move_scores)]
move_row, move_col = move_ship_row_col(
row, col, move_dir, grid_size)
rescue_move_positions_taken[move_row, move_col] = 1
all_ship_scores[ship_k][0][move_row, move_col] = 1e4
else:
move_dir = valid_directions[0]
# Slightly incentivize the nearest zero halite ship to move
# towards my move square (does not have to be in my valid move
# direction mask)
if zero_halite_pos[0].size:
move_row, move_col = move_ship_row_col(
row, col, move_dir, grid_size)
my_zero_halite_ships_move_distances = DISTANCES[
move_row, move_col][my_zero_halite_ships]
nearest_halite_id = np.argmin(
my_zero_halite_ships_move_distances)
rescuer_row = zero_halite_pos[0][nearest_halite_id]
rescuer_col = zero_halite_pos[1][nearest_halite_id]
rescuer_k = pos_to_k[rescuer_row*grid_size+rescuer_col]
increase_mask = get_mask_between_exclude_ends(
move_row, move_col, rescuer_row, rescuer_col, grid_size)
for score_id in range(3):
all_ship_scores[rescuer_k][score_id][increase_mask] += 1e2
if recompute_pos:
my_zero_halite_ships &= (~on_rescue_mission)
zero_halite_pos = np.where(my_zero_halite_ships)
# Escort previously chased ships to the base - only stop escorting once I
# have two valid actions towards the base or have one valid action and am
# in the same row/col
new_escort_list = []
added_escorted_ships = []
for (ship_k, rescuer_k, rescue_executed, min_escort_steps_remaining,
max_escort_steps_remaining) in history['escort_to_base_list']:
if ship_k in player_obs[2] and (rescuer_k in player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
dm = DISTANCE_MASKS[(row, col)]
base_scores = dm*weighted_base_mask*my_bases
# target_base = np.where(base_scores == base_scores.max())
# target_base = (target_base[0][0], target_base[1][0])
ship_scores = all_ship_scores[ship_k]
      valid_directions = copy.copy(ship_scores[6]) if len(
          ship_scores[8]) == 0 else (
          ship_scores[17]) # Also consider bad N-step directions
base_return_scores = np.zeros(num_considered_bases)
for base_id in range(num_considered_bases):
base_row = my_base_locations[0][base_id]
base_col = my_base_locations[1][base_id]
base_distance = base_distances[base_id][row, col]
relative_pos = get_relative_position(
row, col, base_row, base_col, grid_size)
mid_row = int(row + relative_pos[0]/2) % grid_size
mid_col = int(col + relative_pos[1]/2) % grid_size
# mid_row = int(np.where(
# np.abs(base_row-row) <= (grid_size // 2),
# np.round((base_row*(1-1e-9)+row*(1+1e-9))/2),
# np.where(base_row*(1-1e-9)+row*(1+1e-9) >= grid_size,
# np.round(
# (base_row*(1-1e-9)+row*(1+1e-9)-grid_size)/2),
# np.mod(np.round(
# (base_row*(1-1e-9)+row*(1+1e-9)+grid_size)/2),
# grid_size))
# ))
# mid_col = int(np.where(
# np.abs(base_col-col) <= (grid_size // 2),
# np.round((base_col*(1-1e-9)+col*(1+1e-9))/2),
# np.where(base_col*(1-1e-9)+col*(1+1e-9) >= grid_size,
# np.round(
# (base_col*(1-1e-9)+col*(1+1e-9)-grid_size)/2),
# np.mod(np.round(
# (base_col*(1-1e-9)+col*(1+1e-9)+grid_size)/2),
# grid_size))
# ))
return_base_directions = get_dir_from_target(
row, col, base_row, base_col, grid_size)
can_move_to_base = False
for d in return_base_directions:
if d in valid_directions:
can_move_to_base = True
break
# base_return_scores[base_id] = player_influence_maps[
# 0, mid_row, mid_col] - (player_influence_maps[
# 1:, mid_row, mid_col].sum() + base_distance/3)
base_return_scores[base_id] = player_influence_maps[
0, mid_row, mid_col] - (player_influence_maps[
1:, mid_row, mid_col].sum() + base_distance/1.5) + 2*int(
can_move_to_base)
best_base_id = np.argmax(base_return_scores)
return_base_row = my_base_locations[0][best_base_id]
return_base_col = my_base_locations[1][best_base_id]
target_base = (return_base_row, return_base_col)
base_distances_ship = base_distances[:, row, col]
nearest_base_id = np.argmin(base_distances_ship)
abort_rescue = base_distances_ship[nearest_base_id] <= 2
# main_base_row = main_base_location[0]
# main_base_col = main_base_location[1]
# return_main_base_distance = main_base_distances[row, col]
# # Returning to a base that is not the main base
# if base_distances_ship[nearest_base_id] < (
# return_main_base_distance-1):
# return_base_row = my_base_locations[0][nearest_base_id]
# return_base_col = my_base_locations[1][nearest_base_id]
# else:
# return_base_row = main_base_row
# return_base_col = main_base_col
# if observation['step'] == 278:
# import pdb; pdb.set_trace()
return_base_directions = get_dir_from_target(
row, col, return_base_row, return_base_col, grid_size)
one_step_invalid = list(set(NOT_NONE_DIRECTIONS).difference(set(
all_ship_scores[ship_k][9])))
not_good_dirs = list(set(all_ship_scores[ship_k][7] + all_ship_scores[
ship_k][8] + one_step_invalid))
base_return_good_dirs = np.array([not d in not_good_dirs for d in (
return_base_directions)])
num_good_base_return_dirs = base_return_good_dirs.sum()
if not rescue_executed and base_scores.max() > 0 and (
not ship_k in already_escorted_ships) and not (
ship_k in added_escorted_ships):
# print("ENTERING THE ESCORT TO BASE BRANCH")
# Jointly move to the nearest weighted base and assume that the
# former zero halite position won't be attacked
rescuer_row, rescuer_col = row_col_from_square_grid_pos(
player_obs[2][rescuer_k][0], grid_size)
# Abort the rescue operation when the rescuing ship has gobbled up
# another non-zero halite ship or if both are no longer next to
# each other
abort_rescue = (DISTANCES[row, col][rescuer_row, rescuer_col] > 1) or (
halite_ships[rescuer_row, rescuer_col] > 0)
# Abort once I have two valid actions towards the base or have one
# valid action and am in the same row/col
# print(row, col, min_escort_steps_remaining, observation['step'])
if min_escort_steps_remaining == 0:
abort_rescue = abort_rescue or (num_good_base_return_dirs == 2)
if not abort_rescue and num_good_base_return_dirs == 1:
# import pdb; pdb.set_trace()
abort_rescue = (row == return_base_row) or (
col == return_base_col)
# Abort if the ship can safely return to a base
abort_rescue = abort_rescue or (
halite_ships[row, col] < safe_to_return_halites[row, col])
if not abort_rescue:
already_escorted_ships.append(ship_k)
on_rescue_mission[row, col] = 1
on_rescue_mission[rescuer_row, rescuer_col] = 1
to_rescuer_dir = get_dir_from_target(
row, col, rescuer_row, rescuer_col, grid_size)[0]
ship_k_not_immediate_bad = list(
set(all_ship_scores[ship_k][6]).union(
set(all_ship_scores[ship_k][17])))
aligned_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]) & set(return_base_directions))
# if observation['step'] == 231:
# import pdb; pdb.set_trace()
if len(aligned_dirs) == 0:
# Don't risk moving onto the previous step zero halite square if a
# potential opponent has a considerable probability of engaging in
# 1-step risky actions
risk_score = 0
for d in NOT_NONE_DIRECTIONS:
neighbor_rescue_row, neighbor_rescue_col = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
if opponent_zero_halite_ships[
neighbor_rescue_row, neighbor_rescue_col]:
opponent_id = np.where(stacked_ships[
:, neighbor_rescue_row, neighbor_rescue_col])[0][0]
risk_score += history['zero_halite_move_behavior'][
opponent_id]['False_0']
shared_escape_dirs = list(set(ship_k_not_immediate_bad) & set(
all_ship_scores[rescuer_k][6]))
if risk_score > max_1_step_rescue_risk and len(
shared_escape_dirs) > 0 and len(aligned_dirs) == 0:
# print("Avoiding risky rescue move", observation['step'], row,
# col)
aligned_dirs = shared_escape_dirs
override_override_mask = False
if (to_rescuer_dir not in ship_k_not_immediate_bad) or (
len(aligned_dirs) > 0):
# It is probably worth the risk when I am rescuing a ship in
# trouble
# It is better to take a safe step with both ships if that option
# is available
# import pdb; pdb.set_trace()
if len(aligned_dirs) == 0 and not to_rescuer_dir in (
all_ship_scores[ship_k][6]):
all_ship_scores[ship_k][6].append(to_rescuer_dir)
else:
if len(aligned_dirs) > 1:
str_aligned_dirs = [d if d is not None else "None" for d in (
aligned_dirs)]
str_aligned_dirs.sort() # Set intersect is flaky
aligned_dirs = [d if d != "None" else None for d in (
str_aligned_dirs)]
override_override_mask = True
if override_override_mask:
aligned_dir = aligned_dirs[0]
rescuer_move_row, rescuer_move_col = move_ship_row_col(
rescuer_row, rescuer_col, aligned_dir, grid_size)
rescue_move_positions_taken[rescuer_move_row, rescuer_move_col] = 1
for score_id in range(3):
all_ship_scores[rescuer_k][score_id][
rescuer_move_row, rescuer_move_col] += 1e4
if not aligned_dir in all_ship_scores[ship_k][6]:
all_ship_scores[ship_k][6].append(aligned_dir)
escape_row, escape_col = move_ship_row_col(
row, col, aligned_dir, grid_size)
else:
# Consider escaping along the opposite side of the board if one
# side is much more congested
relative_pos = get_relative_position(
rescuer_row, rescuer_col, target_base[0], target_base[1],
grid_size)
mid_path_row_short = int(
rescuer_row + relative_pos[0]/2) % grid_size
mid_path_col_short = int(
rescuer_col + relative_pos[1]/2) % grid_size
mid_path_row_long = int(
rescuer_row - (grid_size - relative_pos[0])/2) % grid_size
mid_path_col_long = int(
rescuer_col - (grid_size - relative_pos[1])/2) % grid_size
start_rescuer_row = rescuer_row
if np.abs(relative_pos[0]) > 7:
# Determine start_rescuer_row
mid_density_short_row = player_influence_maps[
0, mid_path_row_short, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_short]).sum()
mid_density_long_row = player_influence_maps[
0, mid_path_row_long, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_long, mid_path_col_short]).sum()
distance_diff_short = grid_size-2*np.abs(relative_pos[0])
if mid_density_long_row - mid_density_short_row > (
distance_diff_short/2):
# import pdb; pdb.set_trace()
start_rescuer_row = mid_path_row_long
start_rescuer_col = rescuer_col
if np.abs(relative_pos[1]) > 7:
# Determine start_rescuer_col
mid_density_short_col = player_influence_maps[
0, mid_path_row_short, mid_path_col_short] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_short]).sum()
mid_density_long_col = player_influence_maps[
0, mid_path_row_short, mid_path_col_long] - (
player_influence_maps[
1:, mid_path_row_short, mid_path_col_long]).sum()
distance_diff_short = grid_size-2*np.abs(relative_pos[1])
if mid_density_long_col - mid_density_short_col > (
distance_diff_short):
# import pdb; pdb.set_trace()
start_rescuer_col = mid_path_col_long
increase_mask = get_mask_between_exclude_ends(
target_base[0], target_base[1], start_rescuer_row,
start_rescuer_col, grid_size)
# Avoid the rescued ship half plane if that leaves the rescuer with
# options (that way the rescue mission makes progress)
to_base_dirs = get_dir_from_target(
start_rescuer_row, start_rescuer_col, target_base[0],
target_base[1], grid_size)
valid_to_base_dirs = list(set(to_base_dirs) & set(
all_ship_scores[rescuer_k][6]))
if len(valid_to_base_dirs) > 1:
to_rescued_dir = OPPOSITE_MAPPING[to_rescuer_dir]
if to_rescued_dir in all_ship_scores[rescuer_k][6]:
# import pdb; pdb.set_trace()
increase_mask[HALF_PLANES_CATCH[
start_rescuer_row, start_rescuer_col][to_rescued_dir]] = 0
if len(all_ship_scores[rescuer_k][6]) == 1 and (
all_ship_scores[rescuer_k][6][0] is None):
# Take a move risk rather than staying still - otherwise the
# non zero halite ship is likely to get orphaned
# import pdb; pdb.set_trace()
risk_scores = np.zeros(len(to_base_dirs))
for risk_id, d in enumerate(to_base_dirs):
move_row, move_col = move_ship_row_col(
rescuer_row, rescuer_col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config[
'log_near_base_distance']
distance = int(d is not None) + int(
potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history[
'zero_halite_move_behavior'][opponent_id][
risk_lookup_k])
best_risk_score = risk_scores.min()
if best_risk_score < 0.15:
print("Risky escort move with boxed in zero halite ship",
observation['step'], row, col)
# import pdb; pdb.set_trace()
rescuer_selected_move_dir = to_base_dirs[
np.argmin(risk_scores)]
increase_mask = np.zeros_like(increase_mask)
rescue_move_position = move_ship_row_col(
rescuer_row, rescuer_col, rescuer_selected_move_dir,
grid_size)
all_ship_scores[rescuer_k][0][rescue_move_position] += 3e5
all_ship_scores[rescuer_k][6].remove(None)
all_ship_scores[rescuer_k][6].append(rescuer_selected_move_dir)
for score_id in range(3):
all_ship_scores[rescuer_k][score_id][increase_mask] += 1e4
escape_row = rescuer_row
escape_col = rescuer_col
all_ship_scores[rescuer_k][3][:] -= 1e4 # No attack during rescue
all_ship_scores[ship_k][0][escape_row, escape_col] = 1e8
rescue_move_positions_taken[escape_row, escape_col] = 1
if not abort_rescue:
if min_escort_steps_remaining > 1:
if not ship_k in added_escorted_ships:
added_escorted_ships.append(ship_k)
new_escort_list.append((ship_k, rescuer_k, False,
min_escort_steps_remaining-1,
max_escort_steps_remaining-1))
elif (max_escort_steps_remaining > 1) and not (
ship_k in added_escorted_ships):
# can_not_return_base = (halite_ships[row, col] > 0) and (
# len(set(return_base_directions) & set(
# all_ship_scores[ship_k][6])) == 0 or (
# len(set(return_base_directions) & set(
# all_ship_scores[ship_k][7])) == num_return_directions) or (
# len(set(return_base_directions) & set(
# all_ship_scores[ship_k][8])) == num_return_directions))
added_escorted_ships.append(ship_k)
# can_not_return_base = (halite_ships[row, col] > 0) and (
# len(set(return_base_directions) & set(
# all_ship_scores[ship_k][6])) == 0 or (
# np.all(np.array(base_return_not_good_dirs))))
      # Decide on the next step whether we should abort escorting the ship
new_escort_list.append(
(ship_k, rescuer_k, False, 0, max_escort_steps_remaining-1))
history['escort_to_base_list'] = new_escort_list
# print(observation['step'], history['escort_to_base_list'])
return (all_ship_scores, on_rescue_mission, rescue_move_positions_taken,
history)
def update_scores_victory_formation(
all_ship_scores, config, env_config, stacked_ships, observation,
halite_ships, steps_remaining, obs_halite, player_obs,
main_base_distances):
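  """Arrange surplus ships into a 'TOM' victory formation near the game end.

  The formation is only attempted in the last ~20 steps, when the estimated
  score advantage (bank halite + cargo + approximate ship value) comfortably
  exceeds the cost of covering the uncovered pattern squares and my ship
  count advantage over the strongest opponent exceeds the number of squares
  still to cover. Returns the updated ship scores and a flag indicating
  whether the formation was initiated.
  """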
# Compute the current approximate player score
scores = np.array(
[rbs[0] for rbs in observation['rewards_bases_ships']])
halite_cargos = np.array(
[rbs[3].sum() for rbs in observation['rewards_bases_ships']])
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
my_bases = observation['rewards_bases_ships'][0][1]
grid_size = halite_ships.shape[0]
ship_counts = stacked_ships.sum((1, 2))
all_ship_count = ship_counts.sum()
obs_halite_sum = observation['halite'].sum()
ship_value = min(env_config.spawnCost,
steps_remaining*obs_halite_sum**0.6/(
all_ship_count+1e-9))
current_scores = scores+halite_cargos+ship_value*ship_counts
min_advantage = current_scores[0] - current_scores[1:].max()
# Obtain the TOM pattern location
top_row = 8
left_col = 1
target_pattern = np.zeros((grid_size, grid_size), dtype=np.bool)
# T of TOM
target_pattern[top_row, left_col:(left_col+5)] = 1
target_pattern[top_row:(top_row+4), left_col+2] = 1
# O of TOM
target_pattern[top_row, (left_col+7):(left_col+9)] = 1
target_pattern[top_row+3, (left_col+7):(left_col+9)] = 1
target_pattern[(top_row+1):(top_row+3), left_col+6] = 1
target_pattern[(top_row+1):(top_row+3), left_col+9] = 1
# M of TOM
target_pattern[top_row+3, left_col+11] = 1
target_pattern[top_row+2, left_col+12] = 1
target_pattern[top_row+1, left_col+13] = 1
target_pattern[top_row+2, left_col+14] = 1
target_pattern[top_row+1, left_col+15] = 1
target_pattern[top_row+2, left_col+16] = 1
target_pattern[top_row+3, left_col+17] = 1
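  # Pattern squares not already covered by my bases are covered by sending
  # ships there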
# First compute if we can afford a victory formation
uncovered_pattern = target_pattern & ~(my_bases)
num_uncovered_squares = uncovered_pattern.sum()
min_required_for_formation = (
steps_remaining*0.2+10+num_uncovered_squares)*(
env_config.spawnCost + ship_value)
player_ship_counts = stacked_ships.sum((1, 2))
ship_advantage = player_ship_counts[0] - player_ship_counts[1:].max()
available_ships_formation = np.copy(stacked_ships[0])
can_form_victory_formation = min_advantage > min_required_for_formation and (
steps_remaining < 20) and (ship_advantage > num_uncovered_squares) and (
num_uncovered_squares > 0)
if can_form_victory_formation:
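    # Greedily assign the nearest available ship to each uncovered pattern
    # square by strongly boosting its collect and establish scores there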
my_ship_pos_to_k = {v[0]: k for k, v in player_obs[2].items()}
uncovered_positions = np.where(uncovered_pattern)
for uncovered_id in range(num_uncovered_squares):
pattern_row = uncovered_positions[0][uncovered_id]
pattern_col = uncovered_positions[1][uncovered_id]
my_pattern_distances = DISTANCES[pattern_row, pattern_col] + 100*(
~available_ships_formation)
best_target_match = np.where(
my_pattern_distances == my_pattern_distances.min())
ship_row = best_target_match[0][0]
ship_col = best_target_match[1][0]
ship_k = my_ship_pos_to_k[ship_row*grid_size+ship_col]
# import pdb; pdb.set_trace()
all_ship_scores[ship_k][0][pattern_row, pattern_col] += 1e12
all_ship_scores[ship_k][2][pattern_row, pattern_col] = 1e13
available_ships_formation[ship_row, ship_col] = 0
# for ship_k in all_ship_scores:
# row, col = row_col_from_square_grid_pos(
# player_obs[2][ship_k][0], grid_size)
# ship_halite = halite_ships[row, col]
# # Greedily assign ships to convert at each of the uncovered positions in
# # the target pattern
# for j in [0, 1, 2, 3]:
# all_ship_scores[ship_k][j][:] = -1e6
# dance_scores = 1e12*(v_pattern*(
# DISTANCE_MASKS[(row, col)] ** (1/np.sqrt(steps_remaining+1)))*(
# 1.08**main_base_distances) - 0.2*(
# my_zero_halite_ship_density))
# if obs_halite[row, col] > 0:
# dance_scores[row, col] = -1e6
# best_dance_score = dance_scores.max()
# best_squares = np.where(dance_scores == best_dance_score)
# best_row = best_squares[0][0]
# best_col = best_squares[1][0]
# v_pattern[best_row, best_col] = 0
# all_ship_scores[ship_k][0][best_row, best_col] = best_dance_score
return all_ship_scores, can_form_victory_formation
def get_ship_plans(
config, observation, player_obs, env_config, verbose, all_ship_scores,
np_rng, weighted_base_mask, steps_remaining,
opponent_ships_sensible_actions, opponent_ships_scaled,
main_base_distances, history, env_observation, player_influence_maps,
ignore_convert_positions, ship_diff_smoothed, safe_to_return_halites,
safe_collect_margin, always_attack_opponent_id,
likely_convert_opponent_positions, possible_convert_opponent_positions,
my_current_base_distances, nearest_base_distances,
convert_first_ship_on_None_action=True, halite_on_board_mult=1e-6):
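  """Assign a high level plan to each of my ships for this step.

  Plans are one of: convert to a base, gather, return to a base, establish a
  new base, or attack an opponent base. The planning proceeds in stages:
  restoring/adding bases, base defense, rescue missions, boxing in and pack
  hunting of opponents, and an optional victory formation. The remaining
  ships are then processed in priority order, where priorities are updated
  as higher priority ships claim target squares and single-path squares.
  """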
ship_plans_start_time = time.time()
all_my_bases = copy.copy(observation['rewards_bases_ships'][0][1])
my_considered_bases = copy.copy(observation['rewards_bases_ships'][0][1])
all_base_pos = np.where(all_my_bases)
my_abandoned_base_count = len(
history['my_base_not_attacked_positions'])
if history['my_base_not_attacked_positions']:
abandoned_rows, abandoned_cols = zip(*history[
'my_base_not_attacked_positions'])
abandoned_base_pos = (np.array(abandoned_rows), np.array(abandoned_cols))
else:
abandoned_base_pos = ()
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_considered_bases[base_pos] = 0
opponent_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships'][1:]]).sum(0) > 0
can_deposit_halite = my_considered_bases.sum() > 0
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
all_ships = stacked_ships.sum(0) > 0
my_ships = stacked_ships[0]
opponent_ships = stacked_ships[1:].sum(0) > 0
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[~all_ships] = -1e-9
grid_size = observation['halite'].shape[0]
ship_ids = list(player_obs[2])
my_ship_count = len(player_obs[2])
my_non_converted_ship_count = my_ship_count
convert_cost = env_config.convertCost
obs_halite = np.maximum(0, observation['halite'])
collect_rate = env_config.collectRate
spawn_cost = env_config.spawnCost
num_bases = my_considered_bases.sum()
non_abandoned_base_pos = np.where(my_considered_bases)
new_bases = []
base_attackers = {}
max_attackers_per_base = config['max_attackers_per_base']
camping_ships_strategy = history['camping_ships_strategy']
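  # For every square, precompute the distance to my nearest ship and to the
  # nearest opponent ship, breaking ties in favor of ships with less halite on
  # board via a tiny cargo penalty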
my_nearest_ship_distances_raw = []
opponent_nearest_ship_distances = [1e6*np.ones((grid_size, grid_size))]
my_ship_pos = np.where(my_ships)
opponent_ship_pos = np.where(opponent_ships)
if my_ships.sum():
for ship_id in range(my_ship_pos[0].size):
row = my_ship_pos[0][ship_id]
col = my_ship_pos[1][ship_id]
my_nearest_ship_distances_raw.append(DISTANCES[row, col] + (
halite_on_board_mult*halite_ships[row, col]))
my_nearest_ship_distances_raw.append(1e6*np.ones((grid_size, grid_size)))
if opponent_ships.sum():
for ship_id in range(opponent_ship_pos[0].size):
row = opponent_ship_pos[0][ship_id]
col = opponent_ship_pos[1][ship_id]
opponent_nearest_ship_distances.append(DISTANCES[row, col] + (
halite_on_board_mult*halite_ships[row, col]))
my_nearest_ship_distances = np.stack(my_nearest_ship_distances_raw).min(0)
opponent_nearest_ship_distances = np.stack(
opponent_nearest_ship_distances).min(0)
# if observation['step'] == 131:
# import pdb; pdb.set_trace()
# Update ship scores to make sure that the plan does not contradict with
# invalid actions when the plan is executed (ship_plans_to_actions)
for ship_k in all_ship_scores:
# if observation['step'] == 360 and ship_k == '134-1':
# import pdb; pdb.set_trace()
bad_directions = list(set(MOVE_DIRECTIONS) - set(
all_ship_scores[ship_k][6]))
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
if max_attackers_per_base <= 0:
all_ship_scores[ship_k][3][:] = -1e12
if len(bad_directions) < len(MOVE_DIRECTIONS) and not all_ship_scores[
ship_k][13] and not (
all_ship_scores[ship_k][12] and steps_remaining == 1):
for d in bad_directions:
mask_avoid = np.copy(HALF_PLANES_RUN[(row, col)][d])
        # Don't punish entire half planes when there is only a single bad
        # direction. The step > 20 check avoids blocking my own base early on
if len(bad_directions) == 1 and observation['step'] > 20:
# FUTURE WORK: should we do this only for base return? Should we make
# this even less strict?
if d in [NORTH, SOUTH]:
mask_avoid &= COLUMN_MASK[col]
elif d in [EAST, WEST]:
mask_avoid &= ROW_MASK[row]
if d is not None:
mask_avoid[row, col] = False
for i in range(3):
all_ship_scores[ship_k][i][mask_avoid] -= 1e5
if d is None and num_bases == 0:
# Don't suppress conversion at the current square for the first or
# reconstructed base
all_ship_scores[ship_k][2][mask_avoid] += 1e5
if d not in all_ship_scores[ship_k][10]:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if opponent_bases.sum() > 0 and DISTANCES[(move_row, move_col)][
opponent_bases].min() > 1:
if not d in all_ship_scores[ship_k][9]:
all_ship_scores[ship_k][3][mask_avoid] -= 1e5
# if observation['step'] == 131:
# import pdb; pdb.set_trace()
# Decide whether to build a new base after my last base has been destroyed.
# A camped base where I do not consider attacking the campers is also
# considered destroyed
should_spawn_base_next_step = None
if num_bases == 0 and my_ship_count > 1:
requested_save_conversion_budget = 0
all_ship_scores, can_deposit_halite, restored_base_pos = (
consider_restoring_base(
observation, env_config, all_ship_scores, player_obs, convert_cost,
np_rng, history))
num_restored_or_added_bases = int(can_deposit_halite)
convert_unavailable_positions = np.zeros(
(grid_size, grid_size), dtype=np.bool)
if can_deposit_halite:
non_abandoned_base_pos = (
np.array([restored_base_pos[0]]), np.array([restored_base_pos[1]]))
restored_or_added_base_pos = restored_base_pos
my_considered_bases[restored_base_pos] = 1
else:
can_deposit_halite = num_bases > 0
# Strategically add bases
if num_bases > 0 and my_ship_count > 2:
(all_ship_scores, base_added, added_base_pos,
requested_save_conversion_budget, convert_unavailable_positions,
should_spawn_base_next_step) = consider_adding_strategic_bases(
config, observation, env_config, all_ship_scores, player_obs,
convert_cost, np_rng, history, player_influence_maps, obs_halite,
non_abandoned_base_pos, all_base_pos, halite_ships,
my_nearest_ship_distances, my_nearest_ship_distances_raw,
opponent_nearest_ship_distances)
num_restored_or_added_bases = int(base_added)
num_bases += num_restored_or_added_bases
if base_added:
non_abandoned_base_pos = (
np.array(non_abandoned_base_pos[0].tolist() + [added_base_pos[0]]),
np.array(non_abandoned_base_pos[1].tolist() + [added_base_pos[1]])
)
restored_or_added_base_pos = added_base_pos
else:
num_restored_or_added_bases = 0
requested_save_conversion_budget = 0
convert_unavailable_positions = np.zeros(
(grid_size, grid_size), dtype=np.bool)
# if observation['step'] == 360:
# import pdb; pdb.set_trace()
# Decide to redirect ships to the base to avoid the main and potential other
# strategic bases being destroyed by opposing ships
defend_base_ignore_collision_key = None
ignore_base_collision_ship_keys = []
should_defend = (my_ship_count-num_restored_or_added_bases) > min(
4, 2 + steps_remaining/5)
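  # Cap the total number of dedicated base defenders at seven, scaling roughly
  # with the fleet size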
remaining_defend_base_budget = max(0, min([7, int((my_ship_count-2)/2.5)]))
base_override_move_positions = history['base_camping_override_positions']
my_defended_abandoned_bases = np.zeros((grid_size, grid_size), dtype=np.bool)
defend_base_ignore_collision_keys = []
bases_protected = {}
all_ignore_base_collision_ship_keys = []
this_step_base_defense_keys = []
my_defend_base_ship_positions = np.zeros(
(grid_size, grid_size), dtype=np.bool)
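  # Ships that need rescuing should only head for bases that one of my ships
  # can reach at least as fast as the nearest opponent ship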
my_considered_bases_rescue_mission = np.copy(my_considered_bases)
my_considered_bases_rescue_mission &= (
my_nearest_ship_distances <= opponent_nearest_ship_distances)
if num_restored_or_added_bases > 0:
# The converted ship can not be used to defend the newly created base
my_defend_base_ship_positions[restored_or_added_base_pos] = 1
early_hunting_season = observation['relative_step'] >= config[
'start_hunting_season_relative_step'] and observation[
'relative_step'] <= config[
'early_hunting_season_less_collect_relative_step'] and (
obs_halite.sum() < config['min_halite_to_stop_early_hunt']) and (
not history['early_hunting_season_ended'])
late_hunting_season = observation['relative_step'] > config[
'late_hunting_season_more_collect_relative_step']
if (num_bases >= 1 or my_abandoned_base_count > 0) and should_defend:
if abandoned_base_pos:
can_defend_abandoned = my_nearest_ship_distances[abandoned_base_pos] <= (
opponent_nearest_ship_distances[abandoned_base_pos])
num_can_defend_abandoned = can_defend_abandoned.sum()
else:
num_can_defend_abandoned = 0
defend_desirabilities = player_influence_maps[0] - player_influence_maps[
1:].max(0)
abandoned_defend_desirabilities = defend_desirabilities[abandoned_base_pos]
# First, consider non abandoned bases - prefer bases where I have a
# relatively high number of ships (same logic to compute the main base)
can_defend_not_abandoned = my_nearest_ship_distances[
non_abandoned_base_pos] <= opponent_nearest_ship_distances[
non_abandoned_base_pos]
non_abandoned_defend_desirabilities = ship_diff_smoothed[
non_abandoned_base_pos] + 100*can_defend_not_abandoned
defend_priority_ids = np.argsort(-non_abandoned_defend_desirabilities)
base_locations_defense_budget = []
num_non_abandoned = defend_priority_ids.size
if num_non_abandoned > 0:
non_abandoned_min_defenders = min(3, (
remaining_defend_base_budget-num_can_defend_abandoned)//(
num_non_abandoned))
if (num_non_abandoned + num_can_defend_abandoned) == 1:
main_base_defense_budget = 3
else:
if remaining_defend_base_budget <= (
num_non_abandoned + num_can_defend_abandoned):
main_base_defense_budget = 1
else:
main_base_defense_budget = 2
if not early_hunting_season and observation['relative_step'] >= config[
'start_hunting_season_relative_step']:
        # Once this condition triggers, the early hunting season remains over
        # for the rest of the game
history['early_hunting_season_ended'] = True
# Defend with less passion if my bases were not destroyed before
if history['num_destroyed_bases'] == 0:
non_abandoned_min_defenders = 1
main_base_defense_budget = min(
main_base_defense_budget, 1 + int(
num_non_abandoned == 1 or early_hunting_season))
elif history['num_destroyed_bases'] == 1:
non_abandoned_min_defenders = min(non_abandoned_min_defenders, 2)
main_base_defense_budget = min(main_base_defense_budget, 2)
for defend_id, defend_priority_id in enumerate(defend_priority_ids):
base_max_defenders = main_base_defense_budget if defend_id == 0 else 1
num_defenders = max(non_abandoned_min_defenders, min(
remaining_defend_base_budget, base_max_defenders))
if num_defenders > 0 and (
defend_id == 0 or can_defend_not_abandoned[defend_priority_id]):
remaining_defend_base_budget -= num_defenders
base_locations_defense_budget.append((
non_abandoned_base_pos[0][defend_priority_id],
non_abandoned_base_pos[1][defend_priority_id],
num_defenders))
# First consider the non main, non abandoned bases, then the main base and
# finally the abandoned bases
base_locations_defense_budget = base_locations_defense_budget[::-1]
if abandoned_base_pos and can_defend_abandoned.sum() and (
remaining_defend_base_budget > 0):
      # Minimally (1 ship) defend at most three of the abandoned bases - prefer
# squares where I have a relatively high influence
abandoned_defense_scores = -50+100*can_defend_abandoned + (
abandoned_defend_desirabilities)
abandoned_defend_priority_ids = np.argsort(-abandoned_defense_scores)
max_abandoned_defenses = 3
for abandoned_defend_priority_id in abandoned_defend_priority_ids:
num_defenders = min(
remaining_defend_base_budget,
int(can_defend_abandoned[abandoned_defend_priority_id] and (
max_abandoned_defenses > 0)))
if num_defenders > 0:
abandoned_base_row = abandoned_base_pos[
0][abandoned_defend_priority_id]
abandoned_base_col = abandoned_base_pos[
1][abandoned_defend_priority_id]
my_defended_abandoned_bases[
abandoned_base_row, abandoned_base_col] = 1
max_abandoned_defenses -= 1
remaining_defend_base_budget -= num_defenders
base_locations_defense_budget.append(
(abandoned_base_row, abandoned_base_col, num_defenders))
# print(observation['step'], base_locations_defense_budget)
# if observation['step'] == 360:
# import pdb; pdb.set_trace()
for base_row, base_col, num_defenders in base_locations_defense_budget:
defend_base_pos = (base_row, base_col)
(all_ship_scores, defend_base_ignore_collision_key, base_protected,
ignore_base_collision_ship_keys, defend_base_ship_positions_base,
base_override_move_positions, base_defense_keys) = protect_base(
observation, env_config, all_ship_scores, player_obs, defend_base_pos,
history, base_override_move_positions, my_defend_base_ship_positions,
max_considered_attackers=num_defenders)
defend_base_ignore_collision_keys.append(
defend_base_ignore_collision_key)
bases_protected[defend_base_pos] = base_protected
all_ignore_base_collision_ship_keys += ignore_base_collision_ship_keys
this_step_base_defense_keys += base_defense_keys
my_defend_base_ship_positions |= defend_base_ship_positions_base
else:
# main_base_protected = True
my_defend_base_ship_positions = np.zeros(
(grid_size, grid_size), dtype=np.bool)
num_non_abandoned = 0
# if observation['step'] == 360:
# import pdb; pdb.set_trace()
# Decide on redirecting ships to friendly ships that are boxed in/chased and
# can not return to any of my bases
if (num_bases - num_restored_or_added_bases) > 0:
(all_ship_scores, on_rescue_mission, rescue_move_positions_taken,
history) = update_scores_rescue_missions(
config, all_ship_scores, stacked_ships, observation, halite_ships,
steps_remaining, player_obs, obs_halite, history,
opponent_ships_sensible_actions, weighted_base_mask,
my_considered_bases_rescue_mission, np_rng, main_base_distances,
my_defend_base_ship_positions, safe_to_return_halites,
player_influence_maps, nearest_base_distances)
else:
on_rescue_mission = np.zeros((grid_size, grid_size), dtype=np.bool)
rescue_move_positions_taken = np.zeros(
(grid_size, grid_size), dtype=np.bool)
override_move_squares_taken = rescue_move_positions_taken | (
base_override_move_positions)
# if observation['step'] == 188:
# import pdb; pdb.set_trace()
# Coordinate box in actions of opponent more halite ships
box_start_time = time.time()
if main_base_distances.max() > 0:
(all_ship_scores, boxing_in_mission, box_opponent_targets,
override_move_squares_taken,
ships_on_box_mission) = update_scores_opponent_boxing_in(
all_ship_scores, stacked_ships, observation, env_config,
opponent_ships_sensible_actions, halite_ships, steps_remaining,
player_obs, np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission,
my_defend_base_ship_positions, env_observation, player_influence_maps,
override_move_squares_taken, ignore_convert_positions,
convert_unavailable_positions, always_attack_opponent_id,
num_non_abandoned, likely_convert_opponent_positions,
possible_convert_opponent_positions, my_current_base_distances)
else:
boxing_in_mission = np.zeros((grid_size, grid_size), dtype=np.bool)
box_opponent_targets = []
ships_on_box_mission = {}
box_in_duration = time.time() - box_start_time
# if observation['step'] == 188:
# import pdb; pdb.set_trace()
# Coordinated pack hunting (hoard in fixed directions with zero halite ships)
# Send all non zero halite ships to a base so we can hunt safely
if observation['relative_step'] >= config[
'start_hunting_season_relative_step'] and observation[
'relative_step'] <= config['end_hunting_season_relative_step']:
if not history['limit_ships_timeout']:
(all_ship_scores, history,
override_move_squares_taken) = update_scores_pack_hunt(
all_ship_scores, config, stacked_ships, observation,
opponent_ships_sensible_actions, halite_ships, steps_remaining,
player_obs, np_rng, opponent_ships_scaled, collect_rate, obs_halite,
main_base_distances, history, on_rescue_mission, boxing_in_mission,
my_defend_base_ship_positions, env_observation, box_opponent_targets,
override_move_squares_taken, player_influence_maps,
ignore_convert_positions, convert_unavailable_positions,
early_hunting_season, late_hunting_season, safe_collect_margin,
spawn_cost)
# if observation['step'] == 188:
# import pdb; pdb.set_trace()
# Go into a victory formation when I have won
# This can be perceived as arrogant (because it is)
if observation['relative_step'] >= 0.85:
all_ship_scores, victory_formation = update_scores_victory_formation(
all_ship_scores, config, env_config, stacked_ships, observation,
halite_ships, steps_remaining, obs_halite, player_obs,
main_base_distances)
else:
victory_formation = False
  # Lower the collect scores for non high priority ships for the squares where
  # a high priority ship has claimed the move position.
# High priority actions:
# - Rescue
# - Base defense
# - Base attack
# - Boxing in
# - Opponent hoarding
# - Victory dance
if override_move_squares_taken.sum() > 0:
for ship_k in all_ship_scores:
best_collect_score = all_ship_scores[ship_k][0].max()
if best_collect_score <= 5e4:
# Very likely not a high priority collect override ship - lower the
# collect scores for the claimed high priority ships to avoid conflicts
# downstream
# override_move_squares_taken = 1
all_ship_scores[ship_k][0][override_move_squares_taken] *= 0.1
# First, process the convert actions
ship_plans = OrderedDict()
for i, ship_k in enumerate(player_obs[2]):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_scores = all_ship_scores[ship_k]
ship_halite = player_obs[2][ship_k][1]
has_budget_to_convert = (ship_halite + player_obs[0]) >= convert_cost
convert_surrounded_ship = ship_scores[5] and (
ship_halite >= (convert_cost/config[
          'boxed_in_halite_convert_divisor'])) and has_budget_to_convert and (
my_non_converted_ship_count > 1)
one_step_valid_directions = ship_scores[9]
almost_boxed_in = not None in one_step_valid_directions and (len(
one_step_valid_directions) == 1 or set(one_step_valid_directions) in [
set([NORTH, SOUTH]), set([EAST, WEST])])
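    # A base counts as safe to return to when no opponent ship can get there
    # before this ship (cargo acts as a tie-breaker)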
can_return_safely = all_my_bases & (
opponent_nearest_ship_distances >= (
DISTANCES[row, col] + ship_halite*halite_on_board_mult))
# if np.any(can_return_safely & (~(my_considered_bases | (
# my_defended_abandoned_bases)))):
# import pdb; pdb.set_trace()
# x=1
# if observation['step'] == 129 and row == 11 and col == 8:
# import pdb; pdb.set_trace()
# if observation['step'] == 63 and ship_k in ['5-3', '36-1']:
# import pdb; pdb.set_trace()
if (has_budget_to_convert and (
my_ship_count > 1 or observation['step'] < 20 or (
steps_remaining == 1 and ship_halite >= convert_cost and (
ship_halite + player_obs[0]) >= 2*convert_cost or ((
(ship_halite + player_obs[0]) >= convert_cost) and (
my_ship_count > 3)))) and (
ship_scores[2].max()) >= max([
ship_scores[0].max()*can_deposit_halite,
(ship_scores[1]*(my_considered_bases | (
my_defended_abandoned_bases) | can_return_safely)).max(),
ship_scores[3].max(),
]) and (not almost_boxed_in)) or convert_surrounded_ship or (
ship_scores[13]) or victory_formation:
# Obtain the row and column of the new target base
target_base = np.where(ship_scores[2] == ship_scores[2].max())
target_row = target_base[0][0]
target_col = target_base[1][0]
my_non_converted_ship_count -= 1
if (target_row == row and target_col == col) or convert_surrounded_ship:
ship_plans[ship_k] = CONVERT
new_bases.append((row, col))
my_considered_bases[row, col] = True
can_deposit_halite = True
else:
ship_plans[ship_k] = (target_row, target_col, ship_scores[4], False,
row, col, -1)
# if observation['step'] == 322:
# import pdb; pdb.set_trace()
# x=1
# Next, do another pass to coordinate the target squares. This is done in a
# double pass for now where the selection order is determined based on the
# availability of > 1 direction in combination with the initial best score.
# The priorities are recomputed as target squares are taken by higher
# priority ships.
my_prev_step_base_attacker_ships = history[
'my_prev_step_base_attacker_ships']
best_ship_scores = {}
ship_priority_scores = np.zeros(my_ship_count)
ship_priority_matrix = np.zeros((my_ship_count, 8))
all_ship_valid_directions = {}
for i, ship_k in enumerate(player_obs[2]):
if ship_k in ship_plans:
# Make sure that already planned ships stay on top of the priority Q
ship_priority_scores[i] = 1e20
else:
ship_scores = all_ship_scores[ship_k]
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
# if observation['step'] == 131 and ship_k in ['74-1']:
# import pdb; pdb.set_trace()
# Incorporate new bases in the return to base scores
ship_scores = list(ship_scores)
ship_halite = player_obs[2][ship_k][1]
can_return_safely = all_my_bases & (
opponent_nearest_ship_distances >= (
DISTANCES[row, col] + ship_halite*halite_on_board_mult))
ship_scores[1][np.logical_not(
my_considered_bases | my_defended_abandoned_bases | (
can_return_safely))] = -1e7
ship_scores[3][np.logical_not(opponent_bases)] = -1e7
ship_scores = tuple(ship_scores)
for (r, c) in new_bases:
if r != row or c != col:
ship_scores[0][r, c] = -1e7
ship_scores[2][r, c] = -1e7
all_ship_scores[ship_k] = ship_scores
num_non_immediate_bad_directions = len(set(
ship_scores[6] + ship_scores[8]))
num_two_step_neighbors = all_ships[
ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)]].sum() - 1
best_score = np.stack([
ship_scores[0], ship_scores[1], ship_scores[2], ship_scores[3]]).max()
best_ship_scores[ship_k] = best_score
prev_base_attacker = int(ship_k in my_prev_step_base_attacker_ships)
all_ship_valid_directions[ship_k] = copy.copy(ship_scores[6])
ship_priority_scores[i] = best_score + 1e12*(
(len(ship_scores[6]) == 1)) - 1e6*(
num_non_immediate_bad_directions) + 1e4*(
len(ship_scores[8])) + 1e2*(
num_two_step_neighbors) - 1e5*(
len(ship_scores[9])) + 1e7*(
ship_scores[11]) + 3e5*prev_base_attacker
ship_priority_matrix[i] = np.array([
len(ship_scores[6]) == 1, # 1e12
ship_scores[11], # 1e7
num_non_immediate_bad_directions, # 1e6
prev_base_attacker, # 3e5
len(ship_scores[9]), #1e5
len(ship_scores[8]), #1e4
num_two_step_neighbors, #1e2
best_score, #no unit
])
ship_order = np.argsort(-ship_priority_scores)
occupied_target_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
occupied_squares_count = np.zeros((grid_size, grid_size), dtype=np.int)
single_path_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
single_path_max_block_distances = np.ones(
(grid_size, grid_size), dtype=np.int)
return_base_distances = []
chain_conflict_resolution = []
ship_conflict_resolution = []
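  # single_path_squares marks squares that a higher priority ship will
  # deterministically move to; chain_conflict_resolution propagates the domino
  # effect of two-path targets so later ships avoid guaranteed collisions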
# if observation['step'] == 279:
# # ship_positions = [
# # row_col_from_square_grid_pos(
# # player_obs[2][ship_ids[o]][0], grid_size) for o in (ship_order)]
# # print([ship_ids[o] for o in ship_order])
# import pdb; pdb.set_trace()
# List the ships that want to collect and greedily assign conflicted collect
# squares to the ship with the highest collect score
all_collect_scores = []
collect_ship_keys = []
all_collect_multipliers = []
taken_collect_squares = np.zeros((grid_size, grid_size), dtype=np.bool)
for i in range(my_ship_count):
ship_k = ship_ids[ship_order[i]]
ship_scores = all_ship_scores[ship_k]
collect_scores = np.copy(ship_scores[0])
best_collect_score = collect_scores.max()
best_return_score = ship_scores[1].max()
best_establish_score = ship_scores[2].max()
best_attack_base_score = ship_scores[3].max()
best_other_score = max([
best_return_score, best_establish_score, best_attack_base_score])
best_score = max(best_collect_score, best_other_score)
if best_collect_score < 0:
# Set the collect scores to a small value on all squares in the valid
# directions
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
valid_mask = np.zeros((grid_size, grid_size), dtype=np.bool)
if len(ship_scores[6]) == 0:
# Add all squares to the valid mask - everything seems lost anyway
valid_mask |= np.ones_like(valid_mask)
else:
        # Add a small positive value for the escape directions - this way
        # the planning is undisturbed and the other ships remain unaffected by
        # the selected escape path
for d in ship_scores[6]:
valid_mask |= HALF_PLANES_CATCH[row, col][d]
# Boxed in by zero halite ships on a non zero halite square
collect_scores[valid_mask] = 1e-10*np.random.uniform(
size=valid_mask.sum())
if best_score == best_collect_score:
if best_collect_score < 1e4:
collect_ship_keys.append(ship_k)
all_collect_scores.append(collect_scores)
all_collect_multipliers.append(ship_scores[16])
else:
best_collect_pos = np.unravel_index(
collect_scores.argmax(), collect_scores.shape)
taken_collect_squares[best_collect_pos] = 1
num_collect_ships = len(collect_ship_keys)
if num_collect_ships > 0:
stacked_collect_scores = np.stack(all_collect_scores)
updated_collect_scores = np.copy(stacked_collect_scores)
# original_collect_scores = np.copy(stacked_collect_scores)
stacked_collect_scores /= np.stack(all_collect_multipliers)
min_collect_score = stacked_collect_scores.min()
if np.any(taken_collect_squares):
stacked_collect_scores[:, taken_collect_squares] = min_collect_score
for loop_id in range(num_collect_ships):
best_index = np.unravel_index(stacked_collect_scores.argmax(),
stacked_collect_scores.shape)
# print(loop_id, best_index)
# Reduce the best square for all other ships by an order of magnitude
other_ships = np.arange(num_collect_ships) != best_index[0]
updated_collect_scores[
other_ships, best_index[1], best_index[2]] /= 10
      stacked_collect_scores[
          other_ships, best_index[1], best_index[2]] = min_collect_score
# Exclude the assigned ship for subsequent steps
stacked_collect_scores[best_index[0]] = min_collect_score
for ship_id, ship_k in enumerate(collect_ship_keys):
all_ship_scores[ship_k][0][:] = updated_collect_scores[ship_id]
# List the ships that want to attack bases and greedily assign
# base squares to the ships with the highest attack scores
if max_attackers_per_base > 0:
all_attack_details = {}
for i in range(my_ship_count):
ship_k = ship_ids[ship_order[i]]
ship_scores = all_ship_scores[ship_k]
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
attack_base_scores = ship_scores[3]
best_collect_score = ship_scores[0].max()
best_return_score = ship_scores[1].max()
best_establish_score = ship_scores[2].max()
best_attack_base_score = attack_base_scores.max()
best_other_score = max([
best_collect_score, best_return_score, best_establish_score])
best_score = max(best_attack_base_score, best_other_score)
if best_score == best_attack_base_score:
best_collect_pos = np.unravel_index(
attack_base_scores.argmax(), attack_base_scores.shape)
attack_ship_details = (best_score, ship_k, row, col)
if best_collect_pos in all_attack_details:
all_attack_details[best_collect_pos].append(
attack_ship_details)
else:
all_attack_details[best_collect_pos] = [attack_ship_details]
for base_pos in all_attack_details:
num_attackers_this_base = len(all_attack_details[base_pos])
if num_attackers_this_base > max_attackers_per_base:
attack_ship_scores = np.array([d[0] for d in (
all_attack_details[base_pos])])
keep_attack_ids = np.argsort(-attack_ship_scores)[
:max_attackers_per_base].tolist()
for attacker_id in range(num_attackers_this_base):
ship_k = all_attack_details[base_pos][attacker_id][1]
if not attacker_id in keep_attack_ids:
all_ship_scores[ship_k][3][base_pos] = -1e12
# if observation['step'] == 75:
# import pdb; pdb.set_trace()
# x=1
inner_loop_start_time = time.time()
recompute_ship_plan_order_duration = 0
base_return_ship_keys = []
for i in range(my_ship_count):
ship_k = ship_ids[ship_order[i]]
ship_scores = all_ship_scores[ship_k]
ship_halite = player_obs[2][ship_k][1]
if not ship_k in ship_plans:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
valid_directions = ship_scores[6]
# if observation['step'] == 118 and ship_k == '50-1':
# import pdb; pdb.set_trace()
after_blocked_valid_dirs = copy.copy(ship_scores[6])
if single_path_squares.sum() and not ship_scores[12]:
(s0, s1, s2, s3, after_blocked_valid_dirs, _,
_) = update_scores_blockers(
ship_scores[0], ship_scores[1], ship_scores[2], ship_scores[3],
row, col, grid_size, single_path_squares,
single_path_max_block_distances, valid_directions,
ship_scores[9], update_attack_base=True)
ship_scores = (s0, s1, s2, s3, ship_scores[4], ship_scores[5],
ship_scores[6], ship_scores[7], ship_scores[8],
ship_scores[9], ship_scores[10], ship_scores[11],
ship_scores[12], ship_scores[13], ship_scores[14],
ship_scores[15], ship_scores[16], ship_scores[17],
ship_scores[18])
if ship_halite == 0 and (len(after_blocked_valid_dirs) == 0 or (len(
after_blocked_valid_dirs) == 1 and (
valid_directions[0] is None))) and obs_halite[row, col] > 0:
# There are no longer any valid directions available for my zero halite
# ship due to my other ships taking the final escape squares.
# Typically this means being surrounded by opponent zero halite ships
# In this situation we have to prefer risky actions over staying still!
for d in np_rng.permutation(NOT_NONE_DIRECTIONS):
move_row, move_col = move_ship_row_col(
row, col, d, grid_size)
if not single_path_squares[move_row, move_col]:
ship_scores[0][move_row, move_col] = 5e4
ship_scores[6].append(d)
break
best_collect_score = ship_scores[0].max()
best_return_score = ship_scores[1].max()
best_establish_score = ship_scores[2].max()
best_attack_base_score = ship_scores[3].max()
best_score = max([best_collect_score, best_return_score,
best_establish_score, best_attack_base_score])
# if observation['step'] == 247 and ship_k == '52-2':
# import pdb; pdb.set_trace()
if best_collect_score == best_score:
# 1) Gather mode
target_gather = np.where(ship_scores[0] == ship_scores[0].max())
target_row = target_gather[0][0]
target_col = target_gather[1][0]
if target_row == row and target_col == col and my_ship_count == 1 and (
num_bases == 0 and (
ship_halite+player_obs[0]) >= 2*convert_cost) and (
convert_first_ship_on_None_action):
ship_plans[ship_k] = CONVERT
my_considered_bases[row, col] = True
update_occupied_count(
row, col, occupied_target_squares, occupied_squares_count)
else:
# Join the base attack in some base camping phases
consider_base_attack = False
if ship_k in camping_ships_strategy:
base_location = camping_ships_strategy[ship_k][5]
consider_base_attack = camping_ships_strategy[ship_k][4]
if consider_base_attack:
base_distance = DISTANCES[base_location][row, col]
attack_tuple = (base_distance, ship_halite, ship_k, row, col,
True)
if base_location in base_attackers:
base_attackers[base_location].append(attack_tuple)
else:
base_attackers[base_location] = [attack_tuple]
ship_plans[ship_k] = (target_row, target_col, ship_scores[4],
consider_base_attack, row, col, best_score)
if best_collect_score > 1e5:
# If there is only one path to defend the base: treat it as if
# there is only one valid action:
defend_dirs = get_dir_from_target(
row, col, target_row, target_col, grid_size)
if len(defend_dirs) == 1:
move_row, move_col = move_ship_row_col(
row, col, defend_dirs[0], grid_size)
single_path_squares[move_row, move_col] = 1
update_occupied_count(
target_row, target_col, occupied_target_squares,
occupied_squares_count)
elif best_return_score == best_score:
# 2) Return base mode
target_return = np.where(ship_scores[1] == ship_scores[1].max())
target_row = target_return[0][0]
target_col = target_return[1][0]
# Element 4 is whether we can ignore collisions when moving onto a base
ship_plans[ship_k] = (target_row, target_col, ship_scores[4],
ship_k in defend_base_ignore_collision_keys and (
not bases_protected.get(
(target_row, target_col), True)),
row, col, best_score)
base_distance = DISTANCES[target_row, target_col][row, col]
base_return_ship_keys.append(ship_k)
# Mark the ship as returning to a base
if not ship_k in history['returning_to_base_ships']:
history['returning_to_base_ships'].append(ship_k)
if not bases_protected.get((target_row, target_col), True):
bases_protected[target_row, target_col] = base_distance==0
if not ship_k in all_ignore_base_collision_ship_keys:
return_base_distances.append((target_row, target_col, base_distance))
elif best_establish_score == best_score:
# 3) Establish base mode
target_base = np.where(ship_scores[2] == ship_scores[2].max())
target_row = target_base[0][0]
target_col = target_base[1][0]
ship_plans[ship_k] = (target_row, target_col, ship_scores[4], False,
row, col, best_score)
update_occupied_count(
target_row, target_col, occupied_target_squares,
occupied_squares_count)
else:
# 4) Attack base mode
# print("Attack!", observation['step'], row, col)
target_base = np.where(ship_scores[3] == ship_scores[3].max())
target_row = target_base[0][0]
target_col = target_base[1][0]
base_distance = DISTANCES[(row, col)][target_row, target_col]
attack_tuple = (base_distance, ship_halite, ship_k, row, col, False)
if (target_row, target_col) in base_attackers:
base_attackers[(target_row, target_col)].append(attack_tuple)
else:
base_attackers[(target_row, target_col)] = [attack_tuple]
ship_plans[ship_k] = (target_row, target_col, ship_scores[4], True,
row, col, best_score)
update_occupied_count(
target_row, target_col, occupied_target_squares,
occupied_squares_count)
deterministic_next_pos = None
if target_row == row and target_col == col:
deterministic_next_pos = (row, col)
single_path_squares[row, col] = 1
elif len(valid_directions) == 1 and not None in valid_directions and (
target_row != row or target_col != col):
# I have only one escape direction and must therefore take that path
escape_square = move_ship_row_col(
row, col, valid_directions[0], grid_size)
deterministic_next_pos = escape_square
single_path_squares[escape_square[0], escape_square[1]] = 1
elif (target_row == row) != (target_col == col):
# I only have a single path to my target, so my next position, which is
# deterministic, should be treated as an opponent base (don't set
# plan targets behind the ship to avoid collisions with myself)
move_dir = get_dir_from_target(
row, col, target_row, target_col, grid_size)[0]
next_square = move_ship_row_col(row, col, move_dir, grid_size)
deterministic_next_pos = next_square
single_path_squares[next_square[0], next_square[1]] = 1
else:
# Check if higher priority ships have already selected one of my two
# possible paths to the target and keep track of the domino effect of
# selected optimal squares for two-path targets
move_dirs = get_dir_from_target(
row, col, target_row, target_col, grid_size)
if len(move_dirs) == 2:
square_taken = []
considered_squares = []
for square_id, d in enumerate(move_dirs):
move_square = move_ship_row_col(row, col, d, grid_size)
taken = single_path_squares[move_square]
square_taken.append(taken)
considered_squares.append(move_square)
if not taken:
not_taken_square = move_square
if square_taken[0] != square_taken[1]:
single_path_squares[not_taken_square] = 1
else:
if not square_taken[0]:
first_pair = (considered_squares[0], considered_squares[1])
second_pair = (considered_squares[1], considered_squares[0])
chain_conflict_resolution.append(first_pair)
chain_conflict_resolution.append(second_pair)
# If two ships move in opposite diagonal directions, both squares
# are definitely occupied
if ((first_pair, second_pair) in ship_conflict_resolution) and (
first_pair in chain_conflict_resolution) and (
second_pair in chain_conflict_resolution):
deterministic_next_pos = considered_squares[0]
ship_conflict_resolution.remove((first_pair, second_pair))
ship_conflict_resolution.remove((second_pair, first_pair))
else:
ship_conflict_resolution.append((first_pair, second_pair))
ship_conflict_resolution.append((second_pair, first_pair))
# if observation['step'] == 55 and (row, col) in [
# (3, 4), (4, 5), (5, 4)]:
# import pdb; pdb.set_trace()
if deterministic_next_pos is not None:
det_stack = [deterministic_next_pos]
while det_stack:
det_pos = det_stack.pop()
single_path_squares[det_pos] = 1
chained_pos = []
del_pairs = []
for sq1, sq2 in chain_conflict_resolution:
if det_pos == sq1:
if not sq2 in chained_pos:
chained_pos.append(sq2)
del_pairs.append((sq1, sq2))
del_pairs.append((sq2, sq1))
if chained_pos:
det_stack += chained_pos
chain_conflict_resolution = list(
set(chain_conflict_resolution)-set(del_pairs))
all_ship_scores[ship_k] = ship_scores
# Update the ship scores for future ships - largely redundant but likely
# not to be a performance bottleneck.
# Just to be sure: don't do this when I have a risk of timing out
if not history['limit_ships_timeout']:
reorder_start_time = time.time()
for j in range(i+1, my_ship_count):
order_id = ship_order[j]
ship_k_future = ship_ids[order_id]
if not ship_k_future in ship_plans and not all_ship_scores[
ship_k_future][12]:
future_ship_scores = all_ship_scores[ship_k_future]
future_row, future_col = row_col_from_square_grid_pos(
player_obs[2][ship_k_future][0], grid_size)
# if observation['step'] == 122 and ship_k_future == '102-1':
# import pdb; pdb.set_trace()
future_ship_scores[0][occupied_target_squares] = -1e7
future_ship_scores[2][occupied_target_squares] = -1e7
future_ship_scores[3][
occupied_squares_count >= max_attackers_per_base] = -1e7
# if np.any(future_ship_scores_c[0] != future_ship_scores[0]) or np.any(
# future_ship_scores_c[2] != future_ship_scores[2]) or np.any(
# future_ship_scores_c[3] != future_ship_scores[3]):
# import pdb; pdb.set_trace()
for (r, c, d) in return_base_distances:
# This coordinates return to base actions and avoids base blocking
if DISTANCES[r, c][future_row, future_col] == d:
future_ship_scores[1][r, c] = -1e7
updated_best_score = max([
future_ship_scores[0].max(), future_ship_scores[1].max(),
future_ship_scores[2].max(), future_ship_scores[3].max()])
# Verify if the number of valid directions changed from > 1 to 1 or
# from 1 to 0 and change the priority accordingly.
considered_valid_directions = all_ship_valid_directions[ship_k_future]
valid_considered_valid_directions = []
for d in considered_valid_directions:
move_future_row, move_future_col = move_ship_row_col(
future_row, future_col, d, grid_size)
if not single_path_squares[move_future_row, move_future_col]:
valid_considered_valid_directions.append(d)
all_ship_valid_directions[ship_k_future] = (
valid_considered_valid_directions)
# if (int(len(
# valid_considered_valid_directions) == 1) - int(len(
# considered_valid_directions) == 1)) != 0 and observation[
# 'step'] == 142:
# import pdb; pdb.set_trace()
# Update the priority for future ships using the updated ship scores
# and the updated single valid direction counts
priority_change = updated_best_score - (
best_ship_scores[ship_k_future]) + 1e12*(int(len(
valid_considered_valid_directions) == 1) - int(len(
considered_valid_directions) == 1))
# assert priority_change <= 0 # Only relevant for best score changes
ship_priority_scores[order_id] += priority_change
best_ship_scores[ship_k_future] = updated_best_score
all_ship_scores[ship_k_future] = future_ship_scores
# if observation['step'] == 142:
# import pdb; pdb.set_trace()
# Update the ship order - this works since priorities can only be lowered
# and we only consider future ships when downgrading priorities
# Make sure no ships get skipped by the +X hack
ship_priority_scores[ship_order[:(i+1)]] += 1e30 # Max float 32: 3e38
ship_order = np.argsort(-ship_priority_scores)
recompute_ship_plan_order_duration += time.time() - reorder_start_time
# Override of the ship plan: if I have a ship that is returning to a base
# after the first N steps, and it can safely collect on an interesting
# halite square: collect rather than return.
if observation['relative_step'] > config[
'collect_on_safe_return_relative_step']:
for ship_k in base_return_ship_keys:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
base_row = ship_plans[ship_k][0]
base_col = ship_plans[ship_k][1]
safe_collect_ship_margin = safe_collect_margin[row, col]
return_score = ship_plans[ship_k][6]
this_square_collect_score = all_ship_scores[ship_k][0][row, col]
distance_to_base = DISTANCES[row, col][base_row, base_col]
# if observation['step'] == 393 and row == 11 and col == 7:
# import pdb; pdb.set_trace()
if safe_collect_ship_margin > 0 and obs_halite[row, col] > 100 and (
not occupied_target_squares[row, col]) and (
(distance_to_base == (steps_remaining-1)) or (
return_score < 1e6 and (
distance_to_base > 5-3*observation['relative_step']))) and (
this_square_collect_score < 1e5) and (
steps_remaining >= distance_to_base):
# print("Forced gather", observation['step'], row, col)
ship_plans[ship_k] = (
row, col, [], False, row, col, this_square_collect_score)
  # Drop the camping ships from the base attackers if there is no non-camper
  # base attacker for the targeted base
del_keys = []
for target_base in base_attackers:
attackers = base_attackers[target_base]
has_non_camp_attacker = False
for i in range(len(attackers)):
if not attackers[i][5]:
has_non_camp_attacker = True
break
if not has_non_camp_attacker:
del_keys.append(target_base)
for del_key in del_keys:
del base_attackers[del_key]
my_prev_step_base_attacker_ships = []
for target_base in base_attackers:
base_attacker_keys = [attacker[2] for attacker in base_attackers[
target_base]]
my_prev_step_base_attacker_ships.extend(base_attacker_keys)
history['my_prev_step_base_attacker_ships'] = (
my_prev_step_base_attacker_ships)
# if observation['step'] == 247:
# import pdb; pdb.set_trace()
  # Reorder the ship plans by current location for debugging purposes
ship_plans_reordered = []
all_reordered_vals = []
order_scores = []
for k in ship_plans:
vals = ship_plans[k]
if isinstance(vals, str):
reordered_vals = vals
else:
reordered_vals = (vals[4], vals[5], k, vals[0], vals[1], vals[2],
vals[3], int(vals[6]))
all_reordered_vals.append(reordered_vals)
order_scores.append(vals[4]*grid_size+vals[5])
for ordered_id in np.argsort(np.array(order_scores)):
ship_plans_reordered.append(all_reordered_vals[ordered_id])
ship_plans_duration = time.time() - ship_plans_start_time
inner_loop_ship_plans_duration = time.time() - inner_loop_start_time
return (ship_plans, my_considered_bases, all_ship_scores, base_attackers,
box_in_duration, history, ship_plans_duration,
inner_loop_ship_plans_duration,
recompute_ship_plan_order_duration, on_rescue_mission,
ships_on_box_mission, requested_save_conversion_budget,
non_abandoned_base_pos, this_step_base_defense_keys,
should_spawn_base_next_step, ship_plans_reordered,
victory_formation)
def get_dir_from_target(row, col, target_row, target_col, grid_size):
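  """Return the shortest-path direction(s) to the target on the torus.

  Returns [None] when already at the target and both directions on an axis
  when the two wrap-around distances are equal. Example on a 21x21 grid:
  going from column 2 to column 19 yields [WEST], since wrapping west takes
  4 steps versus 17 steps going east.
  """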
if row == target_row and col == target_col:
return [None]
horiz_diff = target_col-col
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-grid_size), np.abs(horiz_diff+grid_size)))
vert_diff = target_row-row
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-grid_size), np.abs(vert_diff+grid_size)))
half_grid = grid_size / 2
shortest_directions = []
if horiz_distance > 0:
if target_col > col:
shortest_dirs = [EAST if (target_col - col) <= half_grid else WEST]
else:
shortest_dirs = [WEST if (col - target_col) <= half_grid else EAST]
if horiz_distance == grid_size/2:
shortest_dirs = [EAST, WEST]
shortest_directions.extend(shortest_dirs)
if vert_distance > 0:
if target_row > row:
shortest_dirs = [SOUTH if (target_row - row) <= half_grid else NORTH]
else:
shortest_dirs = [NORTH if (row - target_row) <= half_grid else SOUTH]
if vert_distance == grid_size/2:
shortest_dirs = [NORTH, SOUTH]
shortest_directions.extend(shortest_dirs)
return shortest_directions
def base_can_be_defended(base_attackers, target_row, target_col, stacked_bases,
stacked_ships, halite_ships):
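  """Estimate whether the base at (target_row, target_col) can be defended.

  Compares the sorted attacker arrival times (cargo acts as a tie-breaker)
  with the base owner's defender arrival times, and verifies the owner has
  enough nearby zero halite ships to absorb a sequence of back-to-back
  attacker sacrifices.
  """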
attackers = base_attackers[(target_row, target_col)]
num_attackers = len(attackers)
attack_distances = np.array([a[0] for a in attackers])
distance_argsort = np.argsort(attack_distances)
attack_distances_sorted = np.maximum(
attack_distances[distance_argsort], 1+np.arange(num_attackers))
attack_halite_sorted = np.array([
attackers[distance_argsort[i]][1] for i in range(num_attackers)])
attack_scores = np.sort(attack_distances_sorted+1e-6*attack_halite_sorted)
opponent_ids = np.where(stacked_bases[:, target_row, target_col])[0]
can_defend = False
  if opponent_ids.size:
opponent_id = opponent_ids[0]
opponent_ships = stacked_ships[opponent_id]
defend_scores = 1e-6*halite_ships[opponent_ships] + np.maximum(
1, DISTANCES[(target_row, target_col)][opponent_ships])
sorted_defend_scores = np.sort(defend_scores)
max_ships = min(attack_scores.size, defend_scores.size)
can_defend = not np.any(
attack_scores[:max_ships] < sorted_defend_scores[:max_ships])
if can_defend:
# Check that the opponent can defend against a sequence of sacrifices
in_sequence_near_base = attack_distances_sorted[:max_ships] == (
1+np.arange(max_ships))
num_lined_up = (np.cumprod(in_sequence_near_base) == 1).sum()
opponent_zero_halite_sorted_base_distances = np.sort(DISTANCES[
(target_row, target_col)][np.logical_and(
opponent_ships, halite_ships == 0)])
can_defend = opponent_zero_halite_sorted_base_distances.size >= (
num_lined_up) and not np.any(
opponent_zero_halite_sorted_base_distances[:num_lined_up] > (
1+np.arange(num_lined_up)))
return can_defend
def get_opponent_blocked_escape_dir(
bad_positions, opponent_ships_sensible_actions, row, col, np_rng, grid_size,
observation, ship_k):
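  """List escape moves that no opponent ship can sensibly attack.

  A direction qualifies when the destination square is not a bad position
  and none of the five squares from which it can be reached contains an
  opponent ship with the corresponding move among its sensible actions.
  """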
escape_actions = []
for a in MOVE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col]:
# Check all five ways the move row, move col can be reached. If none is
# in the opponent sensible actions: take that action
opponent_can_attack_square = False
for b in MOVE_DIRECTIONS:
neighbor_row, neighbor_col = move_ship_row_col(
move_row, move_col, b, grid_size)
neighbor_k = (neighbor_row, neighbor_col)
if neighbor_k in opponent_ships_sensible_actions:
threat_opponent_rel_dir = RELATIVE_DIR_MAPPING[OPPOSITE_MAPPING[b]]
if threat_opponent_rel_dir in opponent_ships_sensible_actions[
neighbor_k]:
opponent_can_attack_square = True
break
if not opponent_can_attack_square:
escape_actions.append(a)
return escape_actions
def map_ship_plans_to_actions(
config, observation, player_obs, env_observation, env_config, verbose,
all_ship_scores, before_plan_ship_scores, ship_plans, np_rng,
ignore_bad_attack_directions, base_attackers, steps_remaining,
opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
history, env_obs_ids, opponent_ships_scaled, main_base_distances,
ignore_convert_positions, ballistic_attack_base_targets,
player_influence_maps):
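  """Translate the per-ship plans (target squares and convert orders) into
  concrete environment actions.

  Ships are processed in planning order; each ship prefers a shortest-path
  move towards its target that is not a bad position, with a cascade of
  tie-breakers (shortest-path congestion, preferred directions, base
  defense, friendly base deposits, ship density and influence). When no
  shortest-path move is valid, progressively riskier escape options are
  considered. Returns the ship actions, the remaining halite budget, the
  planned next ship positions, the updated halite map, the updated ship
  positions, action override statistics and the elapsed planning time.
  """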
ship_map_start_time = time.time()
ship_actions = {}
remaining_budget = player_obs[0]
convert_cost = env_config.convertCost
obs_halite = np.maximum(0, observation['halite'])
# Clip obs_halite to zero when gathering it doesn't add to the score
# code: delta_halite = int(cell.halite * configuration.collect_rate)
obs_halite[obs_halite < 1/env_config.collectRate] = 0
grid_size = obs_halite.shape[0]
my_ship_count = len(player_obs[2])
my_next_ships = np.zeros((grid_size, grid_size), dtype=np.bool)
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
my_bases = stacked_bases[0]
stacked_ships = np.stack(
[rbs[2] for rbs in observation['rewards_bases_ships']])
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
updated_ship_pos = {}
my_non_zero_halite_ship_density = smooth2d(np.logical_and(
stacked_ships[0], halite_ships > 0), smooth_kernel_dim=3)
my_zero_halite_ship_density = smooth2d(np.logical_and(
stacked_ships[0], halite_ships == 0), smooth_kernel_dim=5)
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
  # When boxed in: avoid squares that are the only escape square of a lower
  # halite opponent, since that opponent will most likely move there
avoid_squares_boxed_in = np.zeros_like(stacked_ships[0])
for (row, col) in opponent_ships_sensible_actions:
escape_directions = opponent_ships_sensible_actions[(row, col)]
if len(escape_directions) == 1:
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[escape_directions[0]]
move_row, move_col = move_ship_row_col(row, col, escape_dir, grid_size)
avoid_squares_boxed_in[move_row, move_col] = 1
# For debugging - the order in which actions are planned
ordered_debug_ship_plans = [[k]+list(v) for k, v in ship_plans.items()]
ordered_debug_ship_plans = ordered_debug_ship_plans
# if observation['step'] == 394:
# import pdb; pdb.set_trace()
base_attack_override_wait = []
for target_base in base_attackers:
attackers = base_attackers[target_base]
# Only consider attackers that can make it onto the base in time
attackers = [a for a in attackers if a[0] <= steps_remaining]
num_attackers = len(attackers)
attack_distances = np.array([a[0] for a in attackers])
if num_attackers > 1 and steps_remaining > 2*attack_distances.max():
# If the base can not be defended: don't bother synchronizing the attack
# if observation['step'] == 349:
# import pdb; pdb.set_trace()
# x=1
can_defend = base_can_be_defended(
base_attackers, target_base[0], target_base[1], stacked_bases,
stacked_ships, halite_ships)
# Synchronize the attackers
try_to_wait_strategy = False
if can_defend:
argsort_distances = np.argsort(attack_distances)
sorted_distances = attack_distances[argsort_distances]
if not np.all(np.diff(sorted_distances) == 1):
# Pause the attackers that are the closest to the target on squares
# with no halite
should_wait = np.zeros((num_attackers), dtype=np.bool)
if sorted_distances[0] == sorted_distances[1]:
should_wait[argsort_distances[1]] = 1
next_distance = sorted_distances[0]
elif sorted_distances[0] == sorted_distances[1]-1:
should_wait[argsort_distances[0]] = 1
should_wait[argsort_distances[1]] = 1
next_distance = sorted_distances[1]
else:
should_wait[argsort_distances[0]] = 1
next_distance = sorted_distances[1]-1
for i in range(2, num_attackers):
if next_distance == sorted_distances[i]-2:
# I should close the ranks and move along
next_distance = sorted_distances[i]-1
else:
# I should wait for other ships to catch up
should_wait[argsort_distances[i]] = 1
next_distance = sorted_distances[i]
# if observation['step'] == 225:
# import pdb; pdb.set_trace()
# Verify that all ships that should wait can wait
can_all_wait = True
should_wait_ids = np.where(should_wait)[0]
for should_wait_id in should_wait_ids:
row = attackers[should_wait_id][3]
col = attackers[should_wait_id][4]
# Don't wait at one of my bases or a non-zero halite square
if obs_halite[row, col] > 0 or my_bases[row, col]:
can_all_wait = False
break
if can_all_wait:
for should_wait_id in should_wait_ids:
ship_k = attackers[should_wait_id][2]
row = attackers[should_wait_id][3]
col = attackers[should_wait_id][4]
ship_plans[ship_k] = (row, col, [], False, row, col, -2)
if not None in all_ship_scores[ship_k][6]:
# print("PERFORMING A RISKY BASE ATTACK OVERRIDE SHIP WAIT",
# observation['step'], row, col)
all_ship_scores[ship_k][6].append(None)
else:
# Consider waiting with one of the two closest attackers to make
# the two closest attackers aligned
try_to_wait_strategy = True
else:
try_to_wait_strategy = True
if try_to_wait_strategy:
# Maybe override the attack actions - wait on zero halite squares so
# we can take the base down in one of the subsequent steps
attack_distances = np.array([a[0] for a in attackers])
min_distance = attack_distances.min()
consider_wait_ids = None
if (attack_distances == min_distance).sum() > 1:
consider_wait_ids = np.where(attack_distances == min_distance)[0]
elif (attack_distances == (min_distance+1)).sum() > 1:
consider_wait_ids = np.where(attack_distances == (min_distance+1))[0]
if consider_wait_ids is not None:
for wait_id in range(consider_wait_ids.size):
consider_wait_id = consider_wait_ids[wait_id]
considered_ship_k = attackers[consider_wait_id][2]
considered_row = attackers[consider_wait_id][3]
considered_col = attackers[consider_wait_id][4]
if obs_halite[considered_row, considered_col] == 0:
base_attack_override_wait.append(considered_ship_k)
break
# List all positions you definitely don't want to move to. Initially this
# only contains opponent bases and eventually also earlier ships.
opponent_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])[1:].sum(0)
bad_positions = np.copy(opponent_bases)
# Add current likely converted bases to the bad positions
for (ignore_convert_row, ignore_convert_col) in ignore_convert_positions:
bad_positions[ignore_convert_row, ignore_convert_col] = 1
# Order the ship plans based on the available valid direction count. Break
# ties using the original order.
# move_valid_actions = OrderedDict()
shortest_path_count = {}
ship_priority_scores = np.zeros(my_ship_count)
ship_key_plans = list(ship_plans)
for i, ship_k in enumerate(ship_key_plans):
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
valid_actions = []
if not isinstance(ship_plans[ship_k], str):
(target_row, target_col, preferred_directions, ignore_base_collision,
_, _, _) = ship_plans[ship_k]
shortest_actions = get_dir_from_target(row, col, target_row, target_col,
grid_size)
# Filter out bad positions from the shortest actions
for a in shortest_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col] or (
ignore_base_collision and (
move_row == target_row and move_col == target_col)) or (
not opponent_bases[move_row, move_col] and all_ship_scores[
ship_k][12]):
valid_actions.append(a)
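      # Tally how often each candidate square lies on some ship's shortest
      # path; a ship with a single valid action gets a slightly higher weight
      # so other ships prefer to stay out of its way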
for a in valid_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
path_lookup_k = (move_row, move_col)
square_weight = 1 if len(valid_actions) > 1 else 1.1
if path_lookup_k in shortest_path_count:
shortest_path_count[path_lookup_k] += square_weight
else:
shortest_path_count[path_lookup_k] = square_weight
# move_valid_actions[ship_k] = valid_actions
# num_non_immediate_bad_directions = len(set(
# all_ship_scores[ship_k][6] + all_ship_scores[ship_k][8]))
# Just keep the order from the planning - this is cleaner and works better!
ship_priority_scores[i] = -i
# ship_priority_scores[i] = -1e6*num_non_immediate_bad_directions -1e3*len(
# valid_actions) + 1e4*len(all_ship_scores[ship_k][8]) - 1e5*len(
# before_plan_ship_scores[ship_k][9]) - i + 1e7*(
# all_ship_scores[ship_k][11])
ship_order = np.argsort(-ship_priority_scores)
ordered_ship_plans = [ship_key_plans[o] for o in ship_order]
# Keep track of all my ship positions and rearrange the action planning when
# one of my ships only has one remaining option that does not self destruct.
ship_non_self_destructive_actions = {}
for ship_k in ordered_ship_plans:
ship_non_self_destructive_actions[ship_k] = copy.copy(MOVE_DIRECTIONS)
num_ships = len(ordered_ship_plans)
action_overrides = np.zeros((7))
camping_ships_strategy = history['camping_ships_strategy']
for i in range(num_ships):
ship_k = ordered_ship_plans[i]
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
has_selected_action = False
if isinstance(ship_plans[ship_k], str):
if ship_plans[ship_k] == CONVERT and (
halite_ships[row, col] < convert_cost/config[
'boxed_in_halite_convert_divisor']) and (
halite_ships[row, col] > 0) and my_ship_count > 1 and (
all_ship_scores[ship_k][2][row, col] < 1e6):
# Override the convert logic - it's better to lose some ships than to
# convert too often (good candidate for stateful logic)
# We enter this path when the only remaining base is reconstructed
# (rare!)
        # Pick a random diagonally adjacent square as the new target
        target_row = np.mod(row + np_rng.choice([-1, 1]), grid_size)
        target_col = np.mod(col + np_rng.choice([-1, 1]), grid_size)
shortest_actions = get_dir_from_target(
row, col, target_row, target_col, grid_size)
# Filter out bad positions from the shortest actions
for a in shortest_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col] or (not opponent_bases[
move_row, move_col] and all_ship_scores[ship_k][12]):
path_lookup_k = (move_row, move_col)
if not path_lookup_k in shortest_path_count:
shortest_path_count[path_lookup_k] = 0
ship_plans[ship_k] = (target_row, target_col, [], False, row, col, -3)
else:
ship_actions[ship_k] = ship_plans[ship_k]
obs_halite[row, col] = 0
remaining_budget -= convert_cost
has_selected_action = True
del ship_non_self_destructive_actions[ship_k]
# if observation['step'] == 78 and ship_k in ['30-2', '35-2']:
# import pdb; pdb.set_trace()
if not has_selected_action:
(target_row, target_col, preferred_directions, ignore_base_collision,
_, _, _) = ship_plans[ship_k]
# Override the target row and column if this ship is an aggressive base
# camper and the base can not be defended
if ship_k in camping_ships_strategy:
base_location = camping_ships_strategy[ship_k][5]
consider_base_attack = camping_ships_strategy[ship_k][4]
if consider_base_attack and (
base_location[0], base_location[1]) in base_attackers:
base_distance = DISTANCES[base_location][row, col]
can_defend = base_can_be_defended(
base_attackers, base_location[0], base_location[1], stacked_bases,
stacked_ships, halite_ships)
if base_distance == 1 and not can_defend:
target_row = base_location[0]
target_col = base_location[1]
shortest_path_count[base_location] = 1.0
shortest_actions = get_dir_from_target(row, col, target_row, target_col,
grid_size)
if ship_k in base_attack_override_wait:
shortest_actions = [None]
path_lookup_k = (row, col)
if not path_lookup_k in shortest_path_count:
shortest_path_count[path_lookup_k] = 1.0
ignore_bad_attack_directions_this_ship = ignore_bad_attack_directions
if ignore_base_collision and not ignore_bad_attack_directions and (
(target_row, target_col) in base_attackers):
can_defend = base_can_be_defended(
base_attackers, target_row, target_col, stacked_bases, stacked_ships,
halite_ships)
if can_defend:
ignore_base_collision = False
else:
ignore_bad_attack_directions_this_ship = True
# You shall have no fear in ballistic mode
if (target_row, target_col) in ballistic_attack_base_targets:
ignore_bad_attack_directions_this_ship = True
ignore_base_collision = True
# if observation['step'] == 106 and row == 20 and col == 7:
# import pdb; pdb.set_trace()
# Remove own ships from the shortest action bad positions when the ships
# are returning to the base at the end of the game
for a in shortest_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if bad_positions[move_row, move_col] and not opponent_bases[
move_row, move_col] and all_ship_scores[ship_k][12]:
bad_positions[move_row, move_col] = False
# Filter out bad positions from the shortest actions
valid_actions = []
valid_move_positions = []
for a in shortest_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if (not bad_positions[move_row, move_col] and (
a in all_ship_scores[ship_k][6])) or (ignore_base_collision and ((
move_row == target_row and move_col == target_col) or (
ignore_bad_attack_directions_this_ship and not bad_positions[
move_row, move_col])) and not my_next_ships[
move_row, move_col]) or (
all_ship_scores[ship_k][12] and steps_remaining == 1):
valid_actions.append(a)
valid_move_positions.append((move_row, move_col))
path_lookup_k = (move_row, move_col)
if not path_lookup_k in shortest_path_count:
# import pdb; pdb.set_trace()
print("Path key lookup fail step", observation['step'], row, col,
ship_k, i, move_row, move_col)
shortest_path_count[path_lookup_k] = 1
if valid_actions:
# Prefer actions that conflict with the lowest number of optimal
# ship trajectories
if len(valid_actions) > 1:
shortest_path_counts = np.array([
shortest_path_count[k] for k in valid_move_positions])
shortest_path_ids = np.where(
shortest_path_counts == shortest_path_counts.min())[0].tolist()
valid_actions = [a for i, a in enumerate(valid_actions) if (
i in shortest_path_ids)]
# Don't take an action that blocks my path to the target with an
# opponent base
if len(valid_actions) > 1:
row_distance = DISTANCES[row, 0][target_row, 0]
col_distance = DISTANCES[0, col][0, target_col]
single_path_square = None
if row_distance == 1 and col_distance > 1:
single_path_square = (target_row, col)
single_path_dir = NORTH if NORTH in valid_actions else SOUTH
elif col_distance == 1 and row_distance > 1:
single_path_square = (row, target_col)
single_path_dir = EAST if EAST in valid_actions else WEST
if single_path_square is not None:
should_avoid = np.any(opponent_bases[get_mask_between_exclude_ends(
single_path_square[0], single_path_square[1], target_row,
target_col, grid_size)])
if should_avoid and single_path_dir in valid_actions:
valid_actions.remove(single_path_dir)
# Take a preferred action when it is among the shortest path options
if len(valid_actions) > 1 and preferred_directions:
intersect_directions = list(set(valid_actions) & set(
preferred_directions))
if intersect_directions:
valid_actions = intersect_directions
# Prefer a non invalid one step action when defending the base
if len(valid_actions) > 1:
intersect_directions = list(set(valid_actions) & set(
before_plan_ship_scores[ship_k][9]))
if intersect_directions:
valid_actions = intersect_directions
# Move onto one of my friendly bases if I have halite on board and it
# is one of the available options
if len(valid_actions) > 1 and halite_ships[row, col] > 0:
move_onto_base_action = None
for a in valid_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if my_bases[move_row, move_col]:
move_onto_base_action = a
break
if move_onto_base_action is not None:
valid_actions = [move_onto_base_action]
# When all actions are equally valid, move to a square with lower non
# zero halite ship density in order to avoid double box-ins.
# Only do this for non zero halite ships
if len(valid_actions) > 1 and halite_ships[row, col] > 0:
considered_densities = np.zeros(len(valid_actions))
for a_id, a in enumerate(valid_actions):
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
considered_densities[a_id] = my_non_zero_halite_ship_density[
move_row, move_col]
min_density = considered_densities.min()
valid_actions = [a for (a_id, a) in enumerate(valid_actions) if (
considered_densities[a_id] == min_density)]
if len(valid_actions) > 1 and halite_ships[row, col] > 0:
# When all actions are equally valid: prefer a square where I have
# more influence
considered_influences = np.zeros(len(valid_actions))
for a_id, a in enumerate(valid_actions):
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
considered_influences[a_id] = player_influence_maps[
0, move_row, move_col] - player_influence_maps[
1:, move_row, move_col].sum()
max_influence = considered_influences.max()
valid_actions = [a for (a_id, a) in enumerate(valid_actions) if (
considered_influences[a_id] == max_influence)]
if len(valid_actions) > 1:
# Do this in order to obtain reproducible results - the set intersect
# logic is flaky.
valid_actions.sort()
action = str(np_rng.choice(valid_actions))
action = None if action == 'None' else action
else:
# I can not move to my target using a shortest path action
# Alternative: consider all valid, not bad actions
# import pdb; pdb.set_trace()
action_overrides[0] += 1
valid_not_bad_actions = []
for a in np_rng.permutation(MOVE_DIRECTIONS):
if a in all_ship_scores[ship_k][6]:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col]:
valid_not_bad_actions.append(a)
# if valid_not_bad_actions:
# import pdb; pdb.set_trace()
# Pick a direction where my opponent should not go to since I can
# attack that square with one of my less halite ships
if not valid_not_bad_actions:
action_overrides[1] += 1
self_escape_actions = get_opponent_blocked_escape_dir(
bad_positions, opponent_ships_sensible_actions, row, col, np_rng,
grid_size, observation, ship_k)
if len(self_escape_actions) == 0:
# Consider non risky opponent actions to plan my escape
self_escape_actions = get_opponent_blocked_escape_dir(
bad_positions, opponent_ships_sensible_actions_no_risk, row, col,
np_rng, grid_size, observation, ship_k)
if self_escape_actions:
if halite_ships[row, col] == 0 and len(
self_escape_actions) > 1 and None in self_escape_actions and (
obs_halite[row, col] > 0) and (
main_base_distances[row, col] < 0 or main_base_distances[
row, col] > 2):
# Filter out the stay still action for zero halite ships on a non
# zero halite square if that leaves us with options
self_escape_actions.remove(None)
if before_plan_ship_scores[ship_k][9]:
# Filter out 1-step bad actions if that leaves us with options
self_escape_actions_not_1_step_bad = list(
set(self_escape_actions) & set(
before_plan_ship_scores[ship_k][9]))
if self_escape_actions_not_1_step_bad:
self_escape_actions = self_escape_actions_not_1_step_bad
if all_ship_scores[ship_k][7]:
# Filter out 2-step bad actions if that leaves us with options
self_escape_actions_not_2_step_bad = list(
set(self_escape_actions) - set(all_ship_scores[ship_k][7]))
if self_escape_actions_not_2_step_bad:
self_escape_actions = self_escape_actions_not_2_step_bad
# Filter out n-step bad actions if that leaves us with options
if all_ship_scores[ship_k][8]:
self_escape_actions_not_n_step_bad = list(
set(self_escape_actions) - set(all_ship_scores[ship_k][8]))
if self_escape_actions_not_n_step_bad:
self_escape_actions = self_escape_actions_not_n_step_bad
# Select the shortest actions if that leaves us with options (that
# way we stick to the ship plan)
if bool(self_escape_actions) and bool(shortest_actions):
intersect_directions = list(set(self_escape_actions) & set(
shortest_actions))
if intersect_directions:
self_escape_actions = intersect_directions
# Pick the least bad of the n-step bad directions if that is all we
# are choosing from
if len(all_ship_scores[ship_k][8]) > 1 and len(
self_escape_actions) > 1:
if np.all(
[a in all_ship_scores[ship_k][8] for a in (
self_escape_actions)]):
missing_keys = [a for a in (self_escape_actions) if not a in (
all_ship_scores[ship_k][14])]
if len(missing_keys) > 0:
print("MISSING N-STEP BAD KEYS:", observation['step'],
row, col, missing_keys)
die_probs = np.array([all_ship_scores[ship_k][14].get(
a, 0) for a in self_escape_actions])
self_escape_actions = [
self_escape_actions[np.argmin(die_probs)]]
valid_not_bad_actions = self_escape_actions
# There is no valid direction available; consider n step bad actions
# This check is most probably obsolete since it would appear in
# self_escape_actions
if not valid_not_bad_actions:
action_overrides[2] += 1
for a in np_rng.permutation(MOVE_DIRECTIONS):
if a in all_ship_scores[ship_k][8]:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col]:
valid_not_bad_actions.append(a)
# There is still no valid direction available; consider 2 step bad
# actions
# This check is most probably obsolete since it would appear in
# self_escape_actions
if not valid_not_bad_actions:
action_overrides[3] += 1
for a in np_rng.permutation(MOVE_DIRECTIONS):
if a in all_ship_scores[ship_k][7]:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col]:
valid_not_bad_actions.append(a)
# When attacking a base it is better to keep moving or stay still on
# a square that has no halite - otherwise it becomes a target for
# stealing halite.
# if observation['step'] == 258:
# import pdb; pdb.set_trace()
if valid_not_bad_actions and ignore_base_collision and obs_halite[
row, col] > 0:
if len(valid_not_bad_actions) > 1 and None in valid_not_bad_actions:
valid_not_bad_actions.remove(None)
if valid_not_bad_actions:
if len(valid_not_bad_actions) > 1:
# Do this in order to obtain reproducible results - the set
# intersect logic is flaky.
valid_not_bad_actions = [str(a) for a in valid_not_bad_actions]
valid_not_bad_actions.sort()
valid_not_bad_actions = [
a if a != "None" else None for a in valid_not_bad_actions]
action = np_rng.choice(valid_not_bad_actions)
else:
action_overrides[4] += 1
# By default: pick a random, not bad moving action
found_non_bad = False
          # When being chased: consider taking the place of the chaser
if ship_k in history['chase_counter'][0]:
chase_details = history['chase_counter'][0][ship_k]
# If the opponent can move towards me and has no other ships that
# can take the position of the chaser: take the place of the chaser
chaser_row = chase_details[4]
chaser_col = chase_details[5]
num_opp_chase_step_counter = chase_details[1]
if num_opp_chase_step_counter > 2:
to_opponent_dir = get_dir_from_target(
row, col, chaser_row, chaser_col, grid_size)[0]
opp_to_me_dir = OPPOSITE_MAPPING[to_opponent_dir]
rel_opp_to_me_dir = RELATIVE_DIR_MAPPING[opp_to_me_dir]
opp_can_move_to_me = rel_opp_to_me_dir in (
opponent_ships_sensible_actions_no_risk[
chaser_row, chaser_col])
# There is a unique opponent id with the least amount of halite
# on the chaser square or the chaser has at least one friendly
# ship that can replace it
chaser_id = player_ids[chaser_row, chaser_col]
near_chaser = ROW_COL_MAX_DISTANCE_MASKS[
chaser_row, chaser_col, 1]
near_halite = halite_ships[near_chaser]
near_chaser_friendly_halite = near_halite[
(near_halite >= 0) & (player_ids[near_chaser] == chaser_id)]
min_non_chaser_halite = near_halite[
(near_halite >= 0) & (
player_ids[near_chaser] != chaser_id)].min()
min_near_chaser_halite = near_halite[near_halite >= 0].min()
opponent_min_hal_ids = player_ids[np.logical_and(
near_chaser, halite_ships == min_near_chaser_halite)]
near_me = ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]
near_me_threat_players = player_ids[np.logical_and(
near_me, (halite_ships >= 0) & (
halite_ships < halite_ships[row, col]))]
double_opp_chase = (near_me_threat_players.size > 1) and (
np.all(near_me_threat_players == chaser_id))
chaser_can_replace = ((opponent_min_hal_ids.size > 1) and (
np.all(opponent_min_hal_ids == chaser_id) or (
(opponent_min_hal_ids == chaser_id).sum() > 1)) or (
(near_chaser_friendly_halite <= (
min_non_chaser_halite)).sum() > 1)) or double_opp_chase
chaser_players_index = env_obs_ids[chaser_id]
chaser_k = [k for k, v in env_observation.players[
chaser_players_index][2].items() if v[0] == (
chaser_row*grid_size + chaser_col)][0]
chaser_is_chased = chaser_k in history[
'chase_counter'][chaser_id]
chaser_is_chased_by_not_me = chaser_is_chased
if chaser_is_chased:
chaser_chaser = history['chase_counter'][chaser_id][chaser_k]
chaser_is_chased_by_not_me = (chaser_chaser[4] is None) or (
player_ids[chaser_chaser[4], chaser_chaser[5]] != 0)
# if observation['step'] == 179:
# import pdb; pdb.set_trace()
if opp_can_move_to_me and not chaser_can_replace and not (
chaser_is_chased_by_not_me):
# Move to the position of the chaser
action = str(to_opponent_dir)
found_non_bad = True
# if observation['step'] == 161:
# import pdb; pdb.set_trace()
if not found_non_bad:
action_scores = np.zeros(4)
for a_id, a in enumerate(NOT_NONE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
# There is always only a single opponent that can safely attack
# my move_square. First determine the opponent and count the
# number of potential attack ships
potential_threat_ships = []
for d_move in MOVE_DIRECTIONS:
other_row, other_col = move_ship_row_col(
move_row, move_col, d_move, grid_size)
other_player = player_ids[other_row, other_col]
if other_player > 0:
potential_threat_ships.append(
(other_player, halite_ships[other_row, other_col]))
other_ships = np.array(potential_threat_ships)
if len(other_ships) == 0:
opponent_threat_count = 0
opponent_id_penalty = 0
# I have already taken the square
if my_next_ships[move_row, move_col]:
attack_base_bonus = 0
else:
# This is an opponent base - add an attack base bonus if
# it can not be defended
attack_base_bonus = 0
print("BOXED IN NEXT TO AN OPPONENT BASE!")
assert opponent_bases[move_row, move_col]
opponent_base_id = np.where(
stacked_bases[:, move_row, move_col])[0][0]
opponent_id_bonus = opponent_ships_scaled[
opponent_base_id-1]
base_distance_my_main_base = main_base_distances[
move_row, move_col]
# import pdb; pdb.set_trace()
opponent_can_move_to_base = False
for base_dir in MOVE_DIRECTIONS:
base_defend_row, base_defend_col = move_ship_row_col(
move_row, move_col, base_dir, grid_size)
if stacked_ships[
opponent_base_id, base_defend_row, base_defend_col]:
defend_dir = RELATIVE_DIR_MAPPING[
OPPOSITE_MAPPING[base_dir]]
print("OPPONENT NEXT TO BASE!")
if defend_dir in opponent_ships_sensible_actions[
base_defend_row, base_defend_col]:
print("OPPONENT CAN MOVE TO BASE!")
opponent_can_move_to_base = True
break
if not opponent_can_move_to_base:
attack_base_bonus = 1e6*int(
base_distance_my_main_base <= 5 or (
opponent_id_bonus > 0)) + (20*opponent_id_bonus-5) + (
max(0, 6-base_distance_my_main_base))
else:
attack_base_bonus = 0
min_halite_player = int(
other_ships[np.argmin(other_ships[:, 1]), 0])
if np.all(other_ships[:, 0] == min_halite_player):
min_other_halite = 1e10
else:
min_other_halite = other_ships[other_ships[:, 0] != (
min_halite_player), 1].min()
my_move_neighbor_halite_mask = stacked_ships[0] & (
ROW_COL_DISTANCE_MASKS[move_row, move_col, 1])
min_other_halite = min(min_other_halite, halite_ships[
my_move_neighbor_halite_mask].min())
opponent_threat_count = (other_ships[
other_ships[:, 0] == min_halite_player, 1] < (
min_other_halite)).sum()
opponent_id_penalty = opponent_ships_scaled[
min_halite_player-1]
# import pdb; pdb.set_trace()
action_scores[a_id] = -1e6*bad_positions[
move_row, move_col] - main_base_distances[
move_row, move_col] - 3*opponent_threat_count -2*(
opponent_id_penalty*halite_ships[row, col]/250) + (
attack_base_bonus) - 0.5*all_ship_scores[ship_k][15][
move_row, move_col] - 0.5*(
my_non_zero_halite_ship_density[
move_row, move_col]) + my_zero_halite_ship_density[
move_row, move_col] -1e2*(
avoid_squares_boxed_in[move_row, move_col])
best_action_score = action_scores.max()
if best_action_score > -1e5:
best_ids = np.where(action_scores == best_action_score)[0]
select_id = np_rng.choice(best_ids)
action = str(NOT_NONE_DIRECTIONS[select_id])
found_non_bad = True
# If all actions are bad: do nothing - this is very rare since it
# would mean being surrounded by my other ships and opponent bases
if not found_non_bad:
action_overrides[5] += 1
action = None
# Update my_next_ships
new_row, new_col = move_ship_row_col(row, col, action, grid_size)
my_next_ships[new_row, new_col] = 1
bad_positions[new_row, new_col] = 1
updated_ship_pos[ship_k] = (new_row, new_col)
if action is not None:
ship_actions[ship_k] = action
# Update the shortest path counts for the remaining ships
shortest_path_count = {}
for future_ship_k in ordered_ship_plans[(i+1):]:
row, col = row_col_from_square_grid_pos(
player_obs[2][future_ship_k][0], grid_size)
# if observation['step'] == 390 and row == 0 and col == 8 and i == 5:
# import pdb; pdb.set_trace()
if not isinstance(ship_plans[future_ship_k], str):
(target_row, target_col, _, ignore_base_collision,
_, _, _) = ship_plans[future_ship_k]
shortest_actions = get_dir_from_target(
row, col, target_row, target_col, grid_size)
# Filter out bad positions from the shortest actions
valid_actions = []
for a in shortest_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
if not bad_positions[move_row, move_col] or (
ignore_base_collision and (
move_row == target_row and move_col == target_col)) or (
not opponent_bases[move_row, move_col] and all_ship_scores[
future_ship_k][12]):
valid_actions.append(a)
for a in valid_actions:
move_row, move_col = move_ship_row_col(row, col, a, grid_size)
path_lookup_k = (move_row, move_col)
square_weight = 1 if len(valid_actions) > 1 else 1.1
if path_lookup_k in shortest_path_count:
shortest_path_count[path_lookup_k] += square_weight
else:
shortest_path_count[path_lookup_k] = square_weight
# Update the non self destructive actions for ships where no action is
# planned yet
del ship_non_self_destructive_actions[ship_k]
rearrange_self_destruct_ships = []
if not all_ship_scores[ship_k][12]:
for j in range(i+1, num_ships):
other_ship_k = ordered_ship_plans[j]
if ship_plans[other_ship_k] != CONVERT:
other_row = ship_plans[other_ship_k][4]
other_col = ship_plans[other_ship_k][5]
# If I move to a distance of <= 1 of the other ship: update valid
# non self destruct actions
# Exception: end of episode base return
distance = DISTANCES[new_row, new_col][other_row, other_col]
if distance <= 1:
remove_dir = get_dir_from_target(
other_row, other_col, new_row, new_col, grid_size)[0]
if remove_dir in ship_non_self_destructive_actions[other_ship_k]:
ship_non_self_destructive_actions[other_ship_k].remove(
remove_dir)
if len(ship_non_self_destructive_actions[other_ship_k]) == 1:
rearrange_self_destruct_ships.append(other_ship_k)
# Place ships that only have a single non self destruct action to the
# front of the queue.
if rearrange_self_destruct_ships:
remaining_ships = [s for s in ordered_ship_plans[(i+1):] if (
s not in rearrange_self_destruct_ships)]
ordered_ship_plans = ordered_ship_plans[:(i+1)] + (
rearrange_self_destruct_ships) + remaining_ships
map_duration = time.time() - ship_map_start_time
return (ship_actions, remaining_budget, my_next_ships, obs_halite,
updated_ship_pos, -np.diff(action_overrides), map_duration)
def decide_existing_base_spawns(
config, observation, player_obs, my_next_bases, my_next_ships, obs_halite,
env_config, remaining_budget, verbose, ship_plans, updated_ship_pos,
weighted_base_mask, history, requested_save_conversion_budget,
victory_formation):
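  """Decide which of my existing bases spawn a new ship this step.

  The number of spawns is capped by the remaining budget (minus savings for
  base restoration and requested conversions), a step-dependent maximum ship
  count and the remaining halite on the map; each base then gets a spawn
  score based on nearby ships, nearby halite and the remaining budget, and
  the top scoring bases above the spawn threshold receive a SPAWN action.
  """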
spawn_cost = env_config.spawnCost
convert_cost = env_config.convertCost
my_ship_count = my_next_ships.sum()
# Start saving for an alternative base when my base is spammed by opponents
# print(observation['step'], history['my_base_flooded_counter'])
if history['my_base_flooded_counter']:
min_flood_counter = np.array(
list(history['my_base_flooded_counter'].values())).min()
save_base_flood_fraction = min(1, (min_flood_counter/5))**0.5
save_restore_budget = save_base_flood_fraction*convert_cost
else:
save_restore_budget = 0
max_spawns = int((remaining_budget-save_restore_budget-(
requested_save_conversion_budget))/spawn_cost)
relative_step = observation['relative_step']
max_allowed_ships = config['max_initial_ships'] - relative_step*(
config['max_initial_ships'] - config['max_final_ships'])
# print(observation['step'], max_allowed_ships, my_ship_count)
if history['limit_ships_timeout']:
# In order to avoid weird timeouts
max_allowed_ships -= 15
total_ship_count = np.stack([
rbs[2] for rbs in observation['rewards_bases_ships']]).sum()
max_spawns = min(max_spawns, int(max_allowed_ships - my_ship_count))
# if observation['step'] == 60:
# import pdb; pdb.set_trace()
# Restrict the number of spawns if there is little halite remaining on the
# map when I am not winning or the game is almost over
current_scores = history['current_scores']
current_halite_sum = history['current_halite_sum']
not_winning = (current_scores[0] < current_scores.max()) or (
current_halite_sum[0] < (current_halite_sum[1:].max()+(3-10*(
0.8-max(0.8, observation['relative_step'])))*spawn_cost))
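  # My halite sum advantage over the best opponent, expressed in units of the
  # spawn cost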
halite_sum_spawn_advantage = (current_halite_sum[0] - (
current_halite_sum[1:].max()))/spawn_cost
massively_ahead_in_halite = halite_sum_spawn_advantage > 8
game_almost_over = observation['relative_step'] >= 0.8
steps_remaining = env_config.episodeSteps-1-observation['step']
if not_winning or (game_almost_over and not massively_ahead_in_halite):
max_spawns = min(max_spawns, int(
min(3000, (obs_halite.sum())**0.8)/min(
total_ship_count+1e-9, (my_ship_count+1e-9)*2)/spawn_cost*(
1-relative_step)*(env_config.episodeSteps-2)/config[
'max_spawn_relative_step_divisor']))
elif game_almost_over and massively_ahead_in_halite and (
steps_remaining >= 20 or not victory_formation):
max_spawns = min(max_spawns, int(halite_sum_spawn_advantage-8))
last_episode_turn = observation['relative_step'] == 1
# if observation['step'] == 176:
# import pdb; pdb.set_trace()
if max_spawns <= 0 or not player_obs[1] or last_episode_turn:
return {}, remaining_budget
num_bases = len(player_obs[1])
spawn_scores = np.zeros(num_bases)
grid_size = obs_halite.shape[0]
smoothed_friendly_ship_halite = smooth2d(
observation['rewards_bases_ships'][0][3])
smoothed_halite = smooth2d(obs_halite)
for i, base_k in enumerate(player_obs[1]):
row, col = row_col_from_square_grid_pos(player_obs[1][base_k], grid_size)
# # Don't spawn if it is not the main base - OBSOLETE
# spawn_scores[i] -= 1e12*int(weighted_base_mask[row, col] < 1)
# Don't spawn when there will be a ship at the base
spawn_scores[i] -= 1e12*my_next_ships[row, col]
# Don't spawn when there is a returning ship that wants to enter the base
# in two steps
# Exception: if the base is not too crowded, it is ok to spawn in this
# scenario.
near_base_ship_count = np.logical_and(
my_next_ships, ROW_COL_MAX_DISTANCE_MASKS[(row, col, 3)]).sum()
if near_base_ship_count >= config['no_spawn_near_base_ship_limit']:
for k in ship_plans:
if ship_plans[k][0] == row and ship_plans[k][1] == col:
updated_distance = DISTANCES[row, col][updated_ship_pos[k]]
if updated_distance == 1:
spawn_scores[i] -= 1e6
break
# Spawn when the base is instructed to do so in the previous step (newly)
# created
if history['prev_step']['should_spawn_base_next_step'] == (row, col):
# import pdb; pdb.set_trace()
print(observation['step'], "NEW BASE SPAWN", (row, col))
spawn_scores[i] += 1e6
# Spawn less when the base is crowded with ships with a lot of halite
spawn_scores[i] -= smoothed_friendly_ship_halite[row, col]*(
config['nearby_ship_halite_spawn_constant'])
# Spawn more when there is a lot of nearby halite
spawn_scores[i] += smoothed_halite[row, col]*(
config['nearby_halite_spawn_constant'])
# Spawn more when there is a lot of remaining budget available
spawn_scores[i] += remaining_budget*(
config['remaining_budget_spawn_constant'])
if verbose:
print(smoothed_friendly_ship_halite[row, col]*(
config['nearby_ship_halite_spawn_constant']),
smoothed_halite[row, col]*(
config['nearby_halite_spawn_constant']),
remaining_budget*(config['remaining_budget_spawn_constant']),
)
if verbose:
print("Spawn scores and threshold: {}; {}".format(
spawn_scores, config['spawn_score_threshold']))
  # Spawn at the bases with the top spawn scores that stay within the
  # maximum number of spawns
num_above_threshold = (spawn_scores > config['spawn_score_threshold']).sum()
spawn_ids = np.argsort(-spawn_scores)[:min(
num_above_threshold, max_spawns)]
mapped_actions = {}
for i, base_k in enumerate(player_obs[1]):
if np.isin(i, spawn_ids):
mapped_actions[base_k] = SPAWN
remaining_budget -= spawn_cost
return mapped_actions, remaining_budget
def get_env_obs_ids(env_observation):
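  """Return the player ids of the raw environment observation, reordered so
  that my own id comes first."""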
num_players = len(env_observation.players)
my_id = env_observation.player
env_obs_ids = [i for i in range(num_players)]
env_obs_ids.remove(my_id)
env_obs_ids = [my_id] + env_obs_ids
return env_obs_ids
def update_chase_counter(history, observation, env_observation, stacked_ships,
other_halite_ships, player_ids, env_obs_ids):
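  """Track, for every player, how long each of their ships has been chased
  by a lower halite ship at distance 1, storing the chaser position so a
  chased ship can later consider swapping places with its chaser."""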
grid_size = stacked_ships.shape[1]
num_players = stacked_ships.shape[0]
if observation['step'] == 0:
history['chase_counter'] = [{} for _ in range(num_players)]
history['escort_to_base_list'] = []
else:
for player_id in range(num_players):
# Remove converted or destroyed ships from the chase counter
player_obs = env_observation.players[env_obs_ids[player_id]]
delete_keys = []
for ship_k in history['chase_counter'][player_id]:
if not ship_k in player_obs[2]:
# if player_id > 0 or (not history['prev_step']['my_ship_actions'][
# ship_k] == CONVERT):
# # import pdb; pdb.set_trace()
# print("Destroyed ship player", player_id, "step",
# observation['step'],
# history['chase_counter'][player_id][ship_k])
delete_keys.append(ship_k)
for del_k in delete_keys:
del history['chase_counter'][player_id][del_k]
      # Increment the chase counter of ships that are 1 step away from a lower
      # halite ship; delete the counter for other ships
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = player_obs[2][ship_k][1]
step_1_mask = ROW_COL_DISTANCE_MASKS[row, col, 1]
threats = ship_halite > other_halite_ships[step_1_mask]
direct_threat = np.any(threats)
# if observation['step'] == 88 and row == 11 and col == 8:
# import pdb; pdb.set_trace()
if direct_threat:
chaser_ids = player_ids[step_1_mask][threats]
if (player_id == 0 and (
history['prev_step']['my_ship_actions'][ship_k] is None)) or (
not ship_k in history['chase_counter'][player_id]):
history['chase_counter'][player_id][ship_k] = (
chaser_ids, 1, row, col, None, None)
else:
prev_chasers, prev_count, prev_row, prev_col, _, _ = history[
'chase_counter'][player_id][ship_k]
prev_pos_opponent = player_ids[prev_row, prev_col]
if ship_halite > other_halite_ships[prev_row, prev_col] and (
prev_pos_opponent > 0) and prev_pos_opponent in prev_chasers:
# if player_id == 0 and prev_count > 20:
# import pdb; pdb.set_trace()
# print(prev_count, observation['step'], row, col)
history['chase_counter'][player_id][ship_k] = (
np.array([prev_pos_opponent]), prev_count+1, row, col,
prev_row, prev_col)
else:
history['chase_counter'][player_id][ship_k] = (
chaser_ids, 1, row, col, None, None)
else:
if ship_k in history['chase_counter'][player_id]:
del history['chase_counter'][player_id][ship_k]
return history
def list_of_combs(arr):
"""returns a list of all subsets of a list"""
combs = []
for i in range(len(arr)):
listing = [list(x) for x in itertools.combinations(arr, i+1)]
combs.extend(listing)
return combs
def infer_player_conversions(player_obs, prev_player_obs, env_config,
observation, env_obs_id):
  # By considering the score change, the gather behavior and the number of
  # spawns, the number of conversions can be approximately inferred (not
  # exact because of base attacks)
convert_cost = env_config.convertCost
score_change = prev_player_obs[0] - player_obs[0]
# Consider new, disappeared and remaining ships
current_ships = set(player_obs[2].keys())
prev_ships = set(prev_player_obs[2].keys())
disappeared_ships = list(prev_ships - current_ships)
new_ships = list(current_ships-prev_ships)
new_ship_count = len(new_ships)
spent_spawns = new_ship_count*env_config.spawnCost
not_destroyed_ships = set(current_ships & prev_ships)
# Consider new bases
current_bases = set(player_obs[1].keys())
current_base_pos = [player_obs[1][b] for b in current_bases]
prev_bases = set(prev_player_obs[1].keys())
new_bases = set(current_bases-prev_bases)
spent_new_bases = len(new_bases)*convert_cost
deposited = 0
for k in not_destroyed_ships:
if player_obs[2][k][0] in current_base_pos:
deposited += prev_player_obs[2][k][1]
prev_pos_to_k = {v[0]: k for k, v in prev_player_obs[2].items()}
converted_ships = [prev_pos_to_k[player_obs[1][b]] for b in new_bases]
for k in converted_ships:
deposited += prev_player_obs[2][k][1]
unexplained_score_change = (
score_change-spent_spawns-spent_new_bases+deposited)
if unexplained_score_change != 0:
unexplained_ship_scores = [convert_cost-prev_player_obs[2][k][1] for k in (
disappeared_ships)]
num_disappeared = len(disappeared_ships)
if num_disappeared < 7:
# This is to avoid spending too much time in a crazy conversion scenario
combination_found = False
for comb in list_of_combs(np.arange(num_disappeared)):
unexplained_sum = np.array(
[unexplained_ship_scores[c_id] for c_id in comb]).sum()
if unexplained_sum == unexplained_score_change:
converted_ships.extend([disappeared_ships[c_id] for c_id in comb])
combination_found = True
break
# One scenario where no combination can be found is when one of the ships
# self-collides when returning to a base
# if not combination_found:
# print("No convert resolution combination found", observation['step'],
# env_obs_id)
combination_found = combination_found
return converted_ships
def update_box_in_counter(history, observation, env_observation, stacked_ships,
env_obs_ids, env_config):
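  """Track, per opponent, the halite levels at which their boxed in ships
  did or did not convert, and maintain a [low, high] estimate of their
  boxed-in convert threshold in the history."""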
grid_size = stacked_ships.shape[1]
num_players = stacked_ships.shape[0]
convert_cost = env_config.convertCost
if observation['step'] in [0, env_config.episodeSteps-2]:
history['raw_box_data'] = [[] for _ in range(num_players)]
history['inferred_boxed_in_conv_threshold'] = [[
convert_cost/2, convert_cost] for _ in range(num_players)]
else:
prev_opponent_sensible_actions = history['prev_step'][
'opponent_ships_sensible_actions']
for player_id in range(1, num_players):
# Consider all boxed in ships and infer the action that each player took
env_obs_id = env_obs_ids[player_id]
player_obs = env_observation.players[env_obs_id]
prev_player_obs = history['prev_step']['env_observation'].players[
env_obs_id]
converted_ships = infer_player_conversions(
player_obs, prev_player_obs, env_config, observation,
env_obs_id)
for k in prev_player_obs[2]:
row, col = row_col_from_square_grid_pos(
prev_player_obs[2][k][0], grid_size)
if len(prev_opponent_sensible_actions[row, col]) == 0:
prev_halite = prev_player_obs[2][k][1]
prev_halite_score = prev_player_obs[0]
if (prev_halite + prev_halite_score) >= convert_cost:
did_convert = k in converted_ships
history['raw_box_data'][player_id].append(
(prev_halite, did_convert))
# Infer the halite convert threshold when being boxed in
if history['raw_box_data'][player_id]:
box_data = np.array(history['raw_box_data'][player_id])
current_thresholds = history['inferred_boxed_in_conv_threshold'][
player_id]
# The high estimate is the lowest halite value at which a ship was
# ever converted - defaults to convert_cost
if np.any(box_data[:, 1] == 0):
highest_not_converted = box_data[box_data[:, 1] == 0, 0].max()
else:
highest_not_converted = 0
if np.any(box_data[:, 1] == 1):
lowest_converted = box_data[box_data[:, 1] == 1, 0].min()
else:
lowest_converted = current_thresholds[1]
        # This can happen if the opponent does not take the halite on board
        # into account (inconsistent convert threshold)
highest_not_converted = min(highest_not_converted, lowest_converted)
        # The low estimate is the average of the high estimate and the highest
        # halite at which a ship was *not* converted (defaults to 0)
history['inferred_boxed_in_conv_threshold'][player_id] = [
(lowest_converted+highest_not_converted)/2, lowest_converted]
return history
def update_zero_halite_ship_behavior(
config, history, observation, env_observation, stacked_ships, env_obs_ids,
env_config):
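  """Log how each opponent's zero halite ships behave when another player's
  zero halite ship is nearby (move towards, stay or move away, near or away
  from a base), including losses inferred from ship collisions, and
  aggregate the logs into per-opponent zero halite aggression estimates."""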
grid_size = stacked_ships.shape[1]
num_players = stacked_ships.shape[0]
near_base_distance = config['log_near_base_distance']
max_recent_considered_relevant = config[
'max_recent_considered_relevant_zero_move_count']
# Minimum number of required examples to be able to estimate the opponent's
# zero halite ship behavior. Format ('nearbase_shipdistance')
min_considered_types = {
'False_0': 0,
'False_1': 8,
'False_2': 15,
'True_0': 8,
'True_1': 8,
'True_2': config['near_base_2_step_risky_min_count'],
}
if observation['step'] == 0:
history['raw_zero_halite_move_data'] = [[] for _ in range(num_players)]
history['zero_halite_move_behavior'] = [{} for _ in range(num_players)]
history['my_zero_lost_ships_opponents'] = {}
initial_aggressive_behavior = {}
for is_near_base in [False, True]:
for considered_distance in [0, 1, 2]:
dict_k = str(is_near_base) + '_' + str(considered_distance)
dict_k_always_careful = dict_k + '_always_careful'
dict_k_real_count = dict_k + '_real_count'
dict_k_ever_risky = dict_k + '_ever_risky'
if min_considered_types[dict_k] == 0:
initial_aggressive_behavior[dict_k] = 0.0
initial_aggressive_behavior[dict_k_always_careful] = True
initial_aggressive_behavior[dict_k_real_count] = 0
initial_aggressive_behavior[dict_k_ever_risky] = False
else:
initial_aggressive_behavior[dict_k] = 1.0
initial_aggressive_behavior[dict_k_always_careful] = False
initial_aggressive_behavior[dict_k_real_count] = 0
initial_aggressive_behavior[dict_k_ever_risky] = False
for player_id in range(1, num_players):
history['zero_halite_move_behavior'][player_id] = (
copy.copy(initial_aggressive_behavior))
else:
prev_stacked_bases = history['prev_step']['stacked_bases']
all_prev_bases = prev_stacked_bases.sum(0) > 0
prev_stacked_ships = history['prev_step']['stacked_ships']
all_prev_ships = np.sum(prev_stacked_ships, 0) > 0
prev_base_locations = np.where(all_prev_bases)
prev_boxed_in_zero_halite_opponents = history['prev_step'][
'boxed_in_zero_halite_opponents']
num_prev_bases = all_prev_bases.sum()
if num_prev_bases > 0:
all_prev_base_distances = [DISTANCES[
prev_base_locations[0][i], prev_base_locations[1][i]] for i in range(
num_prev_bases)] + [
99*np.ones((grid_size, grid_size))]
stacked_prev_base_distances = np.stack(all_prev_base_distances)
nearest_prev_base_distances = stacked_prev_base_distances.min(0)
prev_base_player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(prev_stacked_bases.shape[0]):
prev_base_player_ids[prev_stacked_bases[i]] = i
prev_ship_player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(prev_stacked_ships.shape[0]):
prev_ship_player_ids[prev_stacked_ships[i]] = i
prev_halite_ships = history['prev_step']['halite_ships']
prev_opponent_sensible_actions_no_risk = history['prev_step'][
'opponent_ships_sensible_actions_no_risk']
my_prev_ship_pos_to_key = {
v[0]: k for k, v in history['prev_step']['env_observation'].players[
env_obs_ids[0]][2].items()}
all_prev_ship_pos_to_key = {}
all_ship_keys = []
for player_id in range(num_players):
env_obs_id = env_obs_ids[player_id]
all_prev_ship_pos_to_key.update({
v[0]: k for k, v in history['prev_step']['env_observation'].players[
env_obs_id][2].items()})
all_ship_keys.extend(list(env_observation.players[
env_obs_id][2].keys()))
history['my_zero_lost_ships_opponents'] = {}
for player_id in range(1, num_players):
# Consider all zero halite ships and infer the action that each player
# took
env_obs_id = env_obs_ids[player_id]
player_obs = env_observation.players[env_obs_id]
prev_player_obs = history['prev_step']['env_observation'].players[
env_obs_id]
for k in prev_player_obs[2]:
prev_row, prev_col = row_col_from_square_grid_pos(
prev_player_obs[2][k][0], grid_size)
ignore_boxed_zero_halite = (prev_row, prev_col) in (
prev_boxed_in_zero_halite_opponents)
if k in player_obs[2] and prev_player_obs[2][k][1] == 0 and (
not ignore_boxed_zero_halite):
row, col = row_col_from_square_grid_pos(
player_obs[2][k][0], grid_size)
nearest_prev_base_distance = nearest_prev_base_distances[
prev_row, prev_col]
nearest_prev_base_id = np.argmin(stacked_prev_base_distances[
:, prev_row, prev_col])
nearest_prev_base_row = prev_base_locations[0][
nearest_prev_base_id]
nearest_prev_base_col = prev_base_locations[1][
nearest_prev_base_id]
nearest_base_player = prev_base_player_ids[
nearest_prev_base_row, nearest_prev_base_col]
friendly_prev_nearest_base = (nearest_base_player == player_id)
if len(prev_opponent_sensible_actions_no_risk[
prev_row, prev_col]) < len(MOVE_DIRECTIONS):
# Loop over all zero halite opponent ships at a distance of max 2
# and log the distance, None action count, move towards count and
# move away count as well as the distance to the nearest base.
# Also record whether the nearest base is friendly or not.
considered_threat_data = []
for row_shift, col_shift, distance in (
D2_ROW_COL_SHIFTS_DISTANCES):
considered_row = (prev_row + row_shift) % grid_size
considered_col = (prev_col + col_shift) % grid_size
if all_prev_ships[considered_row, considered_col] and (
prev_ship_player_ids[
considered_row, considered_col] != player_id) and (
history['prev_step']['halite_ships'][
considered_row, considered_col] == 0):
# Compute the distance of the considered ship, relative
# to the threat
moved_distance = DISTANCES[row, col][
considered_row, considered_col]
considered_threat_data.append((
distance, moved_distance, nearest_prev_base_distance,
friendly_prev_nearest_base, observation['step'], True))
# Aggregate the per-ship behavior - only consider the nearest
# opponent threats
num_considered_threats = len(considered_threat_data)
if num_considered_threats == 1:
history['raw_zero_halite_move_data'][player_id].append(
considered_threat_data[0])
elif num_considered_threats > 0:
threat_data = np.array(considered_threat_data)
min_distance = threat_data[:, 0].min()
for row_id in range(num_considered_threats):
if threat_data[row_id, 0] == min_distance:
history['raw_zero_halite_move_data'][player_id].append(
considered_threat_data[row_id])
elif k not in player_obs[2] and prev_player_obs[2][k][1] == 0 and (
not ignore_boxed_zero_halite):
# The opponent lost their zero halite ship - infer what happened
# Investigate if there is no new base at the position where there
# used to be a ship (this would explain why the ship disappeared)
prev_pos = prev_player_obs[2][k][0]
if not (prev_pos in player_obs[1].values() and (
not prev_pos in prev_player_obs[1].values())):
prev_row, prev_col = row_col_from_square_grid_pos(
prev_pos, grid_size)
nearest_prev_base_distance = nearest_prev_base_distances[
prev_row, prev_col]
base_destroyed = False
if nearest_prev_base_distance == 1:
# Determine if a base at a distance of 1 from the previous
# position was destroyed
for destroyed_dir in NOT_NONE_DIRECTIONS:
base_near_row, base_near_col = move_ship_row_col(
prev_row, prev_col, destroyed_dir, grid_size)
base_near_pos = base_near_row*grid_size+base_near_col
if all_prev_bases[base_near_row, base_near_col]:
base_destroyed = True
for p_id in range(num_players):
if base_near_pos in env_observation.players[
p_id][1].values():
base_destroyed = False
break
if base_destroyed:
break
nearest_prev_base_id = np.argmin(stacked_prev_base_distances[
:, prev_row, prev_col])
nearest_prev_base_row = prev_base_locations[0][
nearest_prev_base_id]
nearest_prev_base_col = prev_base_locations[1][
nearest_prev_base_id]
nearest_base_player = prev_base_player_ids[
nearest_prev_base_row, nearest_prev_base_col]
friendly_prev_nearest_base = (nearest_base_player == player_id)
# Consider all potential move squares for the destroyed ship and
# all potential non friendly ship movements. For my ships: only
# consider the actions I actually selected.
potential_ship_collisions = []
for d1 in MOVE_DIRECTIONS:
# D1 are the potential actions of the opponent ship
row_d1, col_d1 = move_ship_row_col(
prev_row, prev_col, d1, grid_size)
for d2 in MOVE_DIRECTIONS:
# D2 are the potential collision directions, relative to the
# moved position of the opponent ship
row_d2, col_d2 = move_ship_row_col(
row_d1, col_d1, d2, grid_size)
if all_prev_ships[row_d2, col_d2] and (
prev_halite_ships[row_d2, col_d2] == 0) and (
prev_ship_player_ids[row_d2, col_d2] != player_id):
d2_pos = row_d2*grid_size+col_d2
my_ship_position = d2_pos in my_prev_ship_pos_to_key
consider_action = not my_ship_position
if not consider_action:
# Only consider the actions I actually selected for my
# ships
my_action = history['prev_step']['my_ship_actions'][
my_prev_ship_pos_to_key[d2_pos]]
                    consider_action = my_action != CONVERT and (
                        my_action == OPPOSITE_MAPPING[d2])
if consider_action:
# Final condition: the ship at the considered location
# no longer exists in the current step
other_ship_k = all_prev_ship_pos_to_key[
row_d2*grid_size+col_d2]
if other_ship_k not in all_ship_keys:
distance = DISTANCES[prev_row, prev_col][
row_d2, col_d2]
threat_distance = DISTANCES[row_d2, col_d2][
row_d1, col_d1]
# Use the distance of the opponent ship to the base
# rather than the collision distance since it is
# ambiguous where the ships collided
potential_ship_collisions.append((
distance, threat_distance,
nearest_prev_base_distance,
friendly_prev_nearest_base, observation['step'],
my_ship_position, row_d2, col_d2))
# if observation['step'] == 99:
# import pdb; pdb.set_trace()
            # Potential collisions at distance 2 come in pairs, so the number
            # of items in potential_ship_collisions should be even.
            # Investigate the cases where it is 0 without a ship collision,
            # or >= 4
num_potential_collisions = len(potential_ship_collisions)
to_add_data = []
if num_potential_collisions == 0:
if not base_destroyed and observation['relative_step'] < 0.9:
pass
# Unexplained ship loss - likely due to an opponent self
# collision
elif num_potential_collisions <= 2:
# Either a single d1/d2 potential collisions or two potential
# d2 collisions. Either way: the collision data would be
# identical
to_add_data = potential_ship_collisions[0][:-3]
else:
# In case of disagreement in the distance of the collision:
# pick the lowest distance one
collision_distances = np.array([
pc[0] for pc in potential_ship_collisions])
to_add_data = potential_ship_collisions[
np.argmin(collision_distances)][:-3]
certain_data = False
if num_potential_collisions == 1:
ship_collision = potential_ship_collisions[0]
certain_data = ship_collision[5]
if ship_collision[5]:
history['my_zero_lost_ships_opponents'][
(ship_collision[6], ship_collision[7])] = player_id
if to_add_data:
history['raw_zero_halite_move_data'][player_id].append(tuple(
list(to_add_data) + [certain_data]))
# Infer the zero halite behavior as a function of distance to opponent
# base and distance to other zero halite ships
if history['raw_zero_halite_move_data'][player_id]:
zero_halite_data = np.array(history['raw_zero_halite_move_data'][
player_id])
aggregate_data = {}
for is_near_base in [False, True]:
for considered_distance in [0, 1, 2]:
relevant_rows = (zero_halite_data[:, 0] == max(
1, considered_distance))
if is_near_base:
relevant_rows &= (zero_halite_data[:, 2] <= near_base_distance)
else:
relevant_rows &= (zero_halite_data[:, 2] > near_base_distance)
if considered_distance < 2:
relevant_rows &= (zero_halite_data[:, 5] == 1)
num_relevant = relevant_rows.sum()
if num_relevant > max_recent_considered_relevant:
num_relevant = max_recent_considered_relevant
relevant_ids = np.where(relevant_rows)[0]
relevant_rows[
relevant_ids[:-(max_recent_considered_relevant)]] = 0
aggressive_relevant_count = (
relevant_rows & (zero_halite_data[:, 1] <= min(
1, considered_distance))).sum()
dict_k = str(is_near_base) + '_' + str(considered_distance)
dict_k_always_careful = dict_k + '_always_careful'
dict_k_real_count = dict_k + '_real_count'
dict_k_ever_risky = dict_k + '_ever_risky'
min_considered = min_considered_types[dict_k]
num_aggressive_added = max(0, min_considered-num_relevant)
if num_relevant == 0 and num_aggressive_added == 0:
num_relevant = 1
elif num_aggressive_added > 0:
num_aggressive_added = min_considered-num_relevant
num_relevant += num_aggressive_added
aggressive_relevant_count += num_aggressive_added
# if player_id == 3 and considered_distance == 0 and is_near_base:
# print(observation['step'], num_relevant, num_aggressive_added,
# aggressive_relevant_count)
# # if observation['step'] == 72:
# # import pdb; pdb.set_trace()
# # x=1
aggregate_data[dict_k] = aggressive_relevant_count/max(
1e-9, num_relevant)
aggregate_data[dict_k_always_careful] = (
aggressive_relevant_count == 0)
aggregate_data[dict_k_real_count] = (
num_relevant - num_aggressive_added)
aggregate_data[dict_k_ever_risky] = (
aggressive_relevant_count > num_aggressive_added)
if is_near_base:
              # If an opponent is aggressive away from the base, they are
              # likely aggressive near the base too.
              # Approach: assume the most aggressive of the near-base and
              # away-from-base behavior when the considered ship is near the
              # base
dict_k_away_base = str(False) + '_' + str(considered_distance)
dict_k_always_careful_away_base = dict_k_away_base + (
'_always_careful')
dict_k_ever_risky_away_base = dict_k_away_base + '_ever_risky'
aggregate_data[dict_k] = max(
aggregate_data[dict_k], aggregate_data[dict_k_away_base])
aggregate_data[dict_k_always_careful] = (
aggregate_data[dict_k_always_careful]) and aggregate_data[
dict_k_always_careful_away_base]
aggregate_data[dict_k_ever_risky] = (
aggregate_data[dict_k_ever_risky]) or aggregate_data[
dict_k_ever_risky_away_base]
history['zero_halite_move_behavior'][player_id] = aggregate_data
return history
def update_base_camping_strategy(
config, history, observation, env_observation, stacked_ships, env_obs_ids,
env_config, np_rng, continued_camping_bonus=0.2, corner_camping_patience=3,
other_camping_patience=5, max_non_unique_campers=2,
max_campers_per_base=2, play_safe_aggression_limit=1,
my_base_flooded_patience=5, flood_patience_buffer=2,
min_ships_to_consider_camping=5, camping_risk_phase_2_7_multiplier=0.00):
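# Rough overview (inferred from the body below): this tracker maintains
# both the defensive camping state (which of my bases are camped or
# flooded by opponent zero halite ships, and which of my ships should
# chase campers away) and the offensive camping state (which opponent
# bases my zero halite ships should camp at, and the per-opponent camping
# phase they operate in).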
grid_size = stacked_ships.shape[1]
num_players = stacked_ships.shape[0]
my_ships_obs = env_observation.players[env_obs_ids[0]][2]
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
# flood_base_convert_threshold = my_base_flooded_patience + (
# min(3, stacked_bases[0].sum()))**2 - 1
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
all_bases = stacked_bases.sum(0) > 0
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
player_ids[stacked_bases[i]] = i
# if observation['step'] == 116:
# import pdb; pdb.set_trace()
if observation['step'] == 0:
history['camping_ships_strategy'] = {}
history['camping_ships_targets'] = {}
history['remaining_camping_budget'] = config['max_camper_ship_budget']
history['aggression_stage_opponents_camping'] = [
0 for _ in range(num_players)]
history['aggression_opponents_camping_counter'] = [
0 for _ in range(num_players)]
history['camping_phase_opponents'] = [{} for _ in range(num_players)]
history['camping_attack_opponent_budget'] = [2 for _ in range(num_players)]
history['camping_phase_2_details_opponents'] = [(0, 0) for _ in range(
num_players)]
history['camping_phase_3_4_ignore_threats_counter'] = [0 for _ in range(
num_players)]
history['base_deposit_data'] = [[] for _ in range(num_players)]
history['obs_base_camping_behavior'] = {}
history['attack_opponent_campers'] = {}
history['my_base_not_attacked_positions'] = []
history['my_camped_base_not_attacked_positions'] = []
history['base_camping_override_positions'] = np.zeros((
grid_size, grid_size), dtype=np.bool)
history['my_base_flooded_counter'] = {}
history['current_scores'] = np.zeros(num_players)
history['current_halite_sum'] = np.zeros(num_players)
else:
# Compute the current approximate player score
scores = np.array(
[rbs[0] for rbs in observation['rewards_bases_ships']])
my_score = scores[0]
num_my_ships = stacked_ships[0].sum()
prev_stacked_bases = np.stack(
[rbs[1] for rbs in history['prev_step']['observation'][
'rewards_bases_ships']])
halite_cargos = np.array(
[rbs[3].sum() for rbs in observation['rewards_bases_ships']])
obs_halite = np.maximum(0, observation['halite'])
collect_rate = env_config.collectRate
obs_halite[obs_halite < 1/collect_rate] = 0
opponent_bases = stacked_bases[1:].sum(0) > 0
opponent_base_counts = stacked_bases[1:].sum((1, 2))
num_all_opponent_bases = opponent_base_counts.sum()
base_counts = stacked_bases.sum((1, 2))
convert_cost = env_config.convertCost
spawn_cost = env_config.spawnCost
steps_remaining = env_config.episodeSteps-1-observation['step']
if steps_remaining < grid_size:
# Our ships may be returning - aggressively attack nearby campers
corner_camping_patience = 1
other_camping_patience = 1
obs_halite_sum = observation['halite'].sum()
base_value = min(convert_cost+spawn_cost,
steps_remaining*obs_halite_sum**0.6/(
10*base_counts.sum()+1e-9))
ship_counts = stacked_ships.sum((1, 2))
all_ship_count = ship_counts.sum()
ship_value = max(min(1, 2*(1-observation['relative_step']))*(
env_config.spawnCost), min(
env_config.spawnCost, (steps_remaining-1)*obs_halite_sum**0.6/(
all_ship_count+1e-9)))
# A base is only valuable if there are ships to return to them
base_score_counts = np.minimum(base_counts, 1+ship_counts/5)
current_scores = scores+halite_cargos+ship_value*ship_counts+(
base_value*np.sqrt(np.maximum(0, base_score_counts-1))) + (
convert_cost+ship_value)*(base_score_counts > 0)*min(
1, (steps_remaining-1)/20)
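# Rough net worth estimate per player: bank plus cargo, plus a per ship
# value derived from the spawn cost and the halite left to mine per ship,
# plus a discounted value for bases that still have ships to serve them.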
# print(observation['step'], ship_value, base_value)
history['current_scores'] = current_scores
history['current_halite_sum'] = scores+halite_cargos
base_camping_override_positions = np.zeros((
grid_size, grid_size), dtype=np.bool)
# Keep track of the deposit location of all opponents (used to decide
# what bases to attack)
for player_id in range(1, num_players):
env_obs_id = env_obs_ids[player_id]
player_obs = env_observation.players[env_obs_id]
prev_player_obs = history['prev_step']['env_observation'].players[
env_obs_id]
# For all ships that are at a base location and had halite in the
# previous turn: add it to the memory
base_locations = list(player_obs[1].values())
player_base_pos_to_key = {v: k for k, v in player_obs[1].items()}
for ship_k in player_obs[2]:
ship_position = player_obs[2][ship_k][0]
if ship_position in base_locations and ship_k in prev_player_obs[2]:
halite_deposited = prev_player_obs[2][ship_k][1]
if halite_deposited > 0:
history['base_deposit_data'][player_id].append((
player_base_pos_to_key[ship_position], observation['step']))
# Keep track of the non friendly zero halite behavior near all bases
base_locations = np.where(all_bases)
prev_base_camping_behavior = history['obs_base_camping_behavior']
obs_base_camping_behavior = {}
num_bases = base_locations[0].size
for base_id in range(num_bases):
base_row = base_locations[0][base_id]
base_col = base_locations[1][base_id]
base_k = (base_row, base_col)
base_player_id = player_ids[base_k]
not_my_base = base_player_id > 0
around_base_mask = ROW_COL_BOX_MAX_DISTANCE_MASKS[base_row, base_col, 1]
zero_halite_near_base_mask = edge_aware_square_subset_mask(
(player_ids != base_player_id) & (halite_ships == 0), base_row,
base_col, window=1, box=around_base_mask, grid_size=grid_size)
zero_halite_near_base_players = edge_aware_square_subset_mask(
player_ids, base_row, base_col, window=1, box=around_base_mask,
grid_size=grid_size)
if zero_halite_near_base_mask.sum() > 0:
if base_k in prev_base_camping_behavior:
prev_camping = prev_base_camping_behavior[base_k]
prev_zero_halite_near_base_mask = prev_camping[3]
prev_corner_camping_counter = prev_camping[4]
prev_other_camping_counter = prev_camping[5]
prev_zero_halite_near_base_players = prev_camping[6]
else:
prev_zero_halite_near_base_mask = np.zeros_like(
zero_halite_near_base_mask)
prev_corner_camping_counter = np.zeros(9)
prev_other_camping_counter = np.zeros(9)
prev_zero_halite_near_base_players = -1*np.ones(9)
# Count the number of zero halite opponents that stay at the same
# location (switching zero halite ships within the same team would also
# qualify here)
stay_near_base = prev_zero_halite_near_base_mask & (
zero_halite_near_base_mask) & (
prev_zero_halite_near_base_players == (
zero_halite_near_base_players))
num_stay_near_base = stay_near_base.sum()
num_my_stay_near_base = (
stay_near_base & (zero_halite_near_base_players == 0)).sum()
# Inspect if there is any ship that statically remains at a base corner
stay_base_corner = (np.mod(np.arange(9), 2) == 1) & (
stay_near_base)
num_my_stay_near_base_corner = (
stay_base_corner & (zero_halite_near_base_players == 0)).sum()
other_camping_counter = prev_other_camping_counter
other_camping_counter[stay_near_base] += 1
other_camping_counter[~stay_near_base] = 0
if np.any(stay_base_corner):
corner_camping_counter = prev_corner_camping_counter
corner_camping_counter[stay_base_corner] += 1
corner_camping_counter[~stay_base_corner] = 0
attack_corner_camper = np.any(
corner_camping_counter >= corner_camping_patience)
# LEGEND:
# obs_base_camping_behavior[k] = (consider_for_camping_target,
#   attack_corner_camper, attack_non_corner_camper,
#   zero_halite_near_base_mask, corner_camping_counter,
#   other_camping_counter, zero_halite_near_base_players)
obs_base_camping_behavior[base_k] = (
not_my_base and num_my_stay_near_base_corner > 0,
attack_corner_camper, False, zero_halite_near_base_mask,
corner_camping_counter, other_camping_counter,
zero_halite_near_base_players)
else:
attack_other_camper = np.any(
other_camping_counter >= other_camping_patience)
obs_base_camping_behavior[base_k] = (
not_my_base and ((num_stay_near_base-num_my_stay_near_base) < 2),
False, attack_other_camper, zero_halite_near_base_mask,
np.zeros(9), other_camping_counter,
zero_halite_near_base_players)
else:
obs_base_camping_behavior[base_k] = (
not_my_base, False, False, zero_halite_near_base_mask, np.zeros(9),
np.zeros(9), -1*np.ones(9))
history['obs_base_camping_behavior'] = obs_base_camping_behavior
# Update the opponent camper attack budget based on my planned opponent
# camper ship attacks
for k in history['attack_opponent_campers']:
if k in env_observation.players[env_obs_ids[0]][2]:
# My attacking ship is still alive - add it back to the attack budget
opponent_id = history['attack_opponent_campers'][k][4]
history['camping_attack_opponent_budget'][opponent_id] += 1
history['attack_opponent_campers'] = {}
# Decide on which campers I should attack or if I should create a new base
num_my_bases = stacked_bases[0].sum()
my_zero_halite_excluded_from_camping = np.zeros_like(stacked_ships[0])
# Exclude ships that were used for base defense in the previous step and
# ships located at any of my bases
for ship_k in list(set(
history['prev_step']['base_defense_keys'] + history[
'prev_step']['prev_base_defense_keys'])):
if ship_k in my_ships_obs:
ship_position = my_ships_obs[ship_k][0]
row, col = row_col_from_square_grid_pos(ship_position, grid_size)
my_zero_halite_excluded_from_camping[row, col] = 1
my_zero_halite_ships_pos = np.where(
(halite_ships == 0) & stacked_ships[0] & (
~my_zero_halite_excluded_from_camping) & (~stacked_bases[0]))
my_num_zero_halite_ships = my_zero_halite_ships_pos[0].size
history['my_base_not_attacked_positions'] = []
history['my_camped_base_not_attacked_positions'] = []
#######################
### DEFENSIVE LOGIC ###
#######################
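# Defensive bookkeeping: detect campers around my bases, assign my zero
# halite ships to chase them off (or flag the base as one I should not
# defend or return to), and track how "flooded" each base is by nearby
# opponent zero halite ships.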
opponent_ships = stacked_ships[1:].sum(0) > 0
num_opponent_zero_halite_ships = (
opponent_ships & (halite_ships == 0)).sum()
opponent_zero_halite_ships_pos = np.where(
opponent_ships & (halite_ships == 0))
# if observation['step'] == 315:
# import pdb; pdb.set_trace()
if num_my_bases > 0:
ship_pos_to_key = {}
for i in range(num_players):
ship_pos_to_key.update({
v[0]: k for k, v in env_observation.players[
env_obs_ids[i]][2].items()})
for base_k in obs_base_camping_behavior:
if stacked_bases[0, base_k[0], base_k[1]]:
# Only attack campers around my only base or my previous step non
# abandoned bases
opp_camping_behavior = obs_base_camping_behavior[base_k]
if num_my_bases == 1 or (
base_k in history['prev_step']['non_abandoned_base_locations']):
if opp_camping_behavior[1] or opp_camping_behavior[2]:
my_score_rank = (current_scores >= current_scores[0]).sum()
# Loop over the opponent camping ships which have to be punished
# It is not a camper if the ship sits at an opponent base
offending_ship_flat_pos = np.where((
opp_camping_behavior[4] >= corner_camping_patience) | (
opp_camping_behavior[5] >= other_camping_patience))[0]
offending_ship_rows = np.mod(
base_k[0] + (offending_ship_flat_pos//3) - 1, grid_size)
offending_ship_cols = np.mod(
base_k[1] + np.mod(offending_ship_flat_pos, 3) - 1, grid_size)
for i in range(offending_ship_flat_pos.size):
opponent_row = offending_ship_rows[i]
opponent_col = offending_ship_cols[i]
opponent_id = player_ids[opponent_row, opponent_col]
opponent_score_rank = (
current_scores >= current_scores[opponent_id]).sum()
# import pdb; pdb.set_trace()
if ((my_score < convert_cost and num_my_ships > 4) or (
opponent_score_rank+my_score_rank <= 3) or (
my_score_rank == 1) or (
opponent_score_rank+my_score_rank == 4 and (
observation['relative_step'] < 0.4)) or (
history['camping_attack_opponent_budget'][
opponent_id] > 0)) and num_my_ships > 4 and (
not opponent_bases[opponent_row, opponent_col]):
# Attack the opponent if there is a zero halite ship nearby
my_zero_halite_distances = DISTANCES[
opponent_row, opponent_col][my_zero_halite_ships_pos]
my_zero_halite_at_base = (DISTANCES[base_k][
my_zero_halite_ships_pos] == 0)
if my_num_zero_halite_ships != 0:
# Attack the ship with a non base ship if I have one at
# distance <= 3.
# Otherwise: attack the ship with some probability with the
# base ship
safe_attack_ships = np.where((
my_zero_halite_distances <= 3) & (
~my_zero_halite_at_base))[0]
defender_id = -1
# If it takes too long for a safe attack: attack from the
# base anyway
edge_attack_from_base_prob = (
opp_camping_behavior[4].max()-(
corner_camping_patience+2))/4
if safe_attack_ships.size > 0 and (
edge_attack_from_base_prob) < np_rng.uniform():
defender_id = safe_attack_ships[
np.where(my_zero_halite_distances[
safe_attack_ships] == (my_zero_halite_distances[
safe_attack_ships].min()))[0][0]]
else:
# Attack the camper with a probability so that it is
# hard to model (losing the base is always worse).
if np_rng.uniform() < 0.5:
defender_id = np.where(my_zero_halite_distances == (
my_zero_halite_distances.min()))[0][0]
if defender_id >= 0:
defender_row = my_zero_halite_ships_pos[0][defender_id]
defender_col = my_zero_halite_ships_pos[1][defender_id]
defender_k = ship_pos_to_key[
defender_row*grid_size + defender_col]
opponent_k = ship_pos_to_key[
opponent_row*grid_size + opponent_col]
opponent_distance = my_zero_halite_distances[defender_id]
base_camping_override_positions[
opponent_row, opponent_col] = 1
attack_risk_threshold = 0 if opponent_distance > 2 else (
0.5 if opponent_distance == 1 else 0.05)
history['attack_opponent_campers'][defender_k] = (
opponent_row, opponent_col, 1e10, opponent_k,
opponent_id, opponent_distance, attack_risk_threshold)
my_zero_halite_excluded_from_camping[
defender_row, defender_col] = True
history['camping_attack_opponent_budget'][
opponent_id] -= 1
else:
history['my_base_not_attacked_positions'].append(base_k)
history['my_camped_base_not_attacked_positions'].append(
base_k)
else:
if opp_camping_behavior[1] or opp_camping_behavior[2]:
# Flag the base as bad (there is a camper present), and don't
# consider it when returning to a base
# import pdb; pdb.set_trace()
history['my_base_not_attacked_positions'].append(base_k)
history['my_camped_base_not_attacked_positions'].append(base_k)
# Identify if a base is jammed by opponent ships making it hard for
# me to return to the base
# Only consider zero halite opponents
base_row, base_col = base_k
if (opponent_ships & (halite_ships == 0)).sum() > 2:
potential_threat_rows = opponent_zero_halite_ships_pos[0]
potential_threat_cols = opponent_zero_halite_ships_pos[1]
south_dist = np.where(
potential_threat_rows >= base_row,
potential_threat_rows-base_row,
potential_threat_rows-base_row+grid_size)
vert_dist = np.where(south_dist <= grid_size//2, south_dist,
grid_size-south_dist)
east_dist = np.where(
potential_threat_cols >= base_col,
potential_threat_cols-base_col,
potential_threat_cols-base_col+grid_size)
horiz_dist = np.where(east_dist <= grid_size//2, east_dist,
grid_size-east_dist)
dist = horiz_dist+vert_dist
considered_distance_ids = dist <= 4  # Opponent zero halite ships within distance 4 of the base
if considered_distance_ids.sum() > 1:
# Check each quadrant for threats
north_threat_ids = (south_dist[
considered_distance_ids] > grid_size//2) & (
vert_dist[considered_distance_ids] >= horiz_dist[
considered_distance_ids])
north_threat_score = (
1/dist[considered_distance_ids][north_threat_ids]).sum()
south_threat_ids = (south_dist[
considered_distance_ids] < grid_size//2) & (
vert_dist[considered_distance_ids] >= horiz_dist[
considered_distance_ids])
south_threat_score = (1/dist[
considered_distance_ids][south_threat_ids]).sum()
east_threat_ids = (east_dist[
considered_distance_ids] < grid_size//2) & (
vert_dist[considered_distance_ids] <= horiz_dist[
considered_distance_ids])
east_threat_score = (1/dist[
considered_distance_ids][east_threat_ids]).sum()
west_threat_ids = (east_dist[
considered_distance_ids] > grid_size//2) & (
vert_dist[considered_distance_ids] <= horiz_dist[
considered_distance_ids])
west_threat_score = (1/dist[
considered_distance_ids][west_threat_ids]).sum()
threat_scores = np.array([
north_threat_score, south_threat_score, east_threat_score,
west_threat_score])
min_threat_score = threat_scores.min()
else:
min_threat_score = 0
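# The flood counter logic below only reacts when every side of the base is
# threatened: min_threat_score is the 1/distance threat sum of the least
# threatened quadrant, so e.g. one zero halite opponent at distance 2 in
# each of the four quadrants would give a min threat score of 0.5, while a
# single unthreatened quadrant keeps it at 0.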
current_flood_counter = history[
'my_base_flooded_counter'].get(base_k, 0)
# Regression model (cubic in the opponent zero halite ship count) of the
# expected min threat score, fit in a sim study
expected_min_threat_score = 3.412e-03 - 1.047e-03*(
num_opponent_zero_halite_ships) + 8.706e-05*(
num_opponent_zero_halite_ships**2) - 2.878e-07*(
num_opponent_zero_halite_ships**3)
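# Worked example (using the fit above): with 10 opponent zero halite ships
# the expected min threat score is about
# 3.412e-03 - 1.047e-02 + 8.706e-03 - 2.878e-04, roughly 1.4e-03, so the
# flood counter only increases when the observed min threat score exceeds
# that value.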
# print(num_opponent_zero_halite_ships, expected_min_threat_score)
current_flood_counter = max(0, min(
my_base_flooded_patience+flood_patience_buffer,
current_flood_counter+min_threat_score-expected_min_threat_score)
)
history['my_base_flooded_counter'][base_k] = (
current_flood_counter)
# print(observation['step'], threat_counts, current_flood_counter)
# import pdb; pdb.set_trace()
# x=1
if current_flood_counter >= my_base_flooded_patience and not (
base_k in history['my_base_not_attacked_positions']):
history['my_base_not_attacked_positions'].append(base_k)
# Delete the base flooded counter for destroyed bases
destroyed_bases = ~stacked_bases[0] & (
history['prev_step']['stacked_bases'][0])
if np.any(destroyed_bases):
destroyed_base_pos = np.where(destroyed_bases)
for destroyed_base_id in range(destroyed_base_pos[0].size):
destroyed_base_row = destroyed_base_pos[0][destroyed_base_id]
destroyed_base_col = destroyed_base_pos[1][destroyed_base_id]
destroyed_base_k = (destroyed_base_row, destroyed_base_col)
if destroyed_base_k in history['my_base_flooded_counter']:
del history['my_base_flooded_counter'][destroyed_base_k]
# Keep only the camped entries in 'my_base_not_attacked_positions' (drop
# the merely flooded ones) when many of my bases are flagged, to avoid
# creating too many new bases
if (len(history['my_base_not_attacked_positions']) - len(
history['my_camped_base_not_attacked_positions'])) > 2 and len(
history['my_base_not_attacked_positions']) > 3:
history['my_base_not_attacked_positions'] = copy.copy(
history['my_camped_base_not_attacked_positions'])
########################
### AGGRESSIVE LOGIC ###
########################
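# Offensive bookkeeping: decide how many of my zero halite ships may camp
# at opponent bases this step, score candidate opponent bases, assign
# campers to the best ones, and update the per-opponent camping phases
# based on how each opponent reacted to my campers.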
if (observation['relative_step'] >= config['relative_step_start_camping']):
remaining_camping_budget = history['remaining_camping_budget']
prev_camping_ships_targets = history['camping_ships_targets']
number_already_camping = len(prev_camping_ships_targets)
camping_ships_strategy = {}
camping_ships_targets = {}
aggression_stage_opponents = copy.copy(
history['aggression_stage_opponents_camping'])
aggression_camping_counter = copy.copy(
history['aggression_opponents_camping_counter'])
camping_phase_opponents = copy.copy(history['camping_phase_opponents'])
prev_camping_phase_opponents = copy.copy(
history['camping_phase_opponents'])
prev_opponent_bases = history['prev_step']['stacked_bases'][1:].sum(0) > 0
my_zero_lost_ships_opponents = history['my_zero_lost_ships_opponents']
total_opponent_bases_count = stacked_bases.sum((1, 2))[1:]
max_camping_budget_this_step = int(observation['relative_step']*(
config['max_camper_ship_budget']*2))
if remaining_camping_budget >= 1 or (number_already_camping > 0):
# Aim higher than the current ranking: being behind is only counted half
# as much as being ahead to determine who to attack
score_diffs = current_scores[0]-current_scores[1:]
win_preferred_score_diff = np.abs(score_diffs)
win_preferred_score_diff[score_diffs < 0] /= 2
opponent_scores_scaled = 1-win_preferred_score_diff/max(
100, steps_remaining)/15-1e2*(
(scores[1:] < env_config.spawnCost) & (ship_counts[1:] == 0))
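# Worked example (a sketch): with 200 steps remaining, an opponent that
# trails me by 300 scores 1 - 300/(200*15) = 0.9, while one that leads me
# by 300 scores 1 - 150/(200*15) = 0.95 (my deficit is only counted half),
# and opponents with no ships and less than a spawn cost in the bank are
# effectively excluded by the -1e2 term.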
# Always keep targeting the number two when I am the number one
# Also keep targeting the number one if I am the number two and the
# number three is far behind
my_score_rank = (current_scores >= current_scores[0]).sum()
play_safe_aggression_limits = np.ones(num_players)*(
play_safe_aggression_limit)
if my_score_rank == 1 or (my_score_rank == 2 and np.all(
opponent_scores_scaled <= 0)):
argmax_id = np.argmax(opponent_scores_scaled)
opponent_scores_scaled[argmax_id] = max(
opponent_scores_scaled[argmax_id], 1e-9)
play_safe_aggression_limits[argmax_id+1] += 50
# Don't camp at an opponent that has more bases than the camping budget
# for this step
opponent_scores_scaled[
total_opponent_bases_count > min(
max_camping_budget_this_step, remaining_camping_budget)] = -100
if num_all_opponent_bases > 0:
# Compute target scores for each of the opponent bases
base_camping_scores = {}
for player_id in range(1, num_players):
opponent_bases = stacked_bases[player_id]
env_obs_id = env_obs_ids[player_id]
player_obs = env_observation.players[env_obs_id]
current_base_keys = list(player_obs[1].keys())
if opponent_bases.sum():
num_opponent_bases = opponent_bases.sum()
opponent_base_positions = np.where(opponent_bases)
deposit_data = np.array(history['base_deposit_data'][player_id])
# Delete old bases from the deposit data and consider at most the
# most recent 20 deposits
if deposit_data.size > 0:
actual_base_rows = np.array([
d in current_base_keys for d in deposit_data[:, 0]])
deposit_data = deposit_data[actual_base_rows][-20:]
player_base_pos_to_key = {v: k for k, v in player_obs[1].items()}
for base_id in range(num_opponent_bases):
base_row = opponent_base_positions[0][base_id]
base_col = opponent_base_positions[1][base_id]
base_key = player_base_pos_to_key[base_row*grid_size+base_col]
if deposit_data.size == 0:
relative_deposit_base_score = 1
else:
if opponent_scores_scaled[player_id-1] < 0:
relative_deposit_base_score = 1
else:
relative_deposit_base_score = min(1, (deposit_data[:, 0] == (
base_key)).mean() + 1e-2)
should_consider_camp_penalty = -1*int(
not obs_base_camping_behavior[(base_row, base_col)][0])
base_camping_score = opponent_scores_scaled[player_id-1]*(
relative_deposit_base_score)+should_consider_camp_penalty
base_camping_scores[(base_row, base_col)] = base_camping_score
# print(observation['step'], base_camping_scores)
all_opponent_bases = list(base_camping_scores.keys())
# Increment the score for the bases where we are already camping to
# avoid switching targets too often
prev_env_observation = history['prev_step']['env_observation']
my_prev_ships_obs = prev_env_observation.players[env_obs_ids[0]][2]
delete_keys = []
for ship_k in prev_camping_ships_targets:
ship_still_alive = ship_k in my_ships_obs
# Incorporate the past step opponent behavior to infer what phase of
# the camping each opponent is in (see details on the phases below)
base_target = prev_camping_ships_targets[ship_k]
prev_pos = my_prev_ships_obs[ship_k][0]
prev_row, prev_col = row_col_from_square_grid_pos(
prev_pos, grid_size)
prev_base_distance = DISTANCES[prev_row, prev_col][base_target]
opponent_id = np.where(prev_stacked_bases[
:, base_target[0], base_target[1]])[0][0]
opponent_prev_camping_phase = prev_camping_phase_opponents[
opponent_id][base_target]
aggression_already_added = False
if not ship_still_alive:
if (prev_row, prev_col) in my_zero_lost_ships_opponents:
aggression_occurred = my_zero_lost_ships_opponents[
(prev_row, prev_col)] == opponent_id
if aggression_occurred:
aggression_already_added = True
aggression_camping_counter[opponent_id] += 1
if aggression_camping_counter[opponent_id] >= (
play_safe_aggression_limits[opponent_id]):
aggression_stage_opponents[opponent_id] = 2
camping_phase_opponents[opponent_id][base_target] = 7
if prev_base_distance <= 2:
# Possible transitions (on a per-opponent level):
# - 2 -> 3: My ship does not get attacked at least M times and
# the opponent has returned at least N ships to the
# base and there is a zero halite square right next to
# the base
# - 3 -> 4: My ship is not attacked when there are > 1 opponent
# zero halite ships that can get to me
# - 3 -> 5: The opponent is ignoring my camper and still returns
#           to the base
# - 4 -> 5: The opponent is ignoring my camper and still returns
#           to the base
# - 2 -> 6: My camping ship is not aggressively attacked but
# there is no zero halite square at distance 1 of the
# base
# - 6 -> 7: My camping ship is aggressively attacked
# - 2 -> 7: My camping ship is aggressively attacked
if opponent_prev_camping_phase == 2 and (
camping_phase_opponents[opponent_id][base_target] == 2):
(num_halite_ships_returned, non_aggression_counter) = history[
'camping_phase_2_details_opponents'][opponent_id]
# Update the number of opponent non zero halite ships returned
# to the base
opponent_pos_to_ship = {v[0]: k for k, v in (
env_observation.players[env_obs_ids[opponent_id]][2]).items()}
opponent_prev_ships_obs = prev_env_observation.players[
env_obs_ids[opponent_id]][2]
target_base_pos = base_target[0]*grid_size + base_target[1]
if target_base_pos in opponent_pos_to_ship:
at_base_opponent_ship_k = opponent_pos_to_ship[target_base_pos]
if at_base_opponent_ship_k in opponent_prev_ships_obs:
num_halite_ships_returned += int(opponent_prev_ships_obs[
at_base_opponent_ship_k][1] > 0)
# Update the non aggression counter.
aggression_occurred = (prev_pos in opponent_pos_to_ship) and (
opponent_prev_ships_obs[
opponent_pos_to_ship[prev_pos]][1] == 0)
# If I lost my ship, it was likely due to an aggression
if not ship_still_alive and not aggression_occurred and (
prev_row, prev_col) in my_zero_lost_ships_opponents:
aggression_occurred = my_zero_lost_ships_opponents[
(prev_row, prev_col)] == opponent_id
# If an aggression occurred: move to phase 7.
if aggression_occurred:
if not aggression_already_added:
aggression_camping_counter[opponent_id] += 1
if aggression_camping_counter[opponent_id] >= (
play_safe_aggression_limits[opponent_id]):
aggression_stage_opponents[opponent_id] = 2
camping_phase_opponents[opponent_id][base_target] = 7
else:
non_aggression_counter += 1
# If the no aggression and ship return thresholds get exceeded:
# move to phase 3 or 6
if non_aggression_counter >= 10 and (
num_halite_ships_returned >= 5):
# Figure out if there is a zero halite square to camp at
# right next to the base to decide on the next camping phase
dist_1_zero_halite = (obs_halite == 0) & (DISTANCES[
base_target] == 1)
if aggression_stage_opponents[opponent_id] != 2:
aggression_stage_opponents[opponent_id] = 1
if dist_1_zero_halite.sum() > 0:
camping_phase_opponents[opponent_id][base_target] = 3
else:
camping_phase_opponents[opponent_id][base_target] = 6
history['camping_phase_2_details_opponents'][opponent_id] = (
num_halite_ships_returned, non_aggression_counter)
elif opponent_prev_camping_phase == 6:
# Some nice code duplication
# Update the number of opponent non zero halite ships returned
# to the base
opponent_pos_to_ship = {v[0]: k for k, v in (
env_observation.players[env_obs_ids[opponent_id]][2]).items()}
opponent_prev_ships_obs = prev_env_observation.players[
env_obs_ids[opponent_id]][2]
aggression_occurred = (prev_pos in opponent_pos_to_ship) and (
opponent_prev_ships_obs[
opponent_pos_to_ship[prev_pos]][1] == 0)
# If I lost my ship, it was likely due to an aggression
if not ship_still_alive and not aggression_occurred and (
prev_row, prev_col) in my_zero_lost_ships_opponents:
aggression_occurred = my_zero_lost_ships_opponents[
(prev_row, prev_col)] == opponent_id
# If an aggression occurred: move to phase 7.
if aggression_occurred:
# import pdb; pdb.set_trace()
camping_phase_opponents[opponent_id][base_target] = 7
elif opponent_prev_camping_phase in [3, 4]:
# If I remain at a zero halite square at a distance of 1 of the
# target and the opponent repeatedly ignores my threat: go to
# phase 5
ignore_camping_threats_counter = history[
'camping_phase_3_4_ignore_threats_counter'][opponent_id]
if ship_still_alive:
ship_position = my_ships_obs[ship_k][0]
row, col = row_col_from_square_grid_pos(
ship_position, grid_size)
if prev_row == row and prev_col == col and obs_halite[
row, col] == 0 and (prev_base_distance == 1):
opponent_pos_to_ship = {v[0]: k for k, v in (
env_observation.players[
env_obs_ids[opponent_id]][2]).items()}
opponent_prev_ships_obs = prev_env_observation.players[
env_obs_ids[opponent_id]][2]
target_base_pos = base_target[0]*grid_size + base_target[1]
if target_base_pos in opponent_pos_to_ship:
at_base_opponent_ship_k = opponent_pos_to_ship[
target_base_pos]
if at_base_opponent_ship_k in opponent_prev_ships_obs:
if opponent_prev_ships_obs[
at_base_opponent_ship_k][1] > 0:
ignore_camping_threats_counter += 1
if ignore_camping_threats_counter >= 3*0:
camping_phase_opponents[opponent_id][base_target] = 5
else:
# If a successful aggression occurred: move to phase 7.
camping_phase_opponents[opponent_id][base_target] = 7
history['camping_phase_3_4_ignore_threats_counter'][
opponent_id] = ignore_camping_threats_counter
if ship_still_alive:
if base_target in all_opponent_bases:
base_camping_scores[base_target] += continued_camping_bonus
else:
# Delete the camping ship from the dict if it was destroyed
# Subtract half a ship from the budget if I ended up attacking a
# base
# Subtract a full ship if I used the ship to construct a new base
delete_keys.append(ship_k)
number_already_camping -= 1
my_prev_move = history['prev_step']['my_ship_actions'][ship_k]
my_prev_row, my_prev_col = row_col_from_square_grid_pos(
history['prev_step']['env_observation'].players[
env_obs_ids[0]][2][ship_k][0], grid_size)
if my_prev_move == CONVERT:
remaining_camping_budget -= 1
else:
my_moved_row, my_moved_col = move_ship_row_col(
my_prev_row, my_prev_col, my_prev_move, grid_size)
if not prev_opponent_bases[my_moved_row, my_moved_col]:
remaining_camping_budget -= 1/2
# Move to phase 7 after exceeding the allowable aggression count
if aggression_camping_counter[opponent_id] >= (
play_safe_aggression_limits[opponent_id]):
camping_phase_opponents[opponent_id][base_target] = 7
for del_ship_k in delete_keys:
del prev_camping_ships_targets[del_ship_k]
# Camping ships participate in opponent hunts depending on the phase
max_campers_assigned = min([
max_camping_budget_this_step,
np.floor(remaining_camping_budget),
100*int(opponent_scores_scaled.max() > 0),
number_already_camping+100*int(
config['start_camp_if_not_winning'] or np.all(
(current_scores[1:] <= (current_scores[0] - 2*spawn_cost)))),
])
num_interesting_bases = (
np.array(list(base_camping_scores.values())) > 0).sum()
num_campers_assigned = int(min([
num_all_opponent_bases + max_non_unique_campers,
max(number_already_camping, max_campers_assigned),
my_num_zero_halite_ships-my_zero_halite_excluded_from_camping.sum(),
num_interesting_bases + max_non_unique_campers,
100*int(num_my_ships >= min_ships_to_consider_camping),
]))
# Assign the campers to the top identified opponent bases
camping_ships_targets = {}
if num_campers_assigned > 0 and num_all_opponent_bases > 0 and (
num_interesting_bases > 0):
my_ship_pos_to_key = {v[0]: k for k, v in my_ships_obs.items()}
my_zero_halite_ship_positions = np.where(
stacked_ships[0] & (halite_ships == 0) & (
~my_zero_halite_excluded_from_camping))
target_pos_scores = np.array([list(k) + [v] for k, v in (
base_camping_scores.items())])
target_pos_scores = np.concatenate([np.zeros(
(target_pos_scores.shape[0], 1)), target_pos_scores], 1)
for score_id in range(target_pos_scores.shape[0]):
base_row = int(target_pos_scores[score_id, 1])
base_col = int(target_pos_scores[score_id, 2])
opponent_id = np.where(stacked_bases[
:, base_row, base_col])[0][0]
if not (base_row, base_col) in camping_phase_opponents[
opponent_id]:
initial_phase = 2 if (
aggression_stage_opponents[opponent_id] <= 1) else 7
camping_phase_opponents[opponent_id][(base_row, base_col)] = (
initial_phase)
target_pos_scores[score_id, 0] = camping_phase_opponents[
opponent_id][(base_row, base_col)]
target_rows = np.argsort(-target_pos_scores[:, -1])[
:num_campers_assigned]
consider_non_unique_base_attack = np.any(
target_pos_scores[target_rows, -1] <= 0) or target_rows.size < (
num_campers_assigned)
if consider_non_unique_base_attack:
# Only continue if there is a target opponent which is in camping
# phase 6 or 7, and the number of assigned campers is strictly
# greater than the number of positive base camping scores.
# Update num_campers_assigned to the number of positive base
# camping scores if I don't find such a base
target_rows = target_rows[target_pos_scores[target_rows, -1] > 0]
num_valid_targeted_bases = target_rows.size
num_unassigned_campers = num_campers_assigned-(
num_valid_targeted_bases)
bases_ids_that_can_take_more_campers = target_rows[
target_pos_scores[target_rows, 0] >= 6]
num_can_add_bases_multiple_campers = (
bases_ids_that_can_take_more_campers.size)
if num_can_add_bases_multiple_campers > 0:
max_add_iterations = max_campers_per_base-1
add_iteration = 0
while num_unassigned_campers > 0 and (
add_iteration < max_add_iterations):
num_added = min(num_unassigned_campers,
num_can_add_bases_multiple_campers)
target_rows = np.concatenate([
target_rows, bases_ids_that_can_take_more_campers[
:num_added]])
num_unassigned_campers -= num_added
add_iteration += 1
num_campers_assigned = target_rows.size
else:
num_campers_assigned = num_valid_targeted_bases
# First handle the bases where we already have a camper to avoid
# releasing the pressure temporarily
if num_campers_assigned > 1:
all_prev_camping_bases = list(prev_camping_ships_targets.values())
already_camping = np.zeros(num_campers_assigned, dtype=np.bool)
for i in range(num_campers_assigned):
considered_row = target_rows[i]
base_row = int(target_pos_scores[considered_row, 1])
base_col = int(target_pos_scores[considered_row, 2])
camp_prev_step = (base_row, base_col) in all_prev_camping_bases
near_my_zh_ship = ((DISTANCES[base_row, base_col] <= 2) & (
halite_ships == 0) & (stacked_ships[0])).sum()
already_camping[i] = camp_prev_step and (near_my_zh_ship > 0)
target_rows = np.concatenate([target_rows[already_camping],
target_rows[~already_camping]])
camping_ships_targets_positions = {}
for target_id in range(num_campers_assigned):
target_row = target_rows[target_id]
# Compute the distance to all my zero halite ships and pick the
# closest to camp out
base_row = int(target_pos_scores[target_row, 1])
base_col = int(target_pos_scores[target_row, 2])
zero_halite_base_distances = DISTANCES[base_row, base_col][
my_zero_halite_ship_positions]
best_ship_id = np.argmin(zero_halite_base_distances)
camper_ship_row = my_zero_halite_ship_positions[0][best_ship_id]
camper_ship_col = my_zero_halite_ship_positions[1][best_ship_id]
ship_camper_k = my_ship_pos_to_key[
camper_ship_row*grid_size+camper_ship_col]
camping_ships_targets[ship_camper_k] = (base_row, base_col)
camping_ships_targets_positions[(base_row, base_col)] = (
camping_ships_targets_positions.get((base_row, base_col), [])) +(
[(camper_ship_row, camper_ship_col)])
# Delete the selected ship from my_zero_halite_ship_positions so
# that it does not get assigned twice
remaining_rows = np.delete(my_zero_halite_ship_positions[0],
best_ship_id)
remaining_cols = np.delete(my_zero_halite_ship_positions[1],
best_ship_id)
my_zero_halite_ship_positions = (remaining_rows, remaining_cols)
# Camping strategy - always risky so losing a ship is fine
# Phase 1: Aggressively navigate to the proximity of the base
# Navigate cautiously once I am at a distance 2 of the base, since the
# target opponent may start to get aggressive here.
# The subsequent phases are computed on a per-opponent basis.
# Phase 2: Aim at a static zero halite corner or circle around a base
# as long as not many opponent ships return to the base. Shy away when
# a zero halite opponent ship threatens me.
# Phase 3: Aim at a zero halite square right next to the base but move
# away when > 1 zero halite ships threaten me.
# Phase 4: aim at a zero halite square right next to the base and do
# not move away.
# Phase 5: If my camper gets ignored: attack the base when it cannot
# be protected.
# Phase 6: Aggressively circle around the base
# Phase 7: Keep circling annoyingly around the base but do so in a safe
# way.
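# Note (an inference from the assignments below, not a documented
# contract): each camping_ships_strategy value appears to be a tuple of
# (risk threshold, collect score override grid, base attack override grid,
# two behavior flags that roughly enable other tactics / attacking the
# base, target base location). The exact meaning of the flags is an
# assumption based on how they are set per phase.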
for ship_k in camping_ships_targets:
target_base_row, target_base_col = camping_ships_targets[ship_k]
target_base_loc_tuple = (target_base_row, target_base_col)
ship_position = my_ships_obs[ship_k][0]
row, col = row_col_from_square_grid_pos(ship_position, grid_size)
current_base_distance = DISTANCES[row, col][
target_base_row, target_base_col]
opponent_id = np.where(stacked_bases[
:, target_base_row, target_base_col])[0][0]
camping_phase = camping_phase_opponents[opponent_id][(
target_base_row, target_base_col)]
zeros_grid_mask = np.zeros((grid_size, grid_size))
if current_base_distance > 2:
# Phase 1
# Aim towards the base neighborhood and prefer squares that are
# closer to my current square
collect_override_addition = 2e4/(DISTANCES[
target_base_row, target_base_col]+1)
collect_override_addition[target_base_row, target_base_col] = -1e5
collect_override_addition[row, col] = -1e5
# Increase the risk threshold (first tuple element) to encourage more
# risky behavior when navigating towards an opponent base
camping_ships_strategy[ship_k] = (
0.01, collect_override_addition, zeros_grid_mask,
current_base_distance > 3, False, target_base_loc_tuple)
else:
dist_1_zero_halite = (obs_halite == 0) & (DISTANCES[
target_base_row, target_base_col] == 1)
if dist_1_zero_halite.sum() == 0 and camping_phase in [
3, 4, 5]:
camping_phase = 6
if camping_phase in [2, 6, 7]:
# Passively select targets around the base in phase 2 and 7.
# Set the targets aggressively in phase 6
# Avoid my other zero halite ships next to the base
target_box = ROW_COL_BOX_MAX_DISTANCE_MASKS[
target_base_row, target_base_col, 1]
collect_override_addition = 2e4*target_box
collect_override_addition[target_base_row, target_base_col] = (
-1e5)
if obs_halite[row, col] > 0:
collect_override_addition[row, col] = -1e5
# Prefer box corners (top left, top right, bottom left, bottom
# right) that have an opposite corner with no halite
target_box_pos = np.where(target_box)
top_left = (target_box_pos[0][0], target_box_pos[1][0])
top_left_zero = obs_halite[top_left] == 0
top_right = (target_box_pos[0][2], target_box_pos[1][2])
top_right_zero = obs_halite[top_right] == 0
bottom_left = (target_box_pos[0][6], target_box_pos[1][6])
bottom_left_zero = obs_halite[bottom_left] == 0
bottom_right = (target_box_pos[0][8], target_box_pos[1][8])
bottom_right_zero = obs_halite[bottom_right] == 0
if top_left_zero and bottom_right_zero:
collect_override_addition[top_left] += 7e4
collect_override_addition[bottom_right] += 7e4
if top_right_zero and bottom_left_zero:
collect_override_addition[top_right] += 7e4
collect_override_addition[bottom_left] += 7e4
# Get the nearby mask of other zero halite ships so that my
# ships that camp at the same base stay out of each other's way
subtract_mask = np.zeros_like(collect_override_addition)
num_this_base_campers = len(camping_ships_targets_positions[(
target_base_row, target_base_col)])
my_ship_at_opposite_edge = False
for other_row, other_col in camping_ships_targets_positions[(
target_base_row, target_base_col)]:
if other_row != row or other_col != col:
base_row_diff = other_row-target_base_row
base_col_diff = other_col-target_base_col
ship_row_dir = np.sign(base_row_diff)
ship_col_dir = np.sign(base_col_diff)
row_central_mask = np.mod(target_base_row +
3*ship_row_dir, grid_size)
col_central_mask = np.mod(target_base_col +
3*ship_col_dir, grid_size)
other_row_central_mask = np.mod(target_base_row -
3*ship_row_dir, grid_size)
other_col_central_mask = np.mod(target_base_col -
3*ship_col_dir, grid_size)
mask_ship_dir = ROW_COL_BOX_MAX_DISTANCE_MASKS[
row_central_mask, col_central_mask, 3]
mask_other_ship_dir = ROW_COL_BOX_MAX_DISTANCE_MASKS[
other_row_central_mask, other_col_central_mask, 3]
subtract_mask += (2e3*(mask_ship_dir * DISTANCE_MASKS[
other_row, other_col] + ROW_COL_BOX_MAX_DISTANCE_MASKS[
other_row, other_col, 2]) - 1e4*mask_other_ship_dir)*(
DISTANCE_MASKS[row, col]**0.1)
# Very high bonus for camping out at opposite corners or a
# zero halite square next to the base
if np.abs(base_row_diff) == 1 and np.abs(base_col_diff) == 1:
opposite_corner_row = np.mod(
target_base_row - base_row_diff, grid_size)
opposite_corner_col = np.mod(
target_base_col - base_col_diff, grid_size)
# When I block an opponent with two static ships at zero
# halite corners: keep them there!
my_ship_at_opposite_edge = my_ship_at_opposite_edge or (
row == opposite_corner_row and (
col == opposite_corner_col))
subtract_mask[opposite_corner_row, opposite_corner_col] -=(
1e6)
collect_override_addition -= subtract_mask
# if observation['step'] == 107:
# import pdb; pdb.set_trace()
# Take more risky actions when I have > 1 campers
risk_threshold = camping_risk_phase_2_7_multiplier*(
num_this_base_campers-1) if (camping_phase in [2, 7]) else 0.1
consider_ship_other_tactics = not my_ship_at_opposite_edge
camping_ships_strategy[ship_k] = (
risk_threshold, collect_override_addition, zeros_grid_mask,
consider_ship_other_tactics, consider_ship_other_tactics,
target_base_loc_tuple)
elif camping_phase in [3, 4, 5]:
# Select a zero halite target right next to the base to camp
# The aggression level depends on the number of nearby zero
# halite ships and the camping phase
# Only attack the base when it is not protected in phase 5
base_protected = (DISTANCES[row, col][
target_base_row, target_base_col] > 1) or ((stacked_ships[
opponent_id]) & (halite_ships == 0) & (
DISTANCES[target_base_row, target_base_col] <= 1)
).sum() > 0
collect_override_addition = np.zeros((grid_size, grid_size))
if base_protected or camping_phase != 5:
target_camp_positions = np.where(dist_1_zero_halite)
for target_camp_id in range(target_camp_positions[0].size):
target_camp_row = target_camp_positions[0][target_camp_id]
target_camp_col = target_camp_positions[1][target_camp_id]
collect_override_addition += 2e4/((DISTANCES[
target_camp_row, target_camp_col]+1)**2)
collect_override_addition *= (DISTANCE_MASKS[row, col]**0.2)
collect_override_addition[
target_base_row, target_base_col] = -1e5
if obs_halite[row, col] > 0:
collect_override_addition[row, col] = -1e5
num_zero_halite_threats = ((stacked_ships[1:].sum(0) > 0) & (
halite_ships == 0) & (DISTANCES[row, col] == 1)).sum()
risk_threshold = 0.0 if (
camping_phase == 3 and num_zero_halite_threats > 1) else 0.1
camping_ships_strategy[ship_k] = (
risk_threshold, collect_override_addition, zeros_grid_mask,
False, camping_phase == 5, target_base_loc_tuple)
else:
# Successfully attack the base
print("Aggressively attacking base", target_base_row,
target_base_col, observation['step'])
collect_attack_addition = 1e5*(
DISTANCES[target_base_row, target_base_col] == 0)
camping_ships_strategy[ship_k] = (
0.0, collect_override_addition,
collect_attack_addition, False, True, target_base_loc_tuple)
history['camping_ships_targets'] = camping_ships_targets
history['camping_ships_strategy'] = camping_ships_strategy
history['remaining_camping_budget'] = remaining_camping_budget
history['aggression_stage_opponents_camping'] = aggression_stage_opponents
history['aggression_opponents_camping_counter'] = (
aggression_camping_counter)
history['camping_phase_opponents'] = camping_phase_opponents
history['base_camping_override_positions'] = (
base_camping_override_positions)
return history
def update_opponent_ships_move_directions(
history, observation, env_observation, env_obs_ids):
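# Records, for every opponent ship that survived the previous step, the
# direction it moved in that step (keyed by ship id): the single step
# direction from its previous cell to its current cell on the wrap-around
# grid (presumably the first entry returned by get_dir_from_target), or it
# is omitted when the ship stayed in place.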
prev_step_opponent_ship_moves = {}
if observation['step'] > 0:
prev_env_players = history['prev_step']['env_observation'].players
num_players = len(prev_env_players)
grid_size = observation['halite'].shape[0]
for player_id in range(1, num_players):
env_obs_id = env_obs_ids[player_id]
player_ships = env_observation.players[env_obs_id][2]
prev_player_ships = prev_env_players[env_obs_id][2]
for ship_k in player_ships:
if ship_k in prev_player_ships:
prev_row, prev_col = row_col_from_square_grid_pos(
prev_player_ships[ship_k][0], grid_size)
row, col = row_col_from_square_grid_pos(
player_ships[ship_k][0], grid_size)
prev_action = get_dir_from_target(
prev_row, prev_col, row, col, grid_size)[0]
if prev_action is not None:
prev_step_opponent_ship_moves[ship_k] = prev_action
history['prev_step_opponent_ship_moves'] = prev_step_opponent_ship_moves
return history
def update_cycle_counters(
config, history, observation, player_obs, env_observation, env_obs_ids):
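# Each cycle counter value appears to be a tuple (last_action,
# action_before_last, cycle_length): the length grows by one per step as
# long as each action matches the action from two steps earlier (a period
# two cycle, which includes repeatedly standing still), and restarts
# whenever that pattern is broken.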
num_players = len(observation['rewards_bases_ships'])
stacked_bases = np.stack(
[rbs[1] for rbs in observation['rewards_bases_ships']])
all_bases = stacked_bases.sum(0) > 0
grid_size = all_bases.shape[0]
if observation['step'] == 0:
history['ship_action_cycle_counter'] = {}
history['avoid_cycle_actions'] = {}
history['opponent_cycle_counters'] = [{} for _ in range(num_players-1)]
history['empty_or_cycled_positions'] = np.zeros(
(grid_size, grid_size), dtype=np.bool)
history['cycled_position_counts'] = -1*np.ones((grid_size, grid_size))
history['opponent_ship_pos_to_key'] = {}
elif config['avoid_cycles'] > 0:
cycle_counters = history['ship_action_cycle_counter']
avoid_cycle_actions = {}
prev_ship_actions = history['prev_step']['my_ship_actions']
prev_step_rescue_ships = history['prev_step']['ships_on_rescue_mission']
opponent_ship_pos_to_key = {}
# Update the cycles and corresponding cycle counters for all my ships
# that are still alive
for ship_k in prev_ship_actions:
if not ship_k in player_obs[2]:
# The ship died or was converted - delete it from the cycle data
if ship_k in cycle_counters:
del cycle_counters[ship_k]
else:
# The ship is still alive: initialize its cycle data if it is new,
# otherwise update the cycle counter
prev_action = prev_ship_actions[ship_k]
if not ship_k in cycle_counters:
cycle_counters[ship_k] = (prev_action, -1, 0)
else:
prev_a_min_1, prev_a_min_2, cycle_count = cycle_counters[ship_k]
if cycle_count > 0 and (prev_a_min_2 != prev_action):
cycle_count = 0
cycle_counters[ship_k] = (prev_action, prev_a_min_1, cycle_count+1)
ship_halite = player_obs[2][ship_k][1]
cycle_limit = 12 if ship_halite > 0 else 20
if cycle_count > cycle_limit:
# Avoid the action if the action is not a rescue action and the
# ship is not near a base
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
near_base = np.any(all_bases[ROW_COL_MAX_DISTANCE_MASKS[
row, col, 2]])
if not ship_k in prev_step_rescue_ships and not near_base:
avoid_cycle_actions[ship_k] = prev_a_min_1
history['ship_action_cycle_counter'] = cycle_counters
history['avoid_cycle_actions'] = avoid_cycle_actions
if observation['step'] > 0:
# Update the opponent cycle counters
prev_env_players = history['prev_step']['env_observation'].players
for player_id in range(1, num_players):
cycle_counters = history['opponent_cycle_counters'][player_id-1]
env_obs_id = env_obs_ids[player_id]
# Update the cycles and corresponding cycle counters for all opponent
# ships that are still alive
player_ships = env_observation.players[env_obs_id][2]
prev_player_ships = prev_env_players[env_obs_id][2]
grid_size = observation['halite'].shape[0]
for ship_k in prev_player_ships:
if not ship_k in player_ships:
# The ship died or was converted - delete it from the cycle data
if ship_k in cycle_counters:
del cycle_counters[ship_k]
else:
# The ship is still alive: initialize its cycle data if it is new,
# otherwise update the cycle counter
prev_row, prev_col = row_col_from_square_grid_pos(
prev_player_ships[ship_k][0], grid_size)
row, col = row_col_from_square_grid_pos(
player_ships[ship_k][0], grid_size)
opponent_ship_pos_to_key[(row, col)] = ship_k
prev_action = get_dir_from_target(
prev_row, prev_col, row, col, grid_size)
if not ship_k in cycle_counters:
cycle_counters[ship_k] = (prev_action, -1, 0)
else:
prev_a_min_1, prev_a_min_2, cycle_count = cycle_counters[ship_k]
if cycle_count > 0 and (prev_a_min_2 != prev_action):
cycle_count = 0
cycle_counters[ship_k] = (prev_action, prev_a_min_1, cycle_count+1)
history['opponent_cycle_counters'][player_id-1] = cycle_counters
history['opponent_ship_pos_to_key'] = opponent_ship_pos_to_key
# Loop over all ships and mark their corresponding squares as empty or
# cycled
prev_stacked_bases = history['prev_step']['stacked_bases']
all_prev_bases = prev_stacked_bases.sum(0) > 0
base_positions_equal = all_prev_bases == all_bases
empty_or_cycled_positions = np.copy(base_positions_equal)
empty_or_extended_cycled_positions = np.copy(base_positions_equal)
cycled_position_counts = np.copy(base_positions_equal).astype(np.int)*-1
for player_id in range(num_players):
env_obs_id = env_obs_ids[player_id]
player_ships = env_observation.players[env_obs_id][2]
prev_player_ships = prev_env_players[env_obs_id][2]
if player_id == 0:
cycle_counters = history['ship_action_cycle_counter']
else:
cycle_counters = history['opponent_cycle_counters'][player_id-1]
for ship_k in player_ships:
row, col = row_col_from_square_grid_pos(
player_ships[ship_k][0], grid_size)
if not ship_k in prev_player_ships:
empty_or_cycled_positions[row, col] = 0
empty_or_extended_cycled_positions[row, col] = 0
cycled_position_counts[row, col] = 0
else:
cycle_duration = cycle_counters[ship_k][2]
ship_in_repeat_cycle = (cycle_duration >= config[
'surrounding_ships_cycle_extrapolate_step_count']) and ((
cycle_counters[ship_k][0] != cycle_counters[ship_k][1]) or (
cycle_counters[ship_k][0] is None))
ship_in_extended_repeat_cycle = (cycle_duration >= config[
'surrounding_ships_extended_cycle_extrapolate_step_count']) and ((
cycle_counters[ship_k][0] != cycle_counters[ship_k][1]) or (
cycle_counters[ship_k][0] is None))
cycled_position_counts[row, col] = cycle_duration*int(
ship_in_repeat_cycle)
empty_or_cycled_positions[row, col] = ship_in_repeat_cycle
empty_or_extended_cycled_positions[row, col] = (
ship_in_extended_repeat_cycle)
history['empty_or_cycled_positions'] = empty_or_cycled_positions
history['empty_or_extended_cycled_positions'] = (
empty_or_extended_cycled_positions)
history['cycled_position_counts'] = cycled_position_counts
return history
def update_destoyed_base_ship_count(history, observation, player_obs):
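# Tracks the cumulative number of my destroyed bases and ships. A missing
# ship only counts as destroyed when its last known square is not one of
# my current bases (i.e. it was not converted into a base).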
if observation['step'] == 0:
history['num_destroyed_bases'] = 0
history['num_destroyed_ships'] = 0
else:
grid_size = observation['rewards_bases_ships'][0][1].shape[0]
prev_bases = history[
'prev_step']['observation']['rewards_bases_ships'][0][1]
current_bases = observation['rewards_bases_ships'][0][1]
destroyed_base_count_this_step = (prev_bases & (~current_bases)).sum()
history['num_destroyed_bases'] += destroyed_base_count_this_step
prev_player_obs = history['prev_step']['player_obs']
current_ship_keys = list(player_obs[2].keys())
destroyed_ship_count_this_step = 0
for ship_k in prev_player_obs[2]:
if not ship_k in current_ship_keys:
row, col = row_col_from_square_grid_pos(
prev_player_obs[2][ship_k][0], grid_size)
if not current_bases[row, col]:
# import pdb; pdb.set_trace()
destroyed_ship_count_this_step += 1
history['num_destroyed_ships'] += destroyed_ship_count_this_step
return history
def update_returning_to_base_ships(history, observation, player_obs):
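# Keeps two ship id lists consistent with the current observation:
# 'returning_to_base_ships' and 'temporary_hoarding_collect_ships' both
# drop ships that died or that no longer carry halite (they either
# deposited it or should rejoin the pack hunt).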
if observation['step'] == 0:
history['returning_to_base_ships'] = []
history['temporary_hoarding_collect_ships'] = []
else:
current_bases = observation['rewards_bases_ships'][0][1]
grid_size = current_bases.shape[0]
delete_keys = []
delete_temp_hoard_keys = []
# Temporary collect ships that no longer carry halite should rejoin the
# hoarding (pack hunt)
for ship_k in history['temporary_hoarding_collect_ships']:
if not ship_k in player_obs[2]:
# My ship was destroyed
delete_temp_hoard_keys.append(ship_k)
else:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = player_obs[2][ship_k][1]
if ship_halite == 0:
delete_temp_hoard_keys.append(ship_k)
for ship_k in history['returning_to_base_ships']:
if not ship_k in player_obs[2]:
# My ship was destroyed
delete_keys.append(ship_k)
else:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_halite = player_obs[2][ship_k][1]
if ship_halite == 0:
delete_keys.append(ship_k)
for ship_k in delete_keys:
history['returning_to_base_ships'].remove(ship_k)
if ship_k in history['temporary_hoarding_collect_ships'] and ship_k in (
player_obs[2]):
# Join the pack hunt if the base has a nearby opponent or if there
# is little halite to be mined nearby
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
if history['prev_step']['safe_to_collect_margin'][row, col] < 4 or (
observation['halite'][ROW_COL_MAX_DISTANCE_MASKS[
row, col, 5]].sum() < 1000):
delete_temp_hoard_keys.append(ship_k)
# else:
# print(observation['step'], row, col,
# "Not joining the pack hunt quite yet")
for ship_k in delete_temp_hoard_keys:
if ship_k in history['temporary_hoarding_collect_ships']:
history['temporary_hoarding_collect_ships'].remove(ship_k)
return history
def update_early_best_opponent(config, history, observation):
if observation['step'] == 0:
history['ballistic_early_best_target_mode'] = False
history['ballistic_early_best_targets_sorted'] = None
elif history['ballistic_early_best_targets_sorted'] is None:
if observation['relative_step'] >= config[
'early_best_opponent_relative_step']:
current_scores = history['current_scores']
# import pdb; pdb.set_trace()
history['ballistic_early_best_targets_sorted'] = np.argsort(
-current_scores[1:])
# print(observation['step'], history['ballistic_early_best_targets_sorted'])
return history
def update_initial_collect_boost(history, observation, player_obs):
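# Computed once at step 0 (a sketch of the formula below): for every
# square, the difference d between my nearest ship distance and the
# nearest opponent ship distance is turned into a multiplier
# (1.5 ** min(3, d)) ** 0.5. For example, d = 2 (an opponent can get there
# two steps sooner) gives 1.5, while d = -2 gives roughly 0.67.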
if observation['step'] == 0:
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
grid_size = stacked_ships.shape[1]
# Stack the collect scores for all my ships
ship_rows = []
ship_cols = []
ship_keys = []
for ship_k in player_obs[2]:
row, col = row_col_from_square_grid_pos(
player_obs[2][ship_k][0], grid_size)
ship_rows.append(row)
ship_cols.append(col)
ship_keys.append(ship_k)
my_num_ships = len(ship_rows)
my_ship_positions = (np.array(ship_rows), np.array(ship_cols))
opponent_ship_positions = np.where(stacked_ships[1:].sum(0) > 0)
# Obtain the nearest distances for my and opponent ships
my_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
for ship_id in range(my_num_ships):
row = my_ship_positions[0][ship_id]
col = my_ship_positions[1][ship_id]
my_nearest_ship_distances = np.minimum(
my_nearest_ship_distances, DISTANCES[row, col])
opponent_nearest_ship_distances = 99*np.ones((grid_size, grid_size))
opponent_ships = stacked_ships[1:].sum(0) > 0
num_opponent_ships = opponent_ships.sum()
for ship_id in range(num_opponent_ships):
row = opponent_ship_positions[0][ship_id]
col = opponent_ship_positions[1][ship_id]
opponent_nearest_ship_distances = np.minimum(
opponent_nearest_ship_distances, DISTANCES[row, col])
nearest_ship_distance_difference = (
my_nearest_ship_distances - opponent_nearest_ship_distances)
original_position_multiplier = (1.5**(np.minimum(
3, nearest_ship_distance_difference)))**0.5
history['original_position_multiplier'] = original_position_multiplier
return history
def update_initial_ship_strategy(
history, observation, player_obs, not_collect_first_ships_near_base=6,
initial_near_base_collect_excluded_steps=20, initial_exclude_box_size=3):
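# Opening bookkeeping: remember where my first base was placed, mark a box
# of size "initial_exclude_box_size" around it in which the first
# "not_collect_first_ships_near_base" ships should not collect, and stop
# excluding each of those ships once it returns to a base after step
# "initial_near_base_collect_excluded_steps" (or is destroyed).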
grid_size = observation['halite'].shape[0]
if observation['step'] == 0:
history['first_base_position'] = None
history['initial_not_collect_near_base_ships'] = []
history['initial_not_collect_near_base_mask'] = np.zeros(
(grid_size, grid_size), dtype=np.bool)
else:
if history['first_base_position'] is None and len(player_obs[1]) > 0:
base_row, base_col = row_col_from_square_grid_pos(
list(player_obs[1].values())[0], grid_size)
history['first_base_position'] = (base_row, base_col)
history['initial_not_collect_near_base_mask'] = (
ROW_COL_BOX_MAX_DISTANCE_MASKS[
base_row, base_col, initial_exclude_box_size])
if history['first_base_position'] is not None:
# The first "not_collect_first_ships_near_base" are added to the excluded
# near base collect list. They are removed from the list after returning
# to a base after "initial_near_base_collect_excluded_steps" steps
# Detect new ships
if len(history['initial_not_collect_near_base_ships']) < (
not_collect_first_ships_near_base) and observation['step'] <= (
initial_near_base_collect_excluded_steps):
current_excluded = set(history['initial_not_collect_near_base_ships'])
exclusion_budget = not_collect_first_ships_near_base - len(
current_excluded)
current_ships = list(player_obs[2].keys())
new_ships = list(set(current_ships) - current_excluded)
if len(new_ships) > 0:
new_excluded = new_ships[:exclusion_budget]
history['initial_not_collect_near_base_ships'] += new_excluded
elif (
observation['step'] > initial_near_base_collect_excluded_steps) and (
len(history['initial_not_collect_near_base_ships']) > 0):
# Detect ships that have returned to a base
delete_keys = []
base_positions = list(player_obs[1].values())
for ship_k in history['initial_not_collect_near_base_ships']:
if not ship_k in player_obs[2]:
# The ship was destroyed
delete_keys.append(ship_k)
else:
ship_position = player_obs[2][ship_k][0]
if ship_position in base_positions:
delete_keys.append(ship_k)
for ship_k in delete_keys:
history['initial_not_collect_near_base_ships'].remove(ship_k)
# print(observation['step'], history['initial_not_collect_near_base_ships'])
return history
def update_history_start_step(
config, history, observation, env_observation, env_obs_ids, env_config,
np_rng):
history_start_time = time.time()
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
opponent_ships = stacked_ships[1:].sum(0) > 0
other_halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']])[1:].sum(0)
other_halite_ships[~opponent_ships] = 1e9
grid_size = opponent_ships.shape[0]
player_obs = env_observation.players[env_obs_ids[0]]
player_ids = -1*np.ones((grid_size, grid_size), dtype=np.int)
for i in range(stacked_ships.shape[0]):
player_ids[stacked_ships[i]] = i
if observation['step'] == 0:
history['hunting_season_started'] = False
history['hunting_season_standard_ships'] = []
history['prev_step_boxing_in_ships'] = []
history['prev_step_hoarded_one_step_opponent_keys'] = []
history['my_prev_step_base_attacker_ships'] = []
history['request_increment_num_standard_hunting'] = 0
history['request_decrement_num_standard_hunting'] = 0
history['add_strategic_base'] = False
history['construct_strategic_base_position'] = None
history['ballistic_mode'] = False
history['my_time_averaged_vulnerable_score'] = np.zeros(
(grid_size, grid_size))
history['early_hunting_season_ended'] = False
history['targeted_hoard_mode'] = False
history['limit_ships_timeout'] = False
history['initial_collect_zero_halite_targets'] = {}
history['prev_ballistic_target_override'] = None
# Update the counter that keeps track of how long ships are chased
history = update_chase_counter(
history, observation, env_observation, stacked_ships, other_halite_ships,
player_ids, env_obs_ids)
# Update the data that keeps track of opponent behavior when being boxed in
history = update_box_in_counter(
history, observation, env_observation, stacked_ships, env_obs_ids,
env_config)
# Update the data that keeps track of zero halite ship opponent behavior as a
# function of opponent zero halite ships
history = update_zero_halite_ship_behavior(
config, history, observation, env_observation, stacked_ships, env_obs_ids,
env_config)
# Update the data that keeps track of camping behavior
history = update_base_camping_strategy(
config, history, observation, env_observation, stacked_ships, env_obs_ids,
env_config, np_rng)
# Update the data that keeps track of the early game best opponent (targeted
# at the end if I am winning massively)
history = update_early_best_opponent(config, history, observation)
# Update the move directions of all opponent ships
history = update_opponent_ships_move_directions(
history, observation, env_observation, env_obs_ids)
# Update the counters that keep track of my repetitive actions - avoid
# cycling in a cycle of max length 2 for more than X steps when I can afford
# other actions
history = update_cycle_counters(
config, history, observation, player_obs, env_observation, env_obs_ids)
# Update the count of my destroyed bases and ships
history = update_destoyed_base_ship_count(history, observation, player_obs)
# Update returning to base ships
history = update_returning_to_base_ships(history, observation, player_obs)
# Update the initial collect boost to make sure we focus on halite near the
# boundary of our influence sphere
history = update_initial_collect_boost(history, observation, player_obs)
# Update the logic to make sure that my first ships don't gather near my
# first base in the first N steps
history = update_initial_ship_strategy(history, observation, player_obs)
return history, (time.time()-history_start_time)
def update_history_end_step(
history, observation, ship_actions, opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, ship_plans, player_obs,
env_observation, main_base_distances, on_rescue_mission,
boxed_in_zero_halite_opponents, ships_on_box_mission,
non_abandoned_base_pos, this_step_base_defense_keys,
should_spawn_base_next_step, ballistic_attack_base_targets,
safe_to_collect_margin, get_actions_start_time):
none_included_ship_actions = {k: (ship_actions[k] if (
k in ship_actions) else None) for k in player_obs[2]}
stacked_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
halite_ships = np.stack([
rbs[3] for rbs in observation['rewards_bases_ships']]).sum(0)
halite_ships[stacked_ships.sum(0) == 0] = -1e-9
grid_size = halite_ships.shape[0]
if main_base_distances.max() > 0:
base_zero_dist_locations = np.where(main_base_distances == 0)
my_main_base_location = (base_zero_dist_locations[0][0],
base_zero_dist_locations[1][0])
else:
my_main_base_location = (-1, -1)
ships_on_rescue_mission = []
rescue_positions = np.where(on_rescue_mission)
ship_pos_to_key = {v[0]: k for k, v in player_obs[2].items()}
if np.any(on_rescue_mission):
for i in range(rescue_positions[0].size):
position = grid_size*rescue_positions[0][i] + rescue_positions[1][i]
ships_on_rescue_mission.append(ship_pos_to_key[position])
non_abandoned_base_locations = []
if non_abandoned_base_pos:
for base_id in range(non_abandoned_base_pos[0].size):
non_abandoned_base_locations.append((
non_abandoned_base_pos[0][base_id],
non_abandoned_base_pos[1][base_id]))
if 'prev_step' in history:
prev_base_defense_keys = history['prev_step']['base_defense_keys']
else:
prev_base_defense_keys = []
  # Reduce the max ship count when there is a risk of timing out and no longer
  # perform pack hunting (durations above 6 seconds only occur during local
  # debugging and are therefore ignored)
get_actions_duration = time.time() - get_actions_start_time
history['limit_ships_timeout'] = history['limit_ships_timeout'] or (
get_actions_duration > 2 and get_actions_duration < 6)
history['prev_step'] = {
'my_ship_actions': none_included_ship_actions,
'opponent_ships_sensible_actions': opponent_ships_sensible_actions,
'opponent_ships_sensible_actions_no_risk': (
opponent_ships_sensible_actions_no_risk),
'boxed_in_zero_halite_opponents': boxed_in_zero_halite_opponents,
'ship_plans': ship_plans,
'env_observation': env_observation,
'stacked_bases': stacked_bases,
'stacked_ships': stacked_ships,
'halite_ships': halite_ships,
'observation': observation,
'my_main_base_location': my_main_base_location,
'non_abandoned_base_locations': non_abandoned_base_locations,
'ships_on_rescue_mission': ships_on_rescue_mission,
'ships_on_box_mission': ships_on_box_mission,
'base_defense_keys': this_step_base_defense_keys,
'prev_base_defense_keys': prev_base_defense_keys,
'should_spawn_base_next_step': should_spawn_base_next_step,
'player_obs': player_obs,
'ballistic_attack_base_targets': ballistic_attack_base_targets,
'safe_to_collect_margin': safe_to_collect_margin,
}
return history
def get_numpy_random_generator(
config, observation, rng_action_seed, print_seed=False):
if rng_action_seed is None:
rng_action_seed = 0
if observation['step'] == 0 and print_seed:
print("Random acting seed: {}".format(rng_action_seed))
# Add the observation step to the seed so we are less predictable
step_seed = int(rng_action_seed+observation['step'])
return np.random.RandomState(step_seed)
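# Usage sketch (config is not used by this helper, so any placeholder works):
#   rng_step0 = get_numpy_random_generator(None, {'step': 0}, rng_action_seed=7)
#   rng_step1 = get_numpy_random_generator(None, {'step': 1}, rng_action_seed=7)
# Both generators are reproducible across reruns, but differ between steps
# because the step index is folded into the seed.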
def get_config_actions(config, observation, player_obs, env_observation,
env_config, history, rng_action_seed, verbose=False):
get_actions_start_time = time.time()
# if observation['step'] in [242] and (
# observation['rewards_bases_ships'][0][1].sum()) == 1:
# import pdb; pdb.set_trace()
# Set the random seed
np_rng = get_numpy_random_generator(
config, observation, rng_action_seed, print_seed=True)
# Obtain the ordered player ids (myself in the first position)
env_obs_ids = get_env_obs_ids(env_observation)
# Decide how many ships I can have attack bases aggressively
steps_remaining = env_config.episodeSteps-1-observation['step']
max_aggressive_attackers = int(len(player_obs[2]) - (3+0.25*steps_remaining))
ignore_bad_attack_directions = max_aggressive_attackers > 0
# Update the history based on what happened during the past observation
history, history_start_duration = update_history_start_step(
config, history, observation, env_observation, env_obs_ids, env_config,
np_rng)
# Compute the ship scores for all high level actions
(all_ship_scores, opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, weighted_base_mask,
opponent_ships_scaled, main_base_distances, ship_scores_duration,
halite_ships, player_influence_maps, boxed_in_zero_halite_opponents,
ignore_convert_positions, ship_diff_smoothed,
ballistic_attack_base_targets, safe_to_return_halites,
safe_to_collect_margin, always_attack_opponent_id,
likely_convert_opponent_positions,
possible_convert_opponent_positions, my_base_distances,
nearest_base_distances, history) = get_ship_scores(
config, observation, player_obs, env_config, np_rng,
ignore_bad_attack_directions, history, env_obs_ids, env_observation,
verbose)
# if observation['step'] in [242] and (
# observation['rewards_bases_ships'][0][1].sum()) == 1:
# import pdb; pdb.set_trace()
# Compute the coordinated high level ship plan
(ship_plans, my_next_bases, plan_ship_scores, base_attackers,
box_in_duration, history, ship_plans_duration,
inner_loop_ship_plans_duration, recompute_ship_plan_order_duration,
on_rescue_mission, ships_on_box_mission,
requested_save_conversion_budget, non_abandoned_base_pos,
this_step_base_defense_keys, should_spawn_base_next_step,
ship_plans_reordered, victory_formation) = get_ship_plans(
config, observation, player_obs, env_config, verbose,
copy.deepcopy(all_ship_scores), np_rng, weighted_base_mask,
steps_remaining, opponent_ships_sensible_actions, opponent_ships_scaled,
main_base_distances, history, env_observation, player_influence_maps,
ignore_convert_positions, ship_diff_smoothed, safe_to_return_halites,
safe_to_collect_margin, always_attack_opponent_id,
likely_convert_opponent_positions, possible_convert_opponent_positions,
my_base_distances, nearest_base_distances)
# Translate the ship high level plans to basic move/convert actions
(mapped_actions, remaining_budget, my_next_ships, my_next_halite,
updated_ship_pos, action_overrides,
ship_map_duration) = map_ship_plans_to_actions(
config, observation, player_obs, env_observation, env_config, verbose,
plan_ship_scores, all_ship_scores, ship_plans, np_rng,
ignore_bad_attack_directions, base_attackers, steps_remaining,
opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
history, env_obs_ids, opponent_ships_scaled, main_base_distances,
ignore_convert_positions, ballistic_attack_base_targets,
player_influence_maps)
ship_actions = copy.copy(mapped_actions)
# Decide for all bases whether to spawn or keep the base available
base_actions, remaining_budget = decide_existing_base_spawns(
config, observation, player_obs, my_next_bases, my_next_ships,
my_next_halite, env_config, remaining_budget, verbose, ship_plans,
updated_ship_pos, weighted_base_mask, history,
requested_save_conversion_budget, victory_formation)
# Add data to my history so I can update it appropriately at the beginning of
# the next step.
history = update_history_end_step(
history, observation, ship_actions, opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, ship_plans, player_obs,
env_observation, main_base_distances, on_rescue_mission,
boxed_in_zero_halite_opponents, ships_on_box_mission,
non_abandoned_base_pos, this_step_base_defense_keys,
should_spawn_base_next_step, ballistic_attack_base_targets,
safe_to_collect_margin, get_actions_start_time)
mapped_actions.update(base_actions)
return mapped_actions, history, ship_plans
def get_base_pos(base_data, grid_size):
base_pos = np.zeros((grid_size, grid_size), dtype=np.bool)
for _, v in base_data.items():
row, col = row_col_from_square_grid_pos(v, grid_size)
base_pos[row, col] = 1
return base_pos
def get_ship_halite_pos(ship_data, grid_size):
ship_pos = np.zeros((grid_size, grid_size), dtype=np.bool)
ship_halite = np.zeros((grid_size, grid_size), dtype=np.float32)
for _, v in ship_data.items():
row, col = row_col_from_square_grid_pos(v[0], grid_size)
ship_pos[row, col] = 1
ship_halite[row, col] = v[1]
return ship_pos, ship_halite
def structured_env_obs(env_configuration, env_observation, active_id):
grid_size = env_configuration.size
halite = np.array(env_observation['halite']).reshape([
grid_size, grid_size])
num_episode_steps = env_configuration.episodeSteps
step = env_observation.step
relative_step = step/(num_episode_steps-2)
num_agents = len(env_observation.players)
rewards_bases_ships = []
for i in range(num_agents):
player_obs = env_observation.players[i]
reward = player_obs[0]
base_pos = get_base_pos(player_obs[1], grid_size)
ship_pos, ship_halite = get_ship_halite_pos(player_obs[2], grid_size)
rewards_bases_ships.append((reward, base_pos, ship_pos, ship_halite))
# Move the agent's rewards_bases_ships to the front of the list
agent_vals = rewards_bases_ships.pop(active_id)
rewards_bases_ships = [agent_vals] + rewards_bases_ships
return {
'halite': halite,
'relative_step': relative_step,
'rewards_bases_ships': rewards_bases_ships,
'step': step,
}
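# Shape sketch of the dict returned above (sizes assume a 21x21 Halite board):
#   halite:               float ndarray of shape (21, 21)
#   relative_step:        float in [0, 1]
#   rewards_bases_ships:  list of (reward, base_pos, ship_pos, ship_halite)
#                         tuples with the active player's tuple first;
#                         base_pos/ship_pos are boolean (21, 21) grids and
#                         ship_halite is a float (21, 21) grid
#   step:                 int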
###############################################################################
HISTORY = {}
def my_agent(observation, env_config, **kwargs):
global HISTORY
rng_action_seed = kwargs.get('rng_action_seed', 0)
active_id = observation.player
current_observation = structured_env_obs(env_config, observation, active_id)
player_obs = observation.players[active_id]
mapped_actions, HISTORY, ship_plans = get_config_actions(
CONFIG, current_observation, player_obs, observation, env_config, HISTORY,
rng_action_seed)
if LOCAL_MODE:
# This is to allow for debugging of the history outside of the agent
return mapped_actions, copy.deepcopy(HISTORY)
else:
print(ship_plans)
return mapped_actions
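# Minimal local-run sketch (assumes the kaggle_environments package is
# installed and that LOCAL_MODE is False so the agent returns plain actions):
#   from kaggle_environments import make
#   env = make("halite", debug=True)
#   env.run([my_agent, "random", "random", "random"])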
|
the-stack_106_20331
|
# Copyright: 2009 PathsScale
# Copyright: 2010 Brian Harring <[email protected]>
# License: GPL2/BSD
import time
from snakeoil.data_source import text_data_source
from snakeoil.osutils import pjoin, unlink_if_exists
from snakeoil.process.spawn import spawn
from pkgcore.fs import tar, fs, contents
OPS = {
'>=': (True, '>='),
'!<': (False, '<<'),
}
def parsedeps(s):
#pkgs = s #(' ')
pkgs = s.split(' ')
deps = []
cons = []
for pkg in pkgs:
cat, name = pkg.split('/', 1)
name = name.split('-', 1)
if len(name) == 1:
            # no version component; unwrap the single-element split
            name, ver = name[0], None
else:
name, ver = name
cstart = min(i for (i, c) in enumerate(cat) if c.isalpha())
op, cat = cat[:cstart], cat[cstart:]
if not op:
deps.append((name, None, None))
continue
dop = OPS[op]
if dop[0]:
deps.append((name, dop[1], ver))
else:
cons.append((name, dop[1], ver))
sdeps = []
for name, op, ver in deps:
if op is None or ver is None:
assert op is None and ver is None
sdeps.append(name)
continue
sdeps.append('%s (%s %s)' % (name, op, ver))
scons = []
for name, op, ver in cons:
if op is None or ver is None:
assert op is None and ver is None
            scons.append(name)
continue
scons.append('%s (%s %s)' % (name, op, ver))
#return {'Depends': ', '.join(sdeps), 'Conflicts': ', '.join(scons)}
ret = {'Depends': ', '.join(sdeps)}
    if scons:
ret['Conflicts'] = ', '.join(scons)
return ret
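# Hedged example of the translation performed by parsedeps (package names are
# made up; versioned atoms map onto Debian-style relations):
#   parsedeps(">=dev-libs/foo-1.2 !<dev-libs/bar-3.0")
#   -> {'Depends': 'foo (>= 1.2)', 'Conflicts': 'bar (<< 3.0)'}
# Unprefixed atoms such as "dev-libs/baz" land in Depends without a version.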
def write(tempspace, finalpath, pkg, cset=None, platform='', maintainer='', compressor='gz'):
# The debian-binary file
if cset is None:
cset = pkg.contents
# The data.tar.gz file
data_path = pjoin(tempspace, 'data.tar.gz')
tar.write_set(cset, data_path, compressor='gz', absolute_paths=False)
# Control data file
control = {}
control['Package'] = pkg.package
#control['Section'] = pkg.category
control['Version'] = pkg.fullver
control['Architecture'] = platform
if maintainer:
control['Maintainer'] = maintainer
control['Description'] = pkg.description
pkgdeps = "%s" % (pkg.rdepend,)
if (pkgdeps is not None and pkgdeps != ""):
control.update(parsedeps(pkgdeps))
control_ds = text_data_source("".join("%s: %s\n" % (k, v)
for (k, v) in control.items()))
control_path = pjoin(tempspace, 'control.tar.gz')
tar.write_set(
contents.contentsSet([
fs.fsFile('control',
{'size':len(control_ds.text_fileobj().getvalue())},
data=control_ds,
uid=0, gid=0, mode=0o644, mtime=time.time())
]),
control_path, compressor='gz')
dbinary_path = pjoin(tempspace, 'debian-binary')
with open(dbinary_path, 'w') as f:
f.write("2.0\n")
ret = spawn(['ar', '-r', finalpath, dbinary_path, data_path, control_path])
if ret != 0:
unlink_if_exists(finalpath)
raise Exception("failed creating archive: return code %s" % (ret,))
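# Rough usage sketch (names are illustrative rather than a real pkgcore entry
# point): given a built package object and a scratch directory,
#   write('/tmp/work', '/tmp/out/foo_1.2_amd64.deb', pkg,
#         platform='amd64', maintainer='Someone <[email protected]>')
# writes debian-binary, data.tar.gz and control.tar.gz into the scratch
# directory and then ar's them together into the final .deb.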
|
the-stack_106_20332
|
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the Cirq interface routines
"""
import cirq
import pytest
import pennylane as qml
from pennylane import numpy as np
from pennylane_cirq.cirq_interface import CirqOperation, unitary_matrix_gate
class TestCirqOperation:
"""Tests the CirqOperation class."""
def test_init(self):
"""Tests that the class is properly initialized."""
fun = lambda x: cirq.Ry(x)
operation = CirqOperation(fun)
assert operation.parametrized_cirq_gates is None
assert operation.parametrization == fun
assert not operation.is_inverse
def test_parametrize(self):
"""Tests that parametrize yields the correct queue of operations."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
operation.parametrize(0.1, 0.2, 0.3)
assert operation.parametrized_cirq_gates[0] == cirq.X
assert operation.parametrized_cirq_gates[1] == cirq.Ry(0.1)
assert operation.parametrized_cirq_gates[2] == cirq.Rx(0.2)
assert operation.parametrized_cirq_gates[3] == cirq.Z
assert operation.parametrized_cirq_gates[4] == cirq.Rz(0.3)
def test_apply(self):
"""Tests that the operations in the queue are correctly applied."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
operation.parametrize(0.1, 0.2, 0.3)
qubit = cirq.LineQubit(1)
gate_applications = list(operation.apply(qubit))
assert gate_applications[0] == cirq.X.on(qubit)
assert gate_applications[1] == cirq.Ry(0.1).on(qubit)
assert gate_applications[2] == cirq.Rx(0.2).on(qubit)
assert gate_applications[3] == cirq.Z.on(qubit)
assert gate_applications[4] == cirq.Rz(0.3).on(qubit)
def test_apply_not_parametrized(self):
"""Tests that the proper error is raised if an Operation is applied
that was not parametrized before."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
qubit = cirq.LineQubit(1)
with pytest.raises(
qml.DeviceError, match="CirqOperation must be parametrized before it can be applied."
):
operation.apply(qubit)
def test_inv(self):
"""Test that inv inverts the gate and applying inv twice yields the initial state."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
assert not operation.is_inverse
operation.inv()
assert operation.is_inverse
operation.inv()
assert not operation.is_inverse
def test_inv_apply(self):
"""Tests that the operations in the queue are correctly applied if the
CirqOperation is inverted."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
operation.inv()
operation.parametrize(0.1, 0.2, 0.3)
qubit = cirq.LineQubit(1)
gate_applications = list(operation.apply(qubit))
assert gate_applications[0] == cirq.Rz(-0.3).on(qubit)
assert gate_applications[1] == (cirq.Z ** -1).on(qubit)
assert gate_applications[2] == cirq.Rx(-0.2).on(qubit)
assert gate_applications[3] == cirq.Ry(-0.1).on(qubit)
assert gate_applications[4] == (cirq.X ** -1).on(qubit)
def test_inv_error(self):
"""Test that inv raises an error if the CirqOperation was already parametrized."""
operation = CirqOperation(
lambda a, b, c: [cirq.X, cirq.Ry(a), cirq.Rx(b), cirq.Z, cirq.Rz(c)]
)
operation.parametrize(0.1, 0.2, 0.3)
with pytest.raises(
qml.DeviceError, match="CirqOperation can't be inverted after it was parametrized"
):
operation.inv()
class TestMethods:
"""Tests the independent methods in the Cirq interface."""
@pytest.mark.parametrize(
"U,expected_cirq_operation",
[
([[1, 0], [0, -1]], cirq.SingleQubitMatrixGate(np.array([[1, 0], [0, -1]])),),
([[0, 1j], [-1j, 0]], cirq.SingleQubitMatrixGate(np.array([[0, 1j], [-1j, 0]])),),
(
[[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, 1j], [0, 0, -1j, 0]],
cirq.TwoQubitMatrixGate(
np.array([[0, 1j, 0, 0], [-1j, 0, 0, 0], [0, 0, 0, 1j], [0, 0, -1j, 0]])
),
),
],
)
def test_unitary_matrix_gate(self, U, expected_cirq_operation):
"""Tests that the correct Cirq operation is returned for the unitary matrix gate."""
assert unitary_matrix_gate(np.array(U)) == expected_cirq_operation
@pytest.mark.parametrize("U", [np.eye(6), np.eye(10), np.eye(3), np.eye(3, 5)])
def test_unitary_matrix_gate_error(self, U):
"""Tests that an error is raised if the given matrix is of wrong format."""
with pytest.raises(
qml.DeviceError,
match="Cirq only supports single-qubit and two-qubit unitary matrix gates.",
):
unitary_matrix_gate(np.array(U))
|
the-stack_106_20335
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""NeuroM neuron checking functions.
Contains functions for checking validity of neuron neurites and somata.
"""
from itertools import chain, islice
import numpy as np
from neurom import NeuriteType
from neurom.check import CheckResult
from neurom.check.morphtree import get_flat_neurites
from neurom.core import Section, iter_neurites, iter_sections, iter_segments
from neurom.core.dataformat import COLS
from neurom.features import neuritefunc as _nf
from neurom.morphmath import section_length, segment_length
def _read_neurite_type(neurite):
"""Simply read the stored neurite type."""
return neurite.type
def has_axon(neuron, treefun=_read_neurite_type):
"""Check if a neuron has an axon.
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result
"""
return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites))
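# Usage sketch shared by the checks in this module (load_neuron lives in the
# top-level neurom package; the file name here is made up):
#   import neurom
#   nrn = neurom.load_neuron('my_cell.swc')
#   if not has_axon(nrn).status:
#       print('no axon detected')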
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
"""Check if a neuron has apical dendrites.
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
"""
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number)
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
"""Check if a neuron has basal dendrites.
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of basal dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
"""
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):
"""Check that a neuron has no flat neurites.
Arguments:
neuron(Neuron): The neuron object to test
tol(float): tolerance
method(string): way of determining flatness, 'tolerance', 'ratio' \
as described in :meth:`neurom.check.morphtree.get_flat_neurites`
Returns:
CheckResult with result
"""
return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
"""Check presence of neuron segments with length not above threshold.
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments
"""
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for i, s in enumerate(zip(p[:-1], p[1:])):
if segment_length(s) <= threshold:
bad_ids.append((sec.id, i))
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_all_nonzero_section_lengths(neuron, threshold=0.0):
"""Check presence of neuron sections with length not above threshold.
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a section length is considered
to be non-zero
Returns:
CheckResult with result including list of ids of bad sections
"""
bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites)
if section_length(s.points) <= threshold]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_all_nonzero_neurite_radii(neuron, threshold=0.0):
"""Check presence of neurite points with radius not above threshold.
Arguments:
neuron(Neuron): The neuron object to test
threshold: value above which a radius is considered to be non-zero
Returns:
CheckResult with result including list of (section ID, point ID) pairs
of zero-radius points
"""
bad_ids = []
seen_ids = set()
for s in _nf.iter_sections(neuron):
for i, p in enumerate(s.points):
info = (s.id, i)
if p[COLS.R] <= threshold and info not in seen_ids:
seen_ids.add(info)
bad_ids.append(info)
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_nonzero_soma_radius(neuron, threshold=0.0):
"""Check if soma radius not above threshold.
Arguments:
neuron(Neuron): The neuron object to test
threshold: value above which the soma radius is considered to be non-zero
Returns:
CheckResult with result
"""
return CheckResult(neuron.soma.radius > threshold)
def has_no_jumps(neuron, max_distance=30.0, axis='z'):
"""Check if there are jumps (large movements in the `axis`).
Arguments:
neuron(Neuron): The neuron object to test
max_distance(float): value above which consecutive z-values are
considered a jump
axis(str): one of x/y/z, which axis to check for jumps
Returns:
CheckResult with result list of ids of bad sections
"""
bad_ids = []
axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z, }[axis.lower()]
for neurite in iter_neurites(neuron):
section_segment = ((sec, seg) for sec in iter_sections(neurite)
for seg in iter_segments(sec))
for sec, (p0, p1) in islice(section_segment, 1, None): # Skip neurite root segment
if max_distance < abs(p0[axis] - p1[axis]):
bad_ids.append((sec.id, [p0, p1]))
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_root_node_jumps(neuron, radius_multiplier=2):
"""Check that the neurites have no root node jumps.
    Their first point should not be further than `radius_multiplier * soma radius` from the
soma center
"""
bad_ids = []
for neurite in iter_neurites(neuron):
p0 = neurite.root_node.points[0, COLS.XYZ]
distance = np.linalg.norm(p0 - neuron.soma.center)
if distance > radius_multiplier * neuron.soma.radius:
bad_ids.append((neurite.root_node.id, [p0]))
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_fat_ends(neuron, multiple_of_mean=2.0, final_point_count=5):
"""Check if leaf points are too large.
Arguments:
neuron(Neuron): The neuron object to test
multiple_of_mean(float): how many times larger the final radius
has to be compared to the mean of the final points
final_point_count(int): how many points to include in the mean
Returns:
CheckResult with result list of ids of bad sections
Note:
A fat end is defined as a leaf segment whose last point is larger
by a factor of `multiple_of_mean` than the mean of the points in
`final_point_count`
"""
bad_ids = []
for leaf in _nf.iter_sections(neuron.neurites, iterator_type=Section.ileaf):
mean_radius = np.mean(leaf.points[1:][-final_point_count:, COLS.R])
if mean_radius * multiple_of_mean <= leaf.points[-1, COLS.R]:
bad_ids.append((leaf.id, leaf.points[-1:]))
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_narrow_start(neuron, frac=0.9):
"""Check if neurites have a narrow start.
Arguments:
neuron(Neuron): The neuron object to test
frac(float): Ratio that the second point must be smaller than the first
Returns:
CheckResult with a list of all first segments of neurites with a narrow start
"""
bad_ids = [(neurite.root_node.id, neurite.root_node.points[np.newaxis, 1])
for neurite in neuron.neurites
if neurite.root_node.points[0][COLS.R] < frac * neurite.root_node.points[1][COLS.R]]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_dangling_branch(neuron):
"""Check if the neuron has dangling neurites.
Are considered dangling
- dendrites whose first point is too far from the soma center
- axons whose first point is too far from the soma center AND from
any point belonging to a dendrite
Arguments:
neuron(Neuron): The neuron object to test
Returns:
CheckResult with a list of all first segments of dangling neurites
"""
soma_center = neuron.soma.points[:, COLS.XYZ].mean(axis=0)
recentered_soma = neuron.soma.points[:, COLS.XYZ] - soma_center
radius = np.linalg.norm(recentered_soma, axis=1)
soma_max_radius = radius.max()
dendritic_points = np.array(list(chain.from_iterable(n.points
for n in iter_neurites(neuron)
if n.type != NeuriteType.axon)))
def is_dangling(neurite):
"""Is the neurite dangling ?."""
starting_point = neurite.points[0][COLS.XYZ]
if np.linalg.norm(starting_point - soma_center) - soma_max_radius <= 12.:
return False
if neurite.type != NeuriteType.axon:
return True
distance_to_dendrites = np.linalg.norm(dendritic_points[:, COLS.XYZ] - starting_point,
axis=1)
return np.all(distance_to_dendrites >= 2 * dendritic_points[:, COLS.R] + 2)
bad_ids = [(n.root_node.id, [n.root_node.points[0]])
for n in iter_neurites(neuron) if is_dangling(n)]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_narrow_neurite_section(neuron,
neurite_filter,
radius_threshold=0.05,
considered_section_min_length=50):
"""Check if the neuron has dendrites with narrow sections.
Arguments:
neuron(Neuron): The neuron object to test
neurite_filter(callable): filter the neurites by this callable
        radius_threshold(float): radii below this are considered narrow
considered_section_min_length(float): sections with length below
this are not taken into account
Returns:
CheckResult with result. result.info contains the narrow section ids and their
first point
"""
considered_sections = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter)
if sec.length > considered_section_min_length)
def narrow_section(section):
"""Select narrow sections."""
return section.points[:, COLS.R].mean() < radius_threshold
bad_ids = [(section.id, section.points[np.newaxis, 1])
for section in considered_sections if narrow_section(section)]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_multifurcation(neuron):
"""Check if a section has more than 3 children."""
bad_ids = [(section.id, section.points[np.newaxis, -1]) for section in iter_sections(neuron)
if len(section.children) > 3]
return CheckResult(len(bad_ids) == 0, bad_ids)
def has_no_single_children(neuron):
"""Check if the neuron has sections with only one child section."""
bad_ids = [section.id for section in iter_sections(neuron) if len(section.children) == 1]
return CheckResult(len(bad_ids) == 0, bad_ids)
|
the-stack_106_20336
|
"""
molecule.py
A python package for the MolSSI Software Summer School.
Contains a molecule class
"""
import numpy as np
from .measure import calculate_angle, calculate_distance
class Molecule:
def __init__(self, name, symbols, coordinates):
if isinstance(name, str):
self.name = name
else:
raise TypeError("Name is not a string.")
self.symbols = symbols
self._coordinates = coordinates
self.bonds = self.build_bond_list()
@property
def num_atoms(self):
return len(self._coordinates)
@property
def coordinates(self):
return self._coordinates
@coordinates.setter
def coordinates(self, new_coordinates):
        self._coordinates = new_coordinates  # update the private attribute, then rebuild bonds
self.bonds = self.build_bond_list()
def build_bond_list(self, max_bond=2.93, min_bond=0):
"""
Build a list of bonds based on a distance criteria.
Atoms within a specified distance of one another will be considered bonded.
Parameters
----------
max_bond : float, optional
min_bond : float, optional
Returns
-------
bond_list : list
List of bonded atoms. Returned as list of tuples where the values are the atom indices.
"""
bonds = {}
for atom1 in range(self.num_atoms):
for atom2 in range(atom1, self.num_atoms):
distance = calculate_distance(self.coordinates[atom1], self.coordinates[atom2])
if distance > min_bond and distance < max_bond:
bonds[(atom1, atom2)] = distance
return bonds
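# The bond criterion can also be queried directly with non-default cutoffs
# (units follow whatever the coordinates are expressed in), e.g. for a
# constructed Molecule instance `mol`:
#   short_bonds = mol.build_bond_list(max_bond=1.6)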
if __name__ == "__main__":
# Do something if this file is invoked on its own
random_coordinates = np.random.random([3, 3])
name = "my_molecule"
symbols = ["H", "O", "H"]
my_molecule = Molecule(name, symbols, random_coordinates)
print(F'There are {len(my_molecule.bonds)} bonds')
print(F'The coordinates are {my_molecule.coordinates}')
random_coordinates[0] += 100
my_molecule.coordinates = random_coordinates
print(F'\n\nThere are {len(my_molecule.bonds)} bonds')
print(F'The coordinates are {my_molecule.coordinates}')
pass
|