filename | text
---|---|
the-stack_106_27967 | import datetime
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, Conv2D, Dense, Flatten, Dropout,
                                     GlobalMaxPooling2D, MaxPooling2D, BatchNormalization)
HEIGHT = 32
WIDTH = 32
NUM_CHANNELS = 3
NUM_CLASSES = 10
NUM_GPUS = 1
BATCH_SIZE = 128
NUM_EPOCHS = 100
NUM_TRAIN_SAMPLES = 50000
EXPERIMENT_NAME = "cifar_basic_normalize_V2_100_epochs_"
(x, y), (x_test, y_test) = keras.datasets.cifar10.load_data()
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
def augmentation(x, y):
x = tf.image.resize_with_crop_or_pad(
x, HEIGHT + 8, WIDTH + 8)
x = tf.image.random_crop(x, [HEIGHT, WIDTH, NUM_CHANNELS])
# x = tf.image.random_flip_left_right(x)
return x, y
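# Note: augmentation() above is defined but not wired into the input pipeline below
# (this run is the "basic_normalize" experiment). A minimal sketch of how it could be
# chained in, purely illustrative and not part of the original experiment:
# train_dataset = train_dataset.map(augmentation).map(normalize)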
def normalize(x, y):
x = tf.cast(x, tf.float32)
x /= 255.0 # normalize to [0,1] range
return x, y
train_dataset = (train_dataset.map(normalize)
                 .shuffle(NUM_TRAIN_SAMPLES)
                 .batch(BATCH_SIZE, drop_remainder=True))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = (test_dataset.map(normalize).batch(BATCH_SIZE, drop_remainder=True))
# def schedule(epoch):
# initial_learning_rate = BASE_LEARNING_RATE * BATCH_SIZE / 128
# learning_rate = initial_learning_rate
# for mult, start_epoch in LR_SCHEDULE:
# if epoch >= start_epoch:
# learning_rate = initial_learning_rate * mult
# else:
# break
# tf.summary.scalar('learning rate', data=learning_rate, step=epoch)
# return learning_rate
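# A concrete, runnable version of the commented-out schedule above, kept here as a sketch only:
# BASE_LEARNING_RATE and LR_SCHEDULE are assumptions (they are not defined in this script),
# and the resulting callback is intentionally NOT passed to model.fit() below.
BASE_LEARNING_RATE = 0.001  # assumption
LR_SCHEDULE = [(1.0, 0), (0.1, 60), (0.01, 80)]  # assumption: (multiplier, start_epoch) pairs
def schedule(epoch):
    initial_learning_rate = BASE_LEARNING_RATE * BATCH_SIZE / 128
    learning_rate = initial_learning_rate
    for mult, start_epoch in LR_SCHEDULE:
        if epoch >= start_epoch:
            learning_rate = initial_learning_rate * mult
        else:
            break
    tf.summary.scalar('learning rate', data=learning_rate, step=epoch)
    return learning_rate
lr_callback = LearningRateScheduler(schedule)  # illustrative; not added to the callbacks list below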
i = Input(shape=(HEIGHT, WIDTH, NUM_CHANNELS))
x = Conv2D(128, (3, 3), activation='relu')(i)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(256, (3, 3), activation='relu')(x)
x = MaxPooling2D((2, 2))(x)
x = Conv2D(512, (3, 3), activation='relu')(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
# final layer: class logits (no softmax here; the loss below uses from_logits=True)
x = Dense(NUM_CLASSES)(x)
model = Model(i, x)
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,name='Adam'),
metrics=['accuracy'])
tmp_dir = '/raid/developers/uib49306/tmp/'
log_dir= tmp_dir + EXPERIMENT_NAME + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
file_writer = tf.summary.create_file_writer(log_dir + "/metrics")
file_writer.set_as_default()
tensorboard_callback = TensorBoard(
log_dir=log_dir,
update_freq='batch',
histogram_freq=1)
filepath= log_dir + "/model_checkpoints"
checkpoint_filepath = filepath + "/saved-model-{epoch:02d}-{val_accuracy:.2f}.h5"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor='val_accuracy',
mode='max',
save_best_only=True)
model.fit(train_dataset,
epochs=NUM_EPOCHS,
validation_data=test_dataset,
validation_freq=1,
callbacks=[tensorboard_callback, model_checkpoint_callback])
|
the-stack_106_27968 | # -*- coding: utf-8 -*-
import calendar
from bson import ObjectId
from datetime import datetime
from modularodm import fields, Q
from framework.mongo import StoredObject
from framework.guid.model import GuidStoredObject
from website.settings import DOMAIN
from website.util import web_url_for, api_url_for
from website.addons.badges.util import acquire_badge_image
class Badge(GuidStoredObject):
redirect_mode = 'proxy'
_id = fields.StringField(primary=True)
creator = fields.ForeignField('badgesusersettings', backref='creator')
is_system_badge = fields.BooleanField(default=False)
#Open Badge protocol
name = fields.StringField()
description = fields.StringField()
image = fields.StringField()
criteria = fields.StringField()
#TODO implement tags and alignment
alignment = fields.DictionaryField(list=True)
tags = fields.StringField(list=True)
@classmethod
def get_system_badges(cls):
return cls.find(Q('is_system_badge', 'eq', True))
@classmethod
def create(cls, user_settings, badge_data, save=True):
badge = cls()
badge.creator = user_settings
badge.name = badge_data['badgeName']
badge.description = badge_data['description']
badge.criteria = badge_data['criteria']
badge._ensure_guid()
badge.image = acquire_badge_image(badge_data['imageurl'], badge._id)
if not badge.image:
raise IOError
if save:
badge.save()
return badge
@property
def description_short(self):
words = self.description.split(' ')
if len(words) < 9:
return ' '.join(words)
return '{}...'.format(' '.join(words[:9]))
#TODO Auto link urls?
@property
def criteria_list(self):
tpl = '<ul>{}</ul>'
stpl = '<li>{}</li>'
lines = self.criteria.split('\n')
return tpl.format(' '.join([stpl.format(line) for line in lines if line])) # Please dont kill me Steve
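    # Illustration (assumed input): a criteria string of "Sign up\nUpload a file" renders as
    # '<ul><li>Sign up</li> <li>Upload a file</li></ul>'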
@property
def assertions(self):
return self.badgeassertion__assertion
@property
def awarded_count(self):
return len(self.assertions)
@property
def unique_awards_count(self):
return len({assertion.node._id for assertion in self.assertions})
@property
def deep_url(self):
return web_url_for('view_badge', bid=self._id)
@property
def url(self):
return web_url_for('view_badge', bid=self._id)
def make_system_badge(self, save=True):
self.is_system_badge = True
self.save()
def to_json(self):
return {
'id': self._id,
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'alignment': self.alignment,
'tags': self.tags,
}
def to_openbadge(self):
return {
'name': self.name,
'description': self.description,
'image': self.image,
'criteria': self.criteria,
'issuer': api_url_for('get_organization_json', _absolute=True, uid=self.creator.owner._id),
'url': '{0}{1}/json/'.format(DOMAIN, self._id), # web url for and GUIDs?
'alignment': self.alignment,
'tags': self.tags,
}
#TODO verification hosted and signed
class BadgeAssertion(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
#Backrefs
badge = fields.ForeignField('badge', backref='assertion')
node = fields.ForeignField('node', backref='awarded')
_awarder = fields.ForeignField('badgesusersettings')
#Custom fields
revoked = fields.BooleanField(default=False)
reason = fields.StringField()
#Required
issued_on = fields.IntegerField(required=True)
#Optional
evidence = fields.StringField()
expires = fields.StringField()
@classmethod
def create(cls, badge, node, evidence=None, save=True, awarder=None):
b = cls()
b.badge = badge
b.node = node
b.evidence = evidence
b.issued_on = calendar.timegm(datetime.utctimetuple(datetime.utcnow()))
b._awarder = awarder
if save:
b.save()
return b
@property
def issued_date(self):
return datetime.fromtimestamp(self.issued_on).strftime('%Y/%m/%d')
@property
def verify(self, vtype='hosted'):
return {
'type': 'hosted',
'url': api_url_for('get_assertion_json', _absolute=True, aid=self._id)
}
@property
def recipient(self):
return {
            'identity': self.node._id,
'type': 'osfnode', # TODO Could be an email?
'hashed': False
}
@property
def awarder(self):
if self.badge.is_system_badge and self._awarder:
return self._awarder
return self.badge.creator
def to_json(self):
return {
'uid': self._id,
'recipient': self.node._id,
'badge': self.badge._id,
'verify': self.verify,
'issued_on': self.issued_date,
'evidence': self.evidence,
'expires': self.expires
}
def to_openbadge(self):
return {
'uid': self._id,
'recipient': self.recipient,
'badge': '{}{}/json/'.format(DOMAIN, self.badge._id), # GUIDs Web url for
'verify': self.verify,
'issuedOn': self.issued_on,
'evidence': self.evidence,
'expires': self.expires
} |
the-stack_106_27969 | #!/usr/bin/env python
"""
Read the contents of the "show_arp.txt" file. Using a for loop, iterate over the lines of this
file. Process the lines of the file and separate out the ip_addr and mac_addr for each entry into a
separate variable.
Add a conditional statement that searches for '10.220.88.1'. If 10.220.88.1 is found, print out the
string "Default gateway IP/Mac" and the corresponding IP address and MAC Address.
Using a conditional statement, also search for '10.220.88.30'. If this IP address is found, then
print out "Arista3 IP/Mac is" and the corresponding ip_addr and mac_addr.
Keep track of whether you have found both the Default Gateway and the Arista3 switch. Once you have
found both of these devices, 'break' out of the for loop.
"""
from __future__ import unicode_literals, print_function
with open("show_arp.txt") as f:
show_arp = f.read()
print()
found1, found2 = (False, False)
for line in show_arp.splitlines():
if "protocol" in line.lower():
continue
fields = line.split()
ip_addr = fields[1]
mac_addr = fields[3]
if ip_addr == "10.220.88.1":
print("Default gateway IP/Mac is: {}/{}".format(ip_addr, mac_addr))
found1 = True
elif ip_addr == "10.220.88.30":
print("Arista3 IP/Mac is: {}/{}".format(ip_addr, mac_addr))
found2 = True
if found1 and found2:
print("Exiting...")
break
print()
|
the-stack_106_27970 | """A CRFSuite-based mention annotator."""
import pickle
import time
from pathlib import Path
from typing import IO, Dict, Iterable, List, Optional, Sequence, Union
from attr import attrib, attrs
from attr.validators import instance_of
from nerpy.annotator import SequenceMentionAnnotator
from nerpy.document import Document, Mention, MentionType
from nerpy.encoding import MentionEncoder
from nerpy.features import SentenceFeatureExtractor, SequenceFeatures, SequenceLabels
from sequencemodels import ViterbiStructuredPerceptron
# Due to the model object, cannot be frozen
@attrs
class SequenceModelsAnnotator(SequenceMentionAnnotator):
_mention_type: MentionType = attrib(validator=instance_of(MentionType))
_feature_extractor: SentenceFeatureExtractor = attrib(
validator=instance_of(SentenceFeatureExtractor)
)
# Mypy raised a false positive about a concrete class being needed
_mention_encoder: MentionEncoder = attrib(
validator=instance_of(MentionEncoder) # type: ignore
)
_model: ViterbiStructuredPerceptron = attrib(
validator=instance_of(ViterbiStructuredPerceptron)
)
# TODO: Make serialization model work sanely across all annotators
@classmethod
def from_model(
cls,
mention_type: MentionType,
feature_extractor: SentenceFeatureExtractor,
mention_encoder: MentionEncoder,
model_path: Union[str, Path],
) -> "SequenceModelsAnnotator":
with open(model_path, "rb") as model_file:
model = pickle.load(model_file)
return cls(mention_type, feature_extractor, mention_encoder, model)
@classmethod
def for_training(
cls,
mention_type: MentionType,
feature_extractor: Optional[SentenceFeatureExtractor],
mention_encoder: MentionEncoder,
) -> "SequenceModelsAnnotator":
model = ViterbiStructuredPerceptron()
return cls(mention_type, feature_extractor, mention_encoder, model)
def mentions(self, doc: Document) -> Sequence[Mention]:
mentions: List[Mention] = []
for sentence in doc.sentences:
sent_x = self._feature_extractor.extract(sentence, doc)
pred_y = self._model.predict(sent_x)
mentions.extend(self._mention_encoder.decode_mentions(sentence, pred_y))
return mentions
@property
def mention_encoder(self) -> MentionEncoder:
return self._mention_encoder
@property
def feature_extractor(self) -> SentenceFeatureExtractor:
return self._feature_extractor
def train(
self,
docs: Iterable[Document],
*,
epochs: int,
averaged: bool = True,
verbose: bool = False,
log_file: Optional[IO[str]] = None,
) -> None:
mention_count = 0
token_count = 0
document_count = 0
sentence_count = 0
print("Extracting features", file=log_file)
start_time = time.perf_counter()
features: List[SequenceFeatures] = []
labels: List[SequenceLabels] = []
for doc in docs:
for sentence, mentions in doc.sentences_with_mentions():
sent_x = self._feature_extractor.extract(sentence, doc)
sent_y = self._mention_encoder.encode_mentions(sentence, mentions)
assert len(sent_x) == len(sent_y)
features.append(sent_x)
labels.append(sent_y)
mention_count += len(mentions)
token_count += len(sent_x)
sentence_count += 1
document_count += 1
print(
"Feature extraction took {} seconds".format(time.perf_counter() - start_time),
file=log_file,
)
print(
f"Extracted features for {document_count} documents, {sentence_count} sentences, "
f"{token_count} tokens, {mention_count} mentions",
file=log_file,
)
print("Training", file=log_file)
start_time = time.perf_counter()
self._model.train(
features, labels, epochs=epochs, averaged=averaged, verbose=verbose
)
print(
"Training took {} seconds".format(time.perf_counter() - start_time),
file=log_file,
)
def train_seqmodels(
mention_encoder: MentionEncoder,
feature_extractor: SentenceFeatureExtractor,
mention_type: MentionType,
train_docs: Iterable[Document],
train_params: Dict,
*,
verbose: bool = False,
) -> SequenceModelsAnnotator:
epochs = train_params["max_iterations"]
averaged = bool(train_params.get("averaged", True))
annotator = SequenceModelsAnnotator.for_training(
mention_type, feature_extractor, mention_encoder
)
annotator.train(
train_docs, epochs=epochs, averaged=averaged, verbose=verbose,
)
return annotator
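# Hedged usage sketch -- every name below (encoder, extractor, mention_type, train_docs,
# some_document) is an assumption, not defined in this module:
# annotator = train_seqmodels(encoder, extractor, mention_type, train_docs,
#                             {"max_iterations": 10, "averaged": True}, verbose=True)
# predicted_mentions = annotator.mentions(some_document)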
|
the-stack_106_27972 | """
Implementation of "Attention is All You Need"
"""
import os
import random
from typing import Any, Dict, List, Optional, Tuple
import fairseq
import torch.nn as nn
import torch
from fairseq.models.bart import BARTModel
from onmt.encoders.encoder import EncoderBase
import logging
class BARTEncoder(EncoderBase):
"""
large: 24-layer, 1024-hidden, 16-heads, 355M parameters
"""
def __init__(self, model_name, embeddings, cache_dir, max_src_length, vocab_size, opt,
bart_model=None, prev_checkpoint=None):
super(BARTEncoder, self).__init__()
self.model_name = model_name
self.opt = opt
if bart_model is None:
bart_dir = os.path.join(opt.cache_dir, 'bart.large')
bart_path = os.path.join(bart_dir, 'model.pt')
assert os.path.exists(bart_path), 'BART checkpoint is not found! %s ' % bart_path
logging.getLogger().info('Loading BART encoder from %s' % bart_path)
bart_model = BARTModel.from_pretrained(bart_dir, checkpoint_file='model.pt')
if prev_checkpoint:
print("Load checkpoint state dict for encoder")
bart_model.model.load_state_dict(prev_checkpoint['model'], strict=True)
self.model = bart_model.model.encoder
self.embed_tokens = self.model.embed_tokens
self.embed_positions = self.model.embed_positions
self.embed_fields = self.model.embed_tokens
# override the forward_embedding() function to support src label embedding
self.model.forward_embedding = forward_embedding
self.model.forward = forward_bart_encoder
# BART default max length of position embedding is 1024 (max_source_positions and max_target_positions)
pos_emb_len = self.embed_positions.num_embeddings
if max_src_length > pos_emb_len:
emb_len = max_src_length + 8
            # the new pos_embedding must be longer than src_length by at least 2
            # (1 for the leading CLS token, 1 for an offset); fairseq offsets positions by
            # padding_idx + 1, so positions effectively start at 2 when padding_idx is 1
new_pos_embedding = fairseq.modules.LearnedPositionalEmbedding(emb_len, self.embed_positions.embedding_dim, padding_idx=self.embed_positions.padding_idx)
nn.init.normal_(new_pos_embedding.weight, mean=0, std=self.embed_positions.embedding_dim ** -0.5)
nn.init.constant_(new_pos_embedding.weight[self.embed_positions.padding_idx], 0)
new_pos_embedding.weight.data[:pos_emb_len] = self.model.embed_positions.weight.data
self.model.embed_positions = new_pos_embedding
self.embed_positions = new_pos_embedding
self.model.max_source_positions = max_src_length
logging.getLogger().info('Adjusted position size to %d, position_embed.shape=%s'
% (self.embed_positions.num_embeddings, str(self.embed_positions.weight.shape)))
# Expand token embeddings if necessary
# @eric-zhizu: Commented out because causing problems
"""
token_emb_len = self.embed_tokens.num_embeddings
if vocab_size > token_emb_len:
new_token_embedding = nn.Embedding(vocab_size, self.embed_tokens.embedding_dim, padding_idx=self.embed_tokens.padding_idx)
nn.init.normal_(new_token_embedding.weight, mean=0, std=self.embed_tokens.embedding_dim ** -0.5)
nn.init.constant_(new_token_embedding.weight[self.embed_tokens.padding_idx], 0)
new_token_embedding.weight.data[:token_emb_len] = self.model.embed_tokens.weight.data
print("New token embedding weights data", new_token_embedding.weight.data.shape)
print("New token embedding weights", new_token_embedding.weight.shape)
self.model.embed_tokens = new_token_embedding
self.embed_tokens = new_token_embedding
# set embed_fields to be word_embeddings, to call token embeddings easily
self.embed_fields = new_token_embedding
logging.getLogger().info('Adjusted vocab size to %d, token_embed.shape=%s'
% (self.embed_tokens.num_embeddings, str(self.embed_tokens.weight.shape)))
"""
@classmethod
def from_opt(cls, opt, embeddings, **kwargs):
"""Alternate constructor."""
return cls(
model_name='bart',
embeddings=embeddings,
cache_dir=opt.cache_dir,
max_src_length=opt.src_seq_length_trunc,
# vocab_size should be additionally added (after reloading fields news_dataset.reload_news_fields())
vocab_size=opt.vocab_size,
opt=opt,
**kwargs
)
def forward(self, src, src_lengths):
"""
:returns
last_hidden_state:
Sequence of hidden-states at the output of the last layer of the model.
pooler_output: Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function.
The Linear layer weights are trained from the next sentence prediction (classification) objective during Bert pretraining.
This output is usually not a good summary of the semantic content of the input,
you’re often better with averaging or pooling the sequence of hidden-states for the whole input sequence.
"""
# input to BART must be batch_first, src should be (batch_size, sequence_length)
src = src.permute(1, 0, 2)
# don't know how to add token_type_ids because embedding is processed inside
src_tokens = src[:, :, 0]
if src.shape[2] > 1:
src_labels = src[:, :, 1]
else:
src_labels = None
# 'encoder_out', state of the last layer # T x B x C
# 'encoder_padding_mask', # B x T
# 'encoder_embedding', token embeddings (w/o positional embeddings) # B x T x C
# 'encoder_states', states of each layer if return_all_hiddens=True # List[T x B x C]
encoder_output = self.model(self.model, src_tokens, src_lengths,
return_all_hiddens=False, src_labels=src_labels)
# return last_hidden_state and memory_bank in shape of [src_len, batch_size, hid_dim] and length as is
last_hidden_state = encoder_output['encoder_out'][0]
return last_hidden_state, last_hidden_state, src_lengths, encoder_output
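# Note on the pattern used here: forward_embedding() and forward_bart_encoder() below are
# plain module-level functions that BARTEncoder.__init__ monkey-patches onto the fairseq
# encoder *instance* (self.model.forward_embedding / self.model.forward). Because they are
# attached to the instance rather than the class they are not bound methods, which is why the
# model is passed explicitly as the first argument, e.g. self.model(self.model, src_tokens, ...).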
def forward_embedding(self, src_tokens, src_labels, token_embedding: Optional[torch.Tensor] = None):
'''
See fairseq.models.transformer.py L376, forward_embedding()
Embed tokens and positions, both shape=[batch_size, src_len] and weights in embed_tokens
:param self: BART model object
:param src_tokens: text tokens
:param src_labels: feature labels
:return:
'''
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if src_labels is not None:
x += self.embed_tokens(src_labels)
if self.layernorm_embedding:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward_bart_encoder(self,
src_tokens,
src_lengths,
return_all_hiddens: bool = False,
src_labels=None,
token_embeddings: Optional[torch.Tensor] = None,
**unused):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(self, src_tokens, src_labels, token_embeddings)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = []
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
# return EncoderOut(
# encoder_out=x, # T x B x C
# encoder_padding_mask=encoder_padding_mask, # B x T
# encoder_embedding=None, # B x T x C
# encoder_states=None, # List[T x B x C]
# # encoder_embedding=encoder_embedding, # B x T x C
# # encoder_states=encoder_states, # List[T x B x C]
# src_tokens=None,
# src_lengths=None,
# )
|
the-stack_106_27977 | # -*- coding: utf-8 -*-
# file: checkpoint_manager.py
# time: 2021/6/11 0011
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import json
import os
import sys
import zipfile
from autocuda import auto_cuda
from findfile import find_files, find_dir, find_file
from google_drive_downloader import GoogleDriveDownloader as gdd
from termcolor import colored
from pyabsa import __version__
from pyabsa.core.apc.prediction.sentiment_classifier import SentimentClassifier
from pyabsa.core.atepc.prediction.aspect_extractor import AspectExtractor
from pyabsa.core.tc.prediction.text_classifier import TextClassifier
from pyabsa.utils.pyabsa_utils import get_device
def unzip_checkpoint(zip_path):
try:
print('Find zipped checkpoint: {}, unzipping...'.format(zip_path))
sys.stdout.flush()
with zipfile.ZipFile(zip_path, 'r') as z:
z.extractall(zip_path.replace('.zip', ''))
print('Done.')
except zipfile.BadZipfile:
        print('Unzip failed: {}'.format(zip_path))
return zip_path.replace('.zip', '')
class CheckpointManager:
pass
class APCCheckpointManager(CheckpointManager):
@staticmethod
def get_sentiment_classifier(checkpoint: str = None,
sentiment_map: dict = None,
auto_device=True):
"""
:param checkpoint: zipped checkpoint name, or checkpoint path or checkpoint name queried from google drive
:param sentiment_map: label to text index map
:param auto_device: True or False, otherwise 'cuda', 'cpu' works
:return:
"""
checkpoint_config = find_file(os.getcwd(), [checkpoint, '.config'])
if checkpoint_config:
checkpoint = os.path.dirname(checkpoint_config)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
else:
checkpoint = APCCheckpointManager.get_checkpoint(checkpoint)
sent_classifier = SentimentClassifier(checkpoint, sentiment_map=sentiment_map)
device, device_name = get_device(auto_device)
sent_classifier.to(device)
return sent_classifier
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
aspect_sentiment_classification_checkpoint = available_checkpoints('APC')
if checkpoint.lower() in [k.lower() for k in aspect_sentiment_classification_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored(
'Checkpoint:{} is not found, you can raise an issue for requesting shares of checkpoints'.format(
checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='apc',
language=checkpoint.lower(),
archive_path=aspect_sentiment_classification_checkpoint[checkpoint.lower()]['id'])
class ATEPCCheckpointManager(CheckpointManager):
@staticmethod
def get_aspect_extractor(checkpoint: str = None,
sentiment_map: dict = None,
auto_device=True):
"""
:param checkpoint: zipped checkpoint name, or checkpoint path or checkpoint name queried from google drive
:param sentiment_map: label to text index map
:param auto_device: True or False, otherwise 'cuda', 'cpu' works
:return:
"""
checkpoint_config = find_file(os.getcwd(), [checkpoint, '.config'])
if checkpoint_config:
checkpoint = os.path.dirname(checkpoint_config)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
else:
checkpoint = ATEPCCheckpointManager.get_checkpoint(checkpoint)
aspect_extractor = AspectExtractor(checkpoint, sentiment_map=sentiment_map)
device, device_name = get_device(auto_device)
aspect_extractor.to(device)
return aspect_extractor
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
atepc_checkpoint = available_checkpoints('ATEPC')
if checkpoint.lower() in [k.lower() for k in atepc_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored('Checkpoint:{} is not found.'.format(checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='atepc',
language=checkpoint.lower(),
archive_path=atepc_checkpoint[checkpoint]['id'])
class TextClassifierCheckpointManager(CheckpointManager):
@staticmethod
def get_text_classifier(checkpoint=None,
label_map=None,
auto_device=True):
"""
:param checkpoint: zipped checkpoint name, or checkpoint path or checkpoint name queried from google drive
:param label_map: label to text index map
:param auto_device: True or False, otherwise 'cuda', 'cpu' works
:return:
"""
checkpoint_config = find_file(os.getcwd(), [checkpoint, '.config'])
if checkpoint_config:
checkpoint = os.path.dirname(checkpoint_config)
elif checkpoint.endswith('.zip'):
checkpoint = unzip_checkpoint(find_file(os.getcwd(), checkpoint))
else:
checkpoint = TextClassifierCheckpointManager.get_checkpoint(checkpoint)
text_classifier = TextClassifier(checkpoint, label_map=label_map)
device, device_name = get_device(auto_device)
text_classifier.to(device)
return text_classifier
@staticmethod
def get_checkpoint(checkpoint: str = 'Chinese'):
text_classification_checkpoint = available_checkpoints('TextClassification')
if checkpoint.lower() in [k.lower() for k in text_classification_checkpoint.keys()]:
print(colored('Downloading checkpoint:{} from Google Drive...'.format(checkpoint), 'green'))
else:
print(colored('Checkpoint:{} is not found.'.format(checkpoint), 'red'))
sys.exit(-1)
return download_pretrained_model(task='atepc',
language=checkpoint.lower(),
archive_path=text_classification_checkpoint[checkpoint.lower()]['id'])
def compare_version(version1, version2):
# 1 means greater, 0 means equal, -1 means lower
if version1 and not version2:
return 1
elif version2 and not version1:
return -1
else:
version1 = version1.split('.')
version2 = version2.split('.')
for v1, v2 in zip(version1, version2):
if len(v1) == len(v2):
if v1 > v2:
return 1
if v2 > v1:
return -1
else:
if v1.startswith(v2):
return -1
elif v2.startswith(v1):
return 1
elif v1 == v2:
return 0
else:
return int(v1 > v2)
return 0
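# Illustrative behaviour of compare_version (1: version1 greater, 0: equal, -1: lower):
#   compare_version('2.0.0', '2.0.0') -> 0
#   compare_version('1.3.0', '1.2.9') -> 1
#   compare_version('1.2.9', '1.3.0') -> -1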
def parse_checkpoint_info(t_checkpoint_map, task='APC'):
print('*' * 23, colored('Available {} model checkpoints for Version:{} (this version)'.format(task, __version__), 'green'), '*' * 23)
for i, checkpoint in enumerate(t_checkpoint_map):
print('-' * 100)
print("{}. Checkpoint Name: {}\nModel: {}\nDataset: {} \nVersion: {} \nDescription:{} \nAuthor: {}".format(
i + 1,
checkpoint,
t_checkpoint_map[checkpoint]['model']
if 'model' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['dataset_manager']
if 'dataset_manager' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['version']
if 'version' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['description']
if 'description' in t_checkpoint_map[checkpoint] else '',
t_checkpoint_map[checkpoint]['author']
if 'author' in t_checkpoint_map[checkpoint] else ''
))
print('-' * 100)
return t_checkpoint_map
def available_checkpoints(task='', from_local=False):
try:
if not from_local:
checkpoint_url = '1jjaAQM6F9s_IEXNpaY-bQF9EOrhq0PBD'
if os.path.isfile('./checkpoints.json'):
os.remove('./checkpoints.json')
gdd.download_file_from_google_drive(file_id=checkpoint_url, dest_path='./checkpoints.json')
checkpoint_map = json.load(open('./checkpoints.json', 'r'))
current_version_map = {}
for t_map in checkpoint_map:
if '-' in t_map:
min_ver, _, max_ver = t_map.partition('-')
            elif '+' in t_map:
                min_ver, _, max_ver = t_map.partition('+')
else:
min_ver = t_map
max_ver = ''
max_ver = max_ver if max_ver else 'N.A.'
if compare_version(min_ver, __version__) <= 0 and compare_version(__version__, max_ver) <= 0:
current_version_map.update(checkpoint_map[t_map]) # add checkpoint_map[t_map]
t_checkpoint_map = {}
if task:
t_checkpoint_map = dict(current_version_map)[task.upper()] if task in current_version_map else {}
parse_checkpoint_info(t_checkpoint_map, task)
else:
for task_map in current_version_map:
parse_checkpoint_info(current_version_map[task_map], task_map)
# os.remove('./checkpoints.json')
return t_checkpoint_map if task else current_version_map
except Exception as e:
print('\nFailed to query checkpoints (Error: {}), you can try manually download the checkpoints from: \n'.format(e) +
'[1]\tGoogle Drive\t: https://drive.google.com/drive/folders/1yiMTucHKy2hAx945lgzhvb9QeHvJrStC\n'
'[2]\tBaidu NetDisk\t: https://pan.baidu.com/s/1K8aYQ4EIrPm1GjQv_mnxEg (Access Code: absa)\n')
sys.exit(-1)
def download_pretrained_model(task='apc', language='chinese', archive_path='', model_name='any_model'):
    print(colored('Notice: The pretrained models are provided for testing only; '
                  'they were neither trained with fine-tuned hyper-parameters nor for enough steps, '
                  'so it is recommended to train the model on your own custom datasets', 'red')
          )
# if not os.path.exists('./checkpoints'):
# os.mkdir('./checkpoints')
tmp_dir = '{}_{}_TRAINED_MODEL'.format(task.upper(), language.upper())
dest_path = os.path.join('./checkpoints', tmp_dir)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
if (find_files(dest_path, '.model') or find_files(dest_path, '.state_dict')) and find_files(dest_path, '.config'):
print('Checkpoint already downloaded, skip...')
return dest_path
save_path = os.path.join(dest_path, '{}.zip'.format(model_name))
try:
if '/' in archive_path:
archive_path = archive_path.split('/')[-2]
gdd.download_file_from_google_drive(file_id=archive_path,
dest_path=save_path,
unzip=True,
showsize=True)
except ConnectionError as e:
raise ConnectionError("Fail to download checkpoint: {}".format(e))
os.remove(save_path)
return dest_path
def load_sentiment_classifier(checkpoint: str = None,
sentiment_map: dict = None,
auto_device: bool = True):
infer_model = SentimentClassifier(checkpoint, sentiment_map=sentiment_map)
infer_model.to(auto_cuda()) if auto_device else infer_model.cpu()
return infer_model
|
the-stack_106_27979 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-language"
description = "Google Cloud Natural Language API client library"
version = "2.3.1"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
"google-api-core[grpc] >= 1.28.0, <3.0.0dev",
"proto-plus >= 1.10.0",
]
extras = {"libcst": "libcst >= 0.2.5"}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package
for package in setuptools.PEP420PackageFinder.find()
if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/googleapis/python-language",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=3.6",
scripts=[
"scripts/fixup_language_v1_keywords.py",
"scripts/fixup_language_v1beta2_keywords.py",
],
include_package_data=True,
zip_safe=False,
)
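# Local development install (illustrative), including the optional libcst extra defined above:
#   pip install -e ".[libcst]"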
|
the-stack_106_27980 | # Copyright 2019 Intelligent Robotics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
bt_controller_cmd = Node(
package='navigation_experiments_mc_bts',
node_executable='bt_controller',
node_name='bt_controller',
output='screen',
parameters=[])
ld = LaunchDescription()
ld.add_action(bt_controller_cmd)
return ld
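# Typical invocation (the launch-file name is an assumption, not defined in this snippet):
#   ros2 launch navigation_experiments_mc_bts bt_controller_launch.py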
|
the-stack_106_27985 | # Copyright 2017 Lenovo, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cyborg Generic driver implementation.
"""
from modules import generic
class GenericDriver(object):
"""Abstract base class representing the generic driver for Cyborg.
This class provides a reference implementation for a Cyborg driver.
"""
def __init__(self):
self.discover = generic.discover()
self.list = generic.list()
self.update = generic.update
self.attach = generic.attach()
self.detach = generic.detach()
|
the-stack_106_27986 | import sys
import threading
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from PyQt5 import QtWidgets, QtGui, QtCore
class SeleniumManager(QtCore.QObject):
started = QtCore.pyqtSignal()
finished = QtCore.pyqtSignal()
def start(self):
threading.Thread(target=self._execute, daemon=True).start()
def _execute(self):
self.started.emit()
link = "../Chromedriver/chromedriver.exe"
browser = webdriver.Chrome(link)
browser.get("https://twitter.com/login")
time.sleep(1)
# do more stuff in project instead i add more url
browser.get("https://twitter.com/explore")
time.sleep(1)
browser.get("https://twitter.com/login")
time.sleep(1)
browser.close()
time.sleep(1)
self.finished.emit()
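    # Note (illustrative): started/finished are emitted from the worker thread created in
    # start(). With the default AutoConnection, PyQt5 delivers these cross-thread signals as
    # queued events, so the connected GUI slots (startAnimation/stopAnimation, hide/show)
    # still execute on the main/GUI thread.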
class LoadingScreen(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setFixedSize(200, 200)
self.setWindowFlags(
QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.CustomizeWindowHint
)
self.label_animation = QtWidgets.QLabel(self)
self.movie = QtGui.QMovie("loading.gif")
self.label_animation.setMovie(self.movie)
def startAnimation(self):
self.movie.start()
self.show()
QtCore.QTimer.singleShot(2 * 1000, self.stopAnimation)
def stopAnimation(self):
self.movie.stop()
self.hide()
class Demo(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Loading Overlay with Selenium Problem")
self.resize(500, 500)
self.center()
self.twitter_icon = QtWidgets.QLabel("")
self.twitter_icon.setAlignment(QtCore.Qt.AlignCenter)
self.pixmap = QtGui.QPixmap("twitter.png")
self.pixmap = self.pixmap.scaled(
64, 64, QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation
)
self.twitter_icon.setPixmap(self.pixmap)
self.twt_btn = QtWidgets.QPushButton("Twitter")
v_box = QtWidgets.QVBoxLayout(self)
v_box.addStretch()
v_box.addWidget(self.twitter_icon)
v_box.addWidget(self.twt_btn)
v_box.addStretch()
self.loading = LoadingScreen()
self._manager = SeleniumManager()
self._manager.started.connect(self.loading.startAnimation)
self._manager.finished.connect(self.loading.stopAnimation)
self.twt_btn.clicked.connect(self._manager.start)
self._manager.started.connect(self.hide)
self._manager.finished.connect(self.show)
def center(self):
qr = self.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
dm = Demo()
dm.show()
    sys.exit(app.exec_()) |
the-stack_106_27988 | import os
import nni
import csv
import json
import time
import warnings
import argparse
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from utils import *
from model import get_model
from dataset import load_data
warnings.filterwarnings("ignore", category=Warning)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def evaluate_synthetic(model, data_loder, param, log_dir, plot=False):
with torch.no_grad():
logits_all = []
labels_all = []
model.eval()
for _, (g, labels, gt_adjs) in enumerate(data_loder):
model.g = g
features = g.ndata['feat'].to(device)
labels = labels.to(device)
logits = model(features, mode='test')
logits_all.append(logits.detach())
labels_all.append(labels.detach())
logits_all = torch.cat(tuple(logits_all), 0)
labels_all = torch.cat(tuple(labels_all), 0)
micro_f1 = evaluate_f1(logits_all, labels_all)
att_score = model.GraphLearning.att.detach().cpu().numpy()
f_score = evaluate_att(att_score, param, log_dir, plot)
return micro_f1, f_score
def main_synthetic(param):
set_seed(param['seed'])
log_dir = "./log/{}/".format(param['ExpName'])
os.makedirs(log_dir, exist_ok=True)
json.dump(param, open("{}param.json".format(log_dir), 'a'), indent=2)
data = load_data(param)
train_data = data[:int(len(data)*0.7)]
val_data = data[int(len(data)*0.7) : int(len(data)*0.8)]
test_data = data[int(len(data)*0.8):]
train_loader = DataLoader(train_data, batch_size=param['batch_size'], shuffle=True, collate_fn=collate)
val_loader = DataLoader(val_data, batch_size=param['batch_size'], shuffle=True, collate_fn=collate)
test_loader = DataLoader(test_data, batch_size=param['batch_size'], shuffle=False, collate_fn=collate)
model = get_model(param).to(device)
loss_fcn = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=param['lr'], weight_decay=param['weight_decay'])
best_test_acc = 0
best_val_acc = 0
best_val_test_acc = 0
best_epoch_acc = 0
best_test_score = 0
best_epoch_score = 0
for epoch in range(param['epochs']):
loss_1 = []
loss_2 = []
loss_3 = []
train_loss = []
for _, (g, labels, gt_adjs) in enumerate(train_loader):
model.train()
optimizer.zero_grad()
model.g = g
features = g.ndata['feat'].to(device)
labels = labels.to(device)
logits = model(features)
loss_cla = loss_fcn(logits, labels)
loss_graph, loss_node = model.compute_disentangle_loss()
loss = loss_cla + loss_graph * param['ratio_graph'] + loss_node * param['ratio_node']
loss.backward()
optimizer.step()
loss_1.append(loss_cla.item())
loss_2.append(loss_graph.item() * param['ratio_graph'])
loss_3.append(loss_node.item() * param['ratio_node'])
train_loss.append(loss.item())
train_acc, _ = evaluate_synthetic(model, train_loader, param, log_dir)
val_acc, _ = evaluate_synthetic(model, val_loader, param, log_dir)
test_acc, test_score = evaluate_synthetic(model, test_loader, param, log_dir)
if test_acc > best_test_acc:
best_test_acc = test_acc
best_epoch_acc = epoch
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'param': param}, log_dir + 'best_model.pt')
if val_acc > best_val_acc:
best_val_acc = val_acc
best_val_test_acc = test_acc
if test_score > best_test_score:
best_test_score = test_score
best_epoch_score = epoch
_, _ = evaluate_synthetic(model, test_loader, param, log_dir='{}Feature/Epoch{}_Score{}/'.format(log_dir, epoch, int(test_score)), plot=True)
print("\033[0;30;46m Epoch: {} | Loss: {:.6f}, {:.6f}, {:.12f}, {:.6f} | Acc: {:.5f}, {:.5f}, {:.5f}, {:.5f}({}) | Num: {}, {} \033[0m".format(
epoch, np.mean(loss_1), np.mean(loss_2), np.mean(loss_3), np.mean(train_loss), train_acc, val_acc, test_acc, best_test_acc, best_epoch_acc, test_score, best_test_score))
nni.report_final_result(best_test_score)
outFile = open('./log/PerformMetrics.csv','a+', newline='')
writer = csv.writer(outFile, dialect='excel')
results = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())]
    for k, v in param.items():
        results.append(v)
results.append(str(test_acc))
results.append(str(best_val_test_acc))
results.append(str(best_test_acc))
results.append(str(best_epoch_acc))
results.append(str(best_test_score))
results.append(str(best_epoch_score))
path = './log/{}/best_model.pt'.format(param['ExpName'])
best_model = torch.load(path)
cscore, ged_m, ged_s = evaluate_graph(best_model)
results.append(str(cscore))
results.append(str(ged_m))
results.append(str(ged_s))
writer.writerow(results)
def evaluate_zinc(model, data_loader):
loss_fcn = torch.nn.L1Loss()
model.eval()
loss = 0
mae = 0
with torch.no_grad():
for batch_idx, (batch_graphs, batch_targets, batch_snorm_n) in enumerate(data_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_targets = batch_targets.to(device)
batch_snorm_n = batch_snorm_n.to(device)
model.g = batch_graphs
batch_scores = model.forward(batch_x, batch_snorm_n)
eval_loss = loss_fcn(batch_scores, batch_targets).item()
eval_mae = F.l1_loss(batch_scores, batch_targets).item()
loss += eval_loss
mae += eval_mae
loss /= (batch_idx + 1)
mae /= (batch_idx + 1)
return loss, mae
def main_zinc(param):
set_seed(param['seed'])
log_dir = "./log/{}/".format(param['ExpName'])
os.makedirs(log_dir, exist_ok=True)
json.dump(param, open("{}param.json".format(log_dir), 'a'), indent=2)
zinc_data = load_data(param)
train_loader = DataLoader(zinc_data.train, batch_size=1000, shuffle=True, collate_fn=zinc_data.collate)
val_loader = DataLoader(zinc_data.val, batch_size=1000, shuffle=False, collate_fn=zinc_data.collate)
test_loader = DataLoader(zinc_data.test, batch_size=1000, shuffle=False, collate_fn=zinc_data.collate)
model = get_model(param).to(device)
loss_fcn = torch.nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=param['lr'], weight_decay=param['weight_decay'])
best_test_mae = 1e6
best_val_mae = 1e6
best_val_test_mae = 1e6
best_epoch_mae = 0
for epoch in range(param['epochs']):
model.train()
epoch_loss = 0
epoch_train_mae = 0
for batch_idx, (batch_graphs, batch_targets, batch_snorm_n) in enumerate(train_loader):
batch_x = batch_graphs.ndata['feat'].to(device)
batch_targets = batch_targets.to(device)
batch_snorm_n = batch_snorm_n.to(device)
optimizer.zero_grad()
model.g = batch_graphs
batch_scores = model.forward(batch_x, batch_snorm_n)
loss_mae = loss_fcn(batch_scores, batch_targets)
loss_graph, loss_node = model.compute_disentangle_loss()
loss = loss_mae + loss_graph * param['ratio_graph'] + loss_node * param['ratio_node']
loss.backward()
optimizer.step()
loss_1 = loss_mae.item()
loss_2 = loss_graph.item() * param['ratio_graph']
loss_3 = loss_node.item() * param['ratio_node']
train_loss = loss.item()
train_mae = F.l1_loss(batch_scores, batch_targets).item()
epoch_loss += train_loss
epoch_train_mae += train_mae
# print("Epoch: {} | [{}/{}] | Loss: {}, {}, {}, {}".format(epoch, batch_idx+1, 10, loss_1, loss_2, loss_3, train_loss))
epoch_loss /= (batch_idx + 1)
epoch_train_mae /= (batch_idx + 1)
val_loss, val_mae = evaluate_zinc(model, val_loader)
test_loss, test_mae = evaluate_zinc(model, test_loader)
if test_mae < best_test_mae:
best_test_mae = test_mae
best_epoch_mae = epoch
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'param': param}, log_dir + 'best_model.pt')
if val_mae < best_val_mae:
best_val_mae = val_mae
best_val_test_mae = test_mae
print("\033[0;30;46m Epoch: {} | Loss: {:.6f} | Mae: {:.5f}, {:.5f}, {:.5f}, {:.5f}({}), {:.5f} \033[0m".format(
epoch, epoch_loss, epoch_train_mae, val_mae, test_mae, best_test_mae, best_epoch_mae, best_val_test_mae))
outFile = open('./log/PerformMetrics.csv','a+', newline='')
writer = csv.writer(outFile, dialect='excel')
results = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())]
    for k, v in param.items():
        results.append(v)
results.append(str(test_mae))
results.append(str(best_val_test_mae))
results.append(str(best_test_mae))
results.append(str(best_epoch_mae))
path = './log/{}/best_model.pt'.format(param['ExpName'])
best_model = torch.load(path)
cscore, ged_m, ged_s = evaluate_graph(best_model)
results.append(str(cscore))
results.append(str(ged_m))
results.append(str(ged_s))
nni.report_final_result(best_val_test_mae)
writer.writerow(results)
def evaluate(model, features, labels, mask):
model.eval()
with torch.no_grad():
logits = model(features, mode='test')
logits = logits[mask]
labels = labels[mask]
_, pred = torch.max(logits, dim=1)
correct = torch.sum(pred == labels)
return correct.item() * 1.0 / len(labels)
def main(param):
set_seed(param['seed'])
g, features, labels, train_mask, val_mask, test_mask = load_data(param)
param['input_dim'] = features.shape[1]
param['output_dim'] = torch.max(labels) + 1
model = get_model(param).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=param['lr'], weight_decay=param['weight_decay'])
test_best = 0
test_val = 0
val_best = 0
val_best_epoch = 0
for epoch in range(param['epochs']):
model.train()
optimizer.zero_grad()
model.g = g
logits = model(features)
pred = F.log_softmax(logits, 1)
loss_cla = F.nll_loss(pred[train_mask], labels[train_mask])
loss_graph, loss_node = model.compute_disentangle_loss()
loss = loss_cla + loss_graph * param['ratio_graph'] + loss_node * param['ratio_node']
loss.backward()
optimizer.step()
loss_1 = loss_cla.item()
loss_2 = loss_graph.item() * param['ratio_graph']
loss_3 = loss_node.item() * param['ratio_node']
train_loss = loss.item()
train_acc = evaluate(model, features, labels, train_mask)
val_acc = evaluate(model, features, labels, val_mask)
test_acc = evaluate(model, features, labels, test_mask)
# nni.report_intermediate_result(test_acc)
if test_acc > test_best:
test_best = test_acc
if val_acc > val_best:
val_best = val_acc
test_val = test_acc
val_best_epoch = epoch
print("\033[0;30;46m Epoch: {} | Loss: {:.6f}, {:.6f}, {:.12f}, {:.6f} | Acc: {:.5f}, {:.5f}, {:.5f}, {:.5f}({}), {:.5f} \033[0m".format(
epoch, loss_1, loss_2, loss_3, train_loss, train_acc, val_acc, test_acc, test_val, val_best_epoch, test_best))
nni.report_final_result(test_val)
outFile = open('./results/PerformMetrics.csv','a+', newline='')
writer = csv.writer(outFile, dialect='excel')
results = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())]
    for k, v in param.items():
        results.append(v)
results.append(str(test_acc))
results.append(str(test_best))
results.append(str(test_val))
results.append(str(val_best))
results.append(str(val_best_epoch))
writer.writerow(results)
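# Hedged CLI sketch (the script file name is an assumption; the flags come from the argparse
# definitions below):
#   python main.py --dataset cora --model MDGNN --epochs 200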
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument("--ExpName", type=str, default='run0000')
parser.add_argument("--model", type=str, default='MDGNN')
parser.add_argument("--dataset", type=str, default="synthetic", choices=['cora', 'citeseer', 'pubmed', 'synthetic', 'zinc'])
parser.add_argument("--input_dim", type=int, default=30)
parser.add_argument("--out_dim", type=int, default=6)
parser.add_argument("--percent", type=float, default=0.03)
parser.add_argument("--mode", type=int, default=1)
parser.add_argument("--ablation_mode", type=int, default=0)
parser.add_argument("--num_graph", type=int, default=6)
parser.add_argument("--hidden_dim", type=int, default=18)
parser.add_argument("--graph_dim", type=int, default=18)
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--sigma", type=float, default=8.0)
parser.add_argument("--ratio_graph", type=float, default=1.0)
parser.add_argument("--ratio_node", type=float, default=1.0)
parser.add_argument("--num_hop", type=int, default=3)
parser.add_argument("--beta", type=float, default=0.2)
parser.add_argument("--epochs", type=int, default=200)
parser.add_argument("--lr", type=float, default=0.005)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--graph_size', type=int, default=30)
parser.add_argument('--graph_num', type=int, default=10000)
parser.add_argument('--feature_num', type=int, default=5)
parser.add_argument('--std', type=float, default=5.0)
parser.add_argument('--batch_size', type=int, default=1000)
parser.add_argument('--init', type=float, default=0.2)
parser.add_argument('--selected_num', type=int, default=5)
args = parser.parse_args()
if args.dataset == 'synthetic':
jsontxt = open("./param/param_synthetic.json", 'r').read()
param = json.loads(jsontxt)
elif args.dataset == 'cora':
jsontxt = open("./param/param_cora.json", 'r').read()
param = json.loads(jsontxt)
elif args.dataset == 'citeseer':
jsontxt = open("./param/param_citeseer.json", 'r').read()
param = json.loads(jsontxt)
elif args.dataset == 'pubmed':
jsontxt = open("./param/param_pubmed.json", 'r').read()
param = json.loads(jsontxt)
elif args.dataset == 'zinc':
jsontxt = open("./param/param_zinc.json", 'r').read()
param = json.loads(jsontxt)
else:
param = args.__dict__
param.update(nni.get_next_parameter())
print(param)
if args.dataset == 'synthetic':
main_synthetic(param)
elif args.dataset == 'zinc':
main_zinc(param)
else:
main(param) |
the-stack_106_27990 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.types import Panel
from bpy.props import StringProperty
bl_info = {
"name": "KTX Selectbuffer",
"description": "Enable boolean operations on selections",
"author": "Roel Koster, @koelooptiemanna, irc:kostex",
"version": (1, 3, 2),
"blender": (2, 7, 0),
"location": "View3D > Properties",
"warning": "",
"wiki_url": "https://github.com/kostex/blenderscripts/",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "3D View"}
class Oldbuffer:
data = []
class KTX_Selectbuffer_Mutate(bpy.types.Operator):
bl_label = "select buffer mutate"
bl_idname = "ktx.selectbuffer_mutate"
bl_description = ("A.union(B) elements from both A and B\n"
"A.difference(B) elements in A but not in B\n"
"A.symmetric_difference(B) elements in either A or B but not both\n"
"A.intersection(B) elements common to A and B")
operation = StringProperty()
def execute(self, context):
old_buffer = bpy.context.scene.ktx_selectbuffer
emode = bpy.context.tool_settings.mesh_select_mode
c_mode = bpy.context.object.mode
bpy.ops.object.mode_set(mode='OBJECT')
if emode[0]:
all_vefs = bpy.context.object.data.vertices
elif emode[1]:
all_vefs = bpy.context.object.data.edges
elif emode[2]:
all_vefs = bpy.context.object.data.polygons
selected_vefs = [vef for vef in all_vefs if vef.select]
selected_vefs_buffer = []
for vef in selected_vefs:
selected_vefs_buffer.append(vef.index)
if self.operation == 'union':
resulting_vefs = set(old_buffer.data).union(selected_vefs_buffer)
elif self.operation == 'difference':
resulting_vefs = set(old_buffer.data).difference(selected_vefs_buffer)
elif self.operation == 'sym_difference':
resulting_vefs = set(old_buffer.data).symmetric_difference(selected_vefs_buffer)
elif self.operation == 'intersection':
resulting_vefs = set(old_buffer.data).intersection(selected_vefs_buffer)
elif self.operation == 'set':
resulting_vefs = selected_vefs_buffer
elif self.operation == 'clear':
resulting_vefs = []
old_buffer.data = resulting_vefs
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
for vef in resulting_vefs:
all_vefs[vef].select = True
bpy.ops.object.mode_set(mode=c_mode)
bpy.ops.ed.undo_push()
return {'FINISHED'}
class KTX_Selectbuffer(bpy.types.Panel):
bl_label = "KTX Selectbuffer"
bl_idname = "ktx.selectbuffer"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
def draw(self, context):
obj = bpy.context.object
layout = self.layout
row = layout.row()
col = row.column()
if obj == None:
col.label(text='Select/Create something first')
else:
if obj.type == 'MESH':
c_mode = bpy.context.object.mode
if c_mode == 'EDIT':
col.operator("ktx.selectbuffer_mutate", text="Set").operation = 'set'
col.operator("ktx.selectbuffer_mutate", text="Clear").operation = 'clear'
col.operator("ktx.selectbuffer_mutate", text="Union").operation = 'union'
col.operator("ktx.selectbuffer_mutate", text="Difference").operation = 'difference'
col.operator("ktx.selectbuffer_mutate", text="Symmetric Difference").operation = 'sym_difference'
col.operator("ktx.selectbuffer_mutate", text="Intersection").operation = 'intersection'
else:
col.label(text='Enter EDIT Mode to use')
else:
col.label(text='Select a Mesh Object')
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.ktx_selectbuffer = Oldbuffer
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.ktx_selectbuffer
if __name__ == "__main__":
register()
|
the-stack_106_27991 | from __future__ import unicode_literals
import json
import re
import os
import subprocess
from collections import OrderedDict
from distutils.spawn import find_executable
from functools import partial
from itertools import chain
from typing import Text, Iterable, Union, Dict, Set, Sequence, Any
import six
import yaml
from time import time
from attr import attrs, attrib, Factory
from pathlib2 import Path
from clearml_agent.external.requirements_parser import parse
from clearml_agent.external.requirements_parser.requirement import Requirement
from clearml_agent.errors import CommandFailedError
from clearml_agent.helper.base import rm_tree, NonStrictAttrs, select_for_platform, is_windows_platform, ExecutionInfo
from clearml_agent.helper.process import Argv, Executable, DEVNULL, CommandSequence, PathLike
from clearml_agent.helper.package.requirements import SimpleVersion
from clearml_agent.session import Session
from .base import PackageManager
from .pip_api.venv import VirtualenvPip
from .requirements import RequirementsManager, MarkerRequirement
from ...backend_api.session.defs import ENV_CONDA_ENV_PACKAGE
package_normalize = partial(re.compile(r"""\[version=['"](.*)['"]\]""").sub, r"\1")
def package_set(packages):
return set(map(package_normalize, packages))
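# Illustration: package_normalize("numpy[version='>=1.19']") -> "numpy>=1.19", so conda-style
# version constraints and their plain pip-style counterparts compare equal inside package_set().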
def _package_diff(path, packages):
# type: (Union[Path, Text], Iterable[Text]) -> Set[Text]
return package_set(Path(path).read_text().splitlines()) - package_set(packages)
class CondaPip(VirtualenvPip):
def __init__(self, source=None, *args, **kwargs):
super(CondaPip, self).__init__(*args, interpreter=Path(kwargs.get('path'), "python.exe")
if is_windows_platform() and kwargs.get('path') else None, **kwargs)
self.source = source
def run_with_env(self, command, output=False, **kwargs):
if not self.source:
return super(CondaPip, self).run_with_env(command, output=output, **kwargs)
command = CommandSequence(self.source, Argv("pip", *command))
return (command.get_output if output else command.check_call)(
stdin=DEVNULL, **kwargs
)
class CondaAPI(PackageManager):
"""
A programmatic interface for controlling conda
"""
MINIMUM_VERSION = "4.3.30"
def __init__(self, session, path, python, requirements_manager, execution_info=None, **kwargs):
# type: (Session, PathLike, float, RequirementsManager, ExecutionInfo, Any) -> None
"""
:param python: base python version to use (e.g python3.6)
:param path: path of env
"""
super(CondaAPI, self).__init__()
self.session = session
self.python = python
self.source = None
self.requirements_manager = requirements_manager
self.path = path
self.env_read_only = False
self.extra_channels = self.session.config.get('agent.package_manager.conda_channels', [])
self.conda_env_as_base_docker = \
self.session.config.get('agent.package_manager.conda_env_as_base_docker', None) or \
bool(ENV_CONDA_ENV_PACKAGE.get())
if ENV_CONDA_ENV_PACKAGE.get():
self.conda_pre_build_env_path = ENV_CONDA_ENV_PACKAGE.get()
else:
self.conda_pre_build_env_path = execution_info.docker_cmd if execution_info else None
self.pip = CondaPip(
session=self.session,
source=self.source,
python=self.python,
requirements_manager=self.requirements_manager,
path=self.path,
)
try:
self.conda = (
find_executable("conda") or
Argv(select_for_platform(windows="where", linux="which"), "conda").get_output(
shell=select_for_platform(windows=True, linux=False)).strip()
)
except Exception:
raise ValueError("ERROR: package manager \"conda\" selected, "
"but \'conda\' executable could not be located")
try:
output = Argv(self.conda, "--version").get_output(stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
raise CommandFailedError(
"Unable to determine conda version: {ex}, output={ex.output}".format(
ex=ex
)
)
self.conda_version = self.get_conda_version(output)
if SimpleVersion.compare_versions(self.conda_version, '<', self.MINIMUM_VERSION):
raise CommandFailedError(
"conda version '{}' is smaller than minimum supported conda version '{}'".format(
self.conda_version, self.MINIMUM_VERSION
)
)
@staticmethod
def get_conda_version(output):
match = re.search(r"(\d+\.){0,2}\d+", output)
if not match:
raise CommandFailedError("Unidentified conda version string:", output)
return match.group(0)
@property
def bin(self):
return self.pip.bin
# noinspection SpellCheckingInspection
def upgrade_pip(self):
        # do not change pip version if a pre-built environment is used
if self.env_read_only:
print('Conda environment in read-only mode, skipping pip upgrade.')
return ''
return self._install(select_for_platform(windows='pip{}', linux='pip{}').format(self.pip.get_pip_version()))
def create(self):
"""
Create a new environment
"""
if self.conda_env_as_base_docker and self.conda_pre_build_env_path:
if Path(self.conda_pre_build_env_path).is_dir():
self._init_existing_environment(self.conda_pre_build_env_path)
return self
elif Path(self.conda_pre_build_env_path).is_file():
print("Restoring Conda environment from {}".format(self.conda_pre_build_env_path))
tar_path = find_executable("tar")
self.path.mkdir(parents=True, exist_ok=True)
output = Argv(
tar_path,
"-xzf",
self.conda_pre_build_env_path,
"-C",
self.path,
).get_output()
self.source = self.pip.source = ("conda", "activate", self.path.as_posix())
conda_env = self._get_conda_sh()
self.source = self.pip.source = CommandSequence(('source', conda_env.as_posix()), self.source)
# unpack cleanup
print("Fixing prefix in Conda environment {}".format(self.path))
CommandSequence(('source', conda_env.as_posix()),
((self.path / 'bin' / 'conda-unpack').as_posix(), )).get_output()
return self
else:
raise ValueError("Could not restore Conda environment, cannot find {}".format(
self.conda_pre_build_env_path))
output = Argv(
self.conda,
"create",
"--yes",
"--mkdir",
"--prefix",
self.path,
"python={}".format(self.python),
).get_output(stderr=DEVNULL)
match = re.search(
r"\W*(.*activate) ({})".format(re.escape(str(self.path))), output
)
self.source = self.pip.source = (
tuple(match.group(1).split()) + (match.group(2),)
if match
else ("conda", "activate", self.path.as_posix())
)
conda_env = self._get_conda_sh()
if conda_env.is_file() and not is_windows_platform():
self.source = self.pip.source = CommandSequence(('source', conda_env.as_posix()), self.source)
return self
def _init_existing_environment(self, conda_pre_build_env_path):
print("Using pre-existing Conda environment from {}".format(conda_pre_build_env_path))
self.path = Path(conda_pre_build_env_path)
self.source = ("conda", "activate", self.path.as_posix())
self.pip = CondaPip(
session=self.session,
source=self.source,
python=self.python,
requirements_manager=self.requirements_manager,
path=self.path,
)
conda_env = self._get_conda_sh()
self.source = self.pip.source = CommandSequence(('source', conda_env.as_posix()), self.source)
self.env_read_only = True
def remove(self):
"""
Delete a conda environment.
Use 'conda env remove', then 'rm_tree' to be safe.
        Conda seems to load "vcruntime140.dll" from all its environments on startup.
        This means environments have to be deleted using 'conda env remove'.
If necessary, conda can be fooled into deleting a partially-deleted environment by creating an empty file
in '<ENV>\conda-meta\history' (value found in 'conda.gateways.disk.test.PREFIX_MAGIC_FILE').
Otherwise, it complains that said directory is not a conda environment.
See: https://github.com/conda/conda/issues/7682
"""
try:
self._run_command(("env", "remove", "-p", self.path))
except Exception:
pass
rm_tree(self.path)
        # if we failed removing the path, change its name
if is_windows_platform() and Path(self.path).exists():
try:
Path(self.path).rename(Path(self.path).as_posix() + '_' + str(time()))
except Exception:
pass
def _install_from_file(self, path):
"""
Install packages from requirement file.
"""
self._install("--file", path)
def _install(self, *args):
# type: (*PathLike) -> ()
# if we are in read only mode, do not install anything
if self.env_read_only:
print('Conda environment in read-only mode, skipping package installing: {}'.format(args))
return
channels_args = tuple(
chain.from_iterable(("-c", channel) for channel in self.extra_channels)
)
self._run_command(("install", "-p", self.path) + channels_args + args)
def _get_pip_packages(self, packages):
# type: (Iterable[Text]) -> Sequence[Text]
"""
Return subset of ``packages`` which are not available on conda
"""
pips = []
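        # Repeatedly try a conda install from a temp requirements file; every package conda
        # reports as missing (PackageNotFoundError) is diverted to pip and removed from the
        # conda request before retrying.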
while True:
with self.temp_file("conda_reqs", packages) as path:
try:
self._install_from_file(path)
except PackageNotFoundError as e:
pips.append(e.pkg)
packages = _package_diff(path, {e.pkg})
else:
break
return pips
def install_packages(self, *packages):
# type: (*Text) -> ()
return self._install(*packages)
def uninstall_packages(self, *packages):
# if we are in read only mode, do not uninstall anything
if self.env_read_only:
print('Conda environment in read-only mode, skipping package uninstalling: {}'.format(packages))
return ''
        return self._run_command(("uninstall", "-p", self.path) + packages)
def install_from_file(self, path):
"""
Try to install packages from conda. Install packages which are not available from conda with pip.
"""
requirements = {}
# assume requirements.txt
with open(path, 'rt') as f:
requirements['pip'] = f.read()
self.load_requirements(requirements)
def freeze(self, freeze_full_environment=False):
requirements = self.pip.freeze()
req_lines = []
conda_lines = []
# noinspection PyBroadException
try:
pip_lines = requirements['pip']
conda_packages_json = json.loads(
self._run_command((self.conda, "list", "--json", "-p", self.path), raw=True))
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
name = (r['name'].replace('-', '_'), r['name'])
pip_req_line = [l for l in pip_lines
if l.split('==', 1)[0].strip() in name or l.split('@', 1)[0].strip() in name]
if pip_req_line and \
('@' not in pip_req_line[0] or
not pip_req_line[0].split('@', 1)[1].strip().startswith('file://')):
req_lines.append(pip_req_line[0])
continue
req_lines.append(
'{}=={}'.format(name[1], r['version']) if r.get('version') else '{}'.format(name[1]))
continue
# check if we have it in our required packages
name = r['name']
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
# skip over packages with _
if name.startswith('_'):
continue
conda_lines.append('{}=={}'.format(name, r['version']) if r.get('version') else '{}'.format(name))
# make sure we see the conda packages, put them into the pip as well
if conda_lines:
req_lines = ['# Conda Packages', ''] + conda_lines + ['', '# pip Packages', ''] + req_lines
requirements['pip'] = req_lines
requirements['conda'] = conda_lines
except Exception:
pass
if freeze_full_environment:
# noinspection PyBroadException
try:
conda_env_json = json.loads(
self._run_command((self.conda, "env", "export", "--json", "-p", self.path), raw=True))
conda_env_json.pop('name', None)
conda_env_json.pop('prefix', None)
conda_env_json.pop('channels', None)
requirements['conda_env_json'] = json.dumps(conda_env_json)
except Exception:
pass
return requirements
def _load_conda_full_env(self, conda_env_dict, requirements):
# noinspection PyBroadException
try:
cuda_version = int(self.session.config.get('agent.cuda_version', 0))
except Exception:
cuda_version = 0
conda_env_dict['channels'] = self.extra_channels
if 'dependencies' not in conda_env_dict:
conda_env_dict['dependencies'] = []
new_dependencies = OrderedDict()
pip_requirements = None
for line in conda_env_dict['dependencies']:
if isinstance(line, dict):
pip_requirements = line.pop('pip', None)
continue
name = line.strip().split('=', 1)[0].lower()
if name == 'pip':
continue
elif name == 'python':
line = 'python={}'.format('.'.join(line.split('=')[1].split('.')[:2]))
elif name == 'tensorflow-gpu' and cuda_version == 0:
line = 'tensorflow={}'.format(line.split('=')[1])
elif name == 'tensorflow' and cuda_version > 0:
line = 'tensorflow-gpu={}'.format(line.split('=')[1])
elif name in ('cupti', 'cudnn'):
# cudatoolkit should pull them based on the cudatoolkit version
continue
elif name.startswith('_'):
continue
new_dependencies[line.split('=', 1)[0].strip()] = line
# fix packages:
conda_env_dict['dependencies'] = list(new_dependencies.values())
with self.temp_file("conda_env", yaml.dump(conda_env_dict), suffix=".yml") as name:
print('Conda: Trying to install requirements:\n{}'.format(conda_env_dict['dependencies']))
result = self._run_command(
("env", "update", "-p", self.path, "--file", name)
)
# check if we need to remove specific packages
bad_req = self._parse_conda_result_bad_packges(result)
if bad_req:
print('failed installing the following conda packages: {}'.format(bad_req))
return False
if pip_requirements:
# create a list of vcs packages that we need to replace in the pip section
vcs_reqs = {}
if 'pip' in requirements:
pip_lines = requirements['pip'].splitlines() \
if isinstance(requirements['pip'], six.string_types) else requirements['pip']
for line in pip_lines:
try:
marker = list(parse(line))
except Exception:
marker = None
if not marker:
continue
m = MarkerRequirement(marker[0])
if m.vcs:
vcs_reqs[m.name] = m
try:
pip_req_str = [str(vcs_reqs.get(r.split('=', 1)[0], r)) for r in pip_requirements
if not r.startswith('pip=') and not r.startswith('virtualenv=')]
print('Conda: Installing requirements: step 2 - using pip:\n{}'.format(pip_req_str))
PackageManager._selected_manager = self.pip
self.pip.load_requirements({'pip': '\n'.join(pip_req_str)})
except Exception as e:
print(e)
raise e
finally:
PackageManager._selected_manager = self
self.requirements_manager.post_install(self.session)
def load_requirements(self, requirements):
# if we are in read only mode, do not uninstall anything
if self.env_read_only:
print('Conda environment in read-only mode, skipping requirements installation.')
return None
# if we have a full conda environment, use it and pass the pip to pip
if requirements.get('conda_env_json'):
# noinspection PyBroadException
try:
conda_env_json = json.loads(requirements.get('conda_env_json'))
print('Conda restoring full yaml environment')
return self._load_conda_full_env(conda_env_json, requirements)
except Exception:
print('Could not load fully stored conda environment, falling back to requirements')
# create new environment file
conda_env = dict()
conda_env['channels'] = self.extra_channels
reqs = []
if isinstance(requirements['pip'], six.string_types):
requirements['pip'] = requirements['pip'].split('\n')
if isinstance(requirements.get('conda'), six.string_types):
requirements['conda'] = requirements['conda'].split('\n')
has_torch = False
has_matplotlib = False
has_cudatoolkit = False
try:
# notice this is an integer version: 112 (means 11.2)
cuda_version = int(self.session.config.get('agent.cuda_version', 0))
except:
cuda_version = 0
# notice 'conda' entry with empty string is a valid conda requirements list, it means pip only
# this should happen if experiment was executed on non-conda machine or old trains client
conda_supported_req = requirements['pip'] if requirements.get('conda', None) is None else requirements['conda']
conda_supported_req_names = []
pip_requirements = []
for r in conda_supported_req:
try:
marker = list(parse(r))
except:
marker = None
if not marker:
continue
m = MarkerRequirement(marker[0])
# conda does not support version control links
if m.vcs:
pip_requirements.append(m)
continue
# Skip over pip
if m.name in ('pip', 'virtualenv', ):
continue
# python version, only major.minor
if m.name == 'python' and m.specs:
m.specs = [(m.specs[0][0], '.'.join(m.specs[0][1].split('.')[:2])), ]
if '.' not in m.specs[0][1]:
continue
if m.name.lower() == 'cudatoolkit':
# skip cuda if we are running on CPU
if not cuda_version:
continue
has_cudatoolkit = True
# cuda version, only major.minor
requested_cuda_version = '.'.join(m.specs[0][1].split('.')[:2])
# make sure that the cuda_version we support can install the requested cuda (major version)
if int(float(requested_cuda_version)) > int(float(cuda_version)/10.0):
continue
m.specs = [(m.specs[0][0], str(requested_cuda_version)), ]
conda_supported_req_names.append(m.name.lower())
if m.req.name.lower() == 'matplotlib':
has_matplotlib = True
elif m.req.name.lower().startswith('torch'):
has_torch = True
if m.req.name.lower() in ('torch', 'pytorch'):
has_torch = True
m.req.name = 'pytorch'
if m.req.name.lower() in ('tensorflow_gpu', 'tensorflow-gpu', 'tensorflow'):
has_torch = True
m.req.name = 'tensorflow-gpu' if cuda_version > 0 else 'tensorflow'
reqs.append(m)
if not has_cudatoolkit and cuda_version:
m = MarkerRequirement(Requirement("cudatoolkit == {}".format(float(cuda_version) / 10.0)))
reqs.append(m)
# if we have a conda list, the rest should be installed with pip,
# this means any experiment that was executed with pip environment,
# will be installed using pip
if requirements.get('conda', None) is not None:
for r in requirements['pip']:
try:
marker = list(parse(r))
except:
marker = None
if not marker:
continue
m = MarkerRequirement(marker[0])
# skip over local files (we cannot change the version to a local file)
if m.local_file:
continue
m_name = (m.name or '').lower()
if m_name in conda_supported_req_names:
# this package is in the conda list,
# make sure that if we changed version and we match it in conda
## conda_supported_req_names.remove(m_name)
for cr in reqs:
if m_name.lower().replace('_', '-') == cr.name.lower().replace('_', '-'):
# match versions
cr.specs = m.specs
# # conda always likes "-" not "_" but only on pypi packages
# cr.name = cr.name.lower().replace('_', '-')
break
else:
# not in conda, it is a pip package
pip_requirements.append(m)
if m_name == 'matplotlib':
has_matplotlib = True
# Conda requirements Hacks:
if has_matplotlib:
reqs.append(MarkerRequirement(Requirement.parse('graphviz')))
reqs.append(MarkerRequirement(Requirement.parse('python-graphviz')))
reqs.append(MarkerRequirement(Requirement.parse('kiwisolver')))
        # remove specific cudatoolkit, it should have been preinstalled.
        # allow overriding the default cudatoolkit, but not the derivative packages; cudatoolkit should pull them
reqs = [r for r in reqs if r.name not in ('cudnn', 'cupti')]
if has_torch and cuda_version == 0:
reqs.append(MarkerRequirement(Requirement.parse('cpuonly')))
# make sure we have no double entries
reqs = list(OrderedDict((r.name, r) for r in reqs).values())
# conform conda packages (version/name)
for r in reqs:
# change _ to - in name but not the prefix _ (as this is conda prefix)
if r.name and not r.name.startswith('_') and not requirements.get('conda', None):
r.name = r.name.replace('_', '-')
if has_cudatoolkit and r.specs and len(r.specs[0]) > 1 and r.name == 'cudatoolkit':
# select specific cuda version if it came from the requirements
r.specs = [(r.specs[0][0].replace('==', '='), r.specs[0][1].split('.post')[0])]
elif r.specs and r.specs[0] and len(r.specs[0]) > 1:
# remove .post from version numbers it fails with ~= version, and change == to ~=
r.specs = [(r.specs[0][0].replace('==', '~='), r.specs[0][1].split('.post')[0])]
while reqs:
# notice, we give conda more freedom in version selection, to help it choose best combination
def clean_ver(ar):
if not ar.specs:
return ar.tostr()
ar.specs = [(ar.specs[0][0], ar.specs[0][1] + '.0' if '.' not in ar.specs[0][1] else ar.specs[0][1])]
return ar.tostr()
conda_env['dependencies'] = [clean_ver(r) for r in reqs]
with self.temp_file("conda_env", yaml.dump(conda_env), suffix=".yml") as name:
print('Conda: Trying to install requirements:\n{}'.format(conda_env['dependencies']))
if self.session.debug_mode:
print('{}:\n{}'.format(name, yaml.dump(conda_env)))
result = self._run_command(
("env", "update", "-p", self.path, "--file", name)
)
# check if we need to remove specific packages
bad_req = self._parse_conda_result_bad_packges(result)
if not bad_req:
break
solved = False
for bad_r in bad_req:
name = bad_r.split('[')[0].split('=')[0].split('~')[0].split('<')[0].split('>')[0]
# look for name in requirements
for r in reqs:
if r.name.lower() == name.lower():
pip_requirements.append(r)
reqs.remove(r)
solved = True
break
# we couldn't remove even one package,
# nothing we can do but try pip
if not solved:
pip_requirements.extend(reqs)
break
if pip_requirements:
try:
pip_req_str = [r.tostr() for r in pip_requirements if r.name not in ('pip', 'virtualenv', )]
print('Conda: Installing requirements: step 2 - using pip:\n{}'.format(pip_req_str))
PackageManager._selected_manager = self.pip
if self.session.debug_mode:
print('pip requirements.txt:\n{}'.format('\n'.join(pip_req_str)))
self.pip.load_requirements({'pip': '\n'.join(pip_req_str)})
except Exception as e:
print(e)
raise e
finally:
PackageManager._selected_manager = self
self.requirements_manager.post_install(self.session)
return True
def _parse_conda_result_bad_packges(self, result_dict):
if not result_dict:
return None
if 'bad_deps' in result_dict and result_dict['bad_deps']:
return result_dict['bad_deps']
if result_dict.get('error'):
error_lines = result_dict['error'].split('\n')
if error_lines[0].strip().lower().startswith("unsatisfiableerror:"):
empty_lines = [i for i, l in enumerate(error_lines) if not l.strip()]
if len(empty_lines) >= 2:
deps = error_lines[empty_lines[0]+1:empty_lines[1]]
try:
return yaml.load('\n'.join(deps), Loader=yaml.SafeLoader)
except:
return None
return None
def _run_command(self, command, raw=False, **kwargs):
# type: (Iterable[Text], bool, Any) -> Union[Dict, Text]
"""
Run a conda command, returning JSON output.
The command is prepended with 'conda' and run with JSON output flags.
:param command: command to run
:param raw: return text output and don't change command
:param kwargs: kwargs for Argv.get_output()
:return: JSON output or text output
"""
def escape_ansi(line):
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
# make sure we are not running it with our own PYTHONPATH
env = dict(**os.environ)
env.pop('PYTHONPATH', None)
command = Argv(*command) # type: Executable
if not raw:
command = (self.conda,) + command + ("--quiet", "--json")
try:
print('Executing Conda: {}'.format(command.serialize()))
result = command.get_output(stdin=DEVNULL, env=env, **kwargs)
if self.session.debug_mode:
print(result)
except Exception as e:
result = e.output if hasattr(e, 'output') else ''
if self.session.debug_mode:
print(result)
if raw:
raise
if raw:
return result
result = json.loads(escape_ansi(result)) if result else {}
if result.get('success', False):
print('Pass')
elif result.get('error'):
print('Conda error: {}'.format(result.get('error')))
return result
def get_python_command(self, extra=()):
if not self.source:
self._init_existing_environment(self.path)
return CommandSequence(self.source, self.pip.get_python_command(extra=extra))
def _get_conda_sh(self):
        # type: () -> Path
base_conda_env = Path(self.conda).parent.parent / 'etc' / 'profile.d' / 'conda.sh'
if base_conda_env.is_file():
return base_conda_env
for path in os.environ.get('PATH', '').split(select_for_platform(windows=';', linux=':')):
conda = find_executable("conda", path=path)
if not conda:
continue
conda_env = Path(conda).parent.parent / 'etc' / 'profile.d' / 'conda.sh'
if conda_env.is_file():
return conda_env
return base_conda_env
# enable hashing with cmp=False because pdb fails on un-hashable exceptions
exception = attrs(str=True, cmp=False)
@exception
class CondaException(Exception, NonStrictAttrs):
command = attrib()
message = attrib(default=None)
@exception
class UnknownCondaError(CondaException):
data = attrib(default=Factory(dict))
@exception
class PackagesNotFoundError(CondaException):
"""
Conda 4.5 exception - this reports all missing packages.
"""
packages = attrib(default=())
@exception
class PackageNotFoundError(CondaException):
"""
Conda 4.3 exception - this reports one missing package at a time,
as a singleton YAML list.
"""
pkg = attrib(default="", converter=lambda val: yaml.load(val, Loader=yaml.SafeLoader)[0].replace(" ", ""))
|
the-stack_106_27992 | import time
import torch
from lib.Utility.metrics import AverageMeter
from lib.Utility.metrics import accuracy
def train(dataset, model, criterion, epoch, optimizer, lr_scheduler, device, args):
"""
Trains/updates the model for one epoch on the training dataset.
Parameters:
        dataset: Dataset wrapper whose train_loader (torch.utils.data.DataLoader) is iterated for training
model (torch.nn.module): Model to be trained
criterion (torch.nn.criterion): Loss function
epoch (int): Continuous epoch counter
optimizer (torch.optim.optimizer): optimizer instance like SGD or Adam
lr_scheduler (Training.LearningRateScheduler): class implementing learning rate schedules
device (str): device name where data is transferred to
args (dict): Dictionary of (command line) arguments.
Needs to contain learning_rate (float), momentum (float),
weight_decay (float), nesterov momentum (bool), lr_dropstep (int),
lr_dropfactor (float), print_freq (int) and expand (bool).
"""
batch_time = AverageMeter()
data_time = AverageMeter()
cl_losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(dataset.train_loader):
input, target = input.to(device), target.to(device)
# measure data loading time
data_time.update(time.time() - end)
# adjust the learning rate if applicable
lr_scheduler.adjust_learning_rate(optimizer, i + 1)
# compute output
output = model(input)
# making targets one-hot for using BCEloss
target_temp = target
one_hot = torch.zeros(target.size(0), output.size(1)).to(device)
one_hot.scatter_(1, target.long().view(target.size(0), -1), 1)
target = one_hot
# compute loss and accuracy
loss = criterion(output, target)
        prec1, prec5 = accuracy(output, target_temp, (1, 5))
        # measure accuracy and record loss
        cl_losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
del output, input, target
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\t'.format(
epoch, i, len(dataset.train_loader), batch_time=batch_time,
data_time=data_time, loss=cl_losses, top1=top1, top5 = top5))
lr_scheduler.scheduler_epoch += 1
print(' * Train: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
print('=' * 80)
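# Example call (illustrative; assumes an object exposing .train_loader and an args namespace
# providing print_freq):
#     for epoch in range(num_epochs):
#         train(dataset, model, criterion, epoch, optimizer, lr_scheduler, device, args)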
|
the-stack_106_27993 | from typing_extensions import Final
from opentrons.hardware_control.emulation.settings import (
Settings, SmoothieSettings, PipetteSettings
)
from g_code_test_data.g_code_configuration import ProtocolGCodeConfirmConfig
import pytest
###################
# Shared Settings #
###################
SWIFT_SMOOTHIE_SETTINGS = Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
right=PipetteSettings(model="p300_multi_v2.1", id="P20SV202020070101"),
),
)
# Set up the temperature ramp.
SWIFT_SMOOTHIE_SETTINGS.thermocycler.lid_temperature.degrees_per_tick = 50
SWIFT_SMOOTHIE_SETTINGS.thermocycler.plate_temperature.degrees_per_tick = 50
SWIFT_SMOOTHIE_SETTINGS.tempdeck.temperature.degrees_per_tick = 50
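# (A large degrees_per_tick lets the emulated modules reach their targets quickly so the
# protocol runs stay fast; this is the assumed intent of the value 50.)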
S3_BASE: Final = "dev/protocol"
"""Base path of files in s3."""
##################
# Configurations #
##################
BASIC_SMOOTHIE = ProtocolGCodeConfirmConfig(
name='basic_smoothie',
path="protocol/protocols/fast/smoothie_protocol.py",
s3_path=f"{S3_BASE}/basic_smoothie.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
right=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
)
)
)
BECKMAN = ProtocolGCodeConfirmConfig(
name="beckman_coulter_rna_advance_viral_rna_isolation",
path="protocol/protocols/fast/beckman_coulter_rna_advance_viral_rna_isolation.py",
s3_path=f"{S3_BASE}/beckman_coulter_rna_advance_viral_rna_isolation.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p300_multi_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p20_multi_v2.1", id="P20SV202020070101")
)
)
)
CHERRY_PICKING = ProtocolGCodeConfirmConfig(
name='cherrypicking',
path="protocol/protocols/fast/cherrypicking.py",
s3_path=f"{S3_BASE}/cherrypicking.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
)
)
)
CUSTOMIZABLE_SERIAL_DILUTION = ProtocolGCodeConfirmConfig(
name="customizable_serial_dilution_ot2",
path="protocol/protocols/fast/customizable_serial_dilution_ot2.py",
s3_path=f"{S3_BASE}/customizable_serial_dilution_ot2.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p20_multi_v2.1", id="P20SV202020070101"),
),
)
)
TWO_SINGLE_CHANNEL = ProtocolGCodeConfirmConfig(
name='2_single_channel',
path="protocol/protocols/fast/2_single_channel_v2.py",
s3_path=f"{S3_BASE}/2_single_channel.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101"),
right=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
)
)
)
SET_MAX_SPEED = ProtocolGCodeConfirmConfig(
name="set_max_speed",
path="protocol/protocols/fast/set_max_speed.py",
s3_path=f"{S3_BASE}/set_max_speed.txt",
settings=SWIFT_SMOOTHIE_SETTINGS
)
TWO_MODULES = ProtocolGCodeConfirmConfig(
name='2_modules',
path="protocol/protocols/slow/2_modules_1s_1m_v2.py",
s3_path=f"{S3_BASE}/2_modules.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p20_multi_v2.1", id="P20SV202020070101"),
),
)
)
OPENTRONS_LOGO = ProtocolGCodeConfirmConfig(
name="opentrons_logo",
path="protocol/protocols/fast/opentrons_logo.py",
s3_path=f"{S3_BASE}/opentrons_logo.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_multi_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
),
)
)
OMEGA = ProtocolGCodeConfirmConfig(
name="omega_biotek_magbind_totalpure_ngs",
path="protocol/protocols/slow/omega_biotek_magbind_totalpure_ngs.py",
s3_path=f"{S3_BASE}/omega_biotek_magbind_totalpure_ngs.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p1000_single_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p300_single_v2.1", id="P20SV202020070101"),
),
)
)
ILLUMINA = ProtocolGCodeConfirmConfig(
name="illumina_nextera_xt_library_prep_part1",
path="protocol/protocols/fast/illumina_nextera_xt_library_prep_part1.py",
s3_path=f"{S3_BASE}/illumina_nextera_xt_library_prep_part1.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p20_multi_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p20_single_v2.0", id="P20SV202020070101")
)
)
)
PCR_PREP_PART_1 = ProtocolGCodeConfirmConfig(
name="pcr_prep_part_1",
path="protocol/protocols/fast/pcr_prep_part_1.py",
s3_path=f"{S3_BASE}/pcr_prep_part_1.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p1000_single_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p1000_single_v2.1", id="P20SV202020070101")
)
)
)
PCR_PREP_PART_2 = ProtocolGCodeConfirmConfig(
name="pcr_prep_part_2",
path="protocol/protocols/fast/pcr_prep_part_2.py",
s3_path=f"{S3_BASE}/pcr_prep_part_2.txt",
settings=Settings(
smoothie=SmoothieSettings(
left=PipetteSettings(model="p300_multi_v2.1", id="P20SV202020070101"),
right=PipetteSettings(model="p300_multi_v2.1", id="P20SV202020070101")
)
)
)
SWIFT_SMOKE = ProtocolGCodeConfirmConfig(
name='swift_smoke',
path="protocol/protocols/slow/swift_smoke.py",
s3_path=f"{S3_BASE}/swift_smoke.txt",
settings=SWIFT_SMOOTHIE_SETTINGS
)
SWIFT_TURBO = ProtocolGCodeConfirmConfig(
name='swift_turbo',
path="protocol/protocols/slow/swift_turbo.py",
s3_path=f"{S3_BASE}/swift_turbo.txt",
settings=SWIFT_SMOOTHIE_SETTINGS
)
SLOW_PROTOCOLS = [
OMEGA,
SWIFT_SMOKE,
SWIFT_TURBO,
TWO_MODULES,
]
for configuration in SLOW_PROTOCOLS:
configuration.add_mark(user_mark=pytest.mark.slow)
FAST_PROTOCOLS = [
BASIC_SMOOTHIE,
BECKMAN,
CHERRY_PICKING,
CUSTOMIZABLE_SERIAL_DILUTION,
ILLUMINA,
OPENTRONS_LOGO,
PCR_PREP_PART_1,
PCR_PREP_PART_2,
SET_MAX_SPEED,
TWO_SINGLE_CHANNEL,
]
PROTOCOL_CONFIGURATIONS = SLOW_PROTOCOLS + FAST_PROTOCOLS
|
the-stack_106_27994 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple
from weakref import proxy
import torch
import pytorch_lightning as pl
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.utilities import DistributedType, LightningEnum
from pytorch_lightning.utilities.warnings import WarningCache
log = logging.getLogger(__name__)
class MetricWarningCache(WarningCache):
def __init__(self):
super().__init__()
self.warned_metrics = []
warning_cache = MetricWarningCache()
class ResultStoreType(LightningEnum):
INSIDE_BATCH_TRAIN_LOOP = "inside_batch_train_loop"
OUTSIDE_BATCH_TRAIN_LOOP = "outside_batch_train_loop"
class HookResultStore:
"""
This class is defined for internal usage.
It holds all metrics logged using the self.log function
in the scope of ModelHooks or Callback functions.
We need to differentiate 3 different scenarios:
- (1): We are outside of a batch loop
* It means no dataloader_idx, no optimizer idx, etc..
- (2): We are inside the training batch loop
* We have an optimizer idx and split idx to track
- (3): We are inside the evaluation loop
* We have a dataloader_idx to track
    The `Result` objects for those 3 scenarios are stored in `self._internals`.
    (1): self._internals = {dataloader_idx: [Result(), ..., Result()]}
    * if dataloader_idx is not defined, it is set to 0 by default
    (2): self._internals = {dataloader_idx: {optimizer_idx: {batch_idx: [Result(), ..., Result()]}}}
    (3): Same as (1) for simplicity
    These data structures enable us to properly reduce the Result objects when the batch loop is finished.
"""
def __init__(self, fx_name: str, all_gather_fn: Callable, should_warn: bool) -> None:
self._fx_name = fx_name
self._all_gather_fn = all_gather_fn
self._should_warn = should_warn
self._internals = {}
self._internals_reduced = {}
self._internal_type = None
self.has_reduced = False
self._latest_ref = {}
@property
def has_several_dataloaders(self) -> bool:
return self.num_dataloaders > 1
@property
def num_dataloaders(self) -> int:
inter = self._internals_reduced if self.has_reduced else self._internals
return len(inter)
def check_dataloader_idx(self, result: Result) -> bool:
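        # inspect the meta of the last logged key to see whether a dataloader_idx was attached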
random_key = list(result.keys())[-1]
return result["meta"][random_key]["dataloader_idx"] is not None
def get_latest_from_func_name(self, latest_result_opt, func_name: str, *args, **kwargs) -> Dict:
results = {}
for opt_idx in latest_result_opt:
latest_result = latest_result_opt[opt_idx]
add_dataloader_idx = self.check_dataloader_idx(latest_result)
func = getattr(latest_result, func_name)
results.update(func(*args, add_dataloader_idx=add_dataloader_idx, **kwargs))
return results
def run_latest_batch_metrics_with_func_name(self, func_name, *args, **kwargs) -> List[Dict]:
"""
        This function uses cache_ref and cache_result to optimize loading metrics.
        Context: we update the logger_connector metrics on every `self.log` call,
        which can be pretty time consuming, especially when logging outside the batch loop.
        HookResultStore keeps track of its latest added result object,
        and caches its pbar and log metrics once they have been computed.
"""
return [
self.get_latest_from_func_name(self._latest_ref[dl_idx], func_name, *args, **kwargs)
for dl_idx in range(self.num_dataloaders)
]
def get_batch_pbar_metrics(self, *args, **kwargs):
return self.run_latest_batch_metrics_with_func_name("get_batch_pbar_metrics", *args, **kwargs)
def get_batch_log_metrics(self, *args, **kwargs):
return self.run_latest_batch_metrics_with_func_name("get_batch_log_metrics", *args, **kwargs)
def run_epoch_func(self, results, opt_metric, func_name, *args, **kwargs) -> None:
if not isinstance(opt_metric, Result):
raise Exception("The provided opt_metric should be a Result Object. Something is wrong")
func = getattr(opt_metric, func_name)
metrics_to_log = func(*args, add_dataloader_idx=self.has_several_dataloaders, **kwargs)
if self._should_warn:
for non_metric_key in opt_metric.get_non_metrics_keys():
if non_metric_key in metrics_to_log and non_metric_key not in warning_cache.warned_metrics:
metric = self._all_gather_fn(metrics_to_log[non_metric_key])
if any(metric[0] != m for m in metric[1:]):
warning_cache.warn(
f"The value associated to the key {non_metric_key}: {metric.cpu().tolist()} "
"doesn't appear to be the same accross all processes. "
"HINT: One could either do: `self.log(..., sync_dist=True, sync_fn=torch.mean)`"
" to force mean reduction across processes which can be inaccurate or implement"
" a `torchmetrics.Metric`"
)
warning_cache.warned_metrics.append(non_metric_key)
results.append(metrics_to_log)
def get_epoch_from_func_name(self, func_name, *args, **kwargs) -> List[Dict]:
results = []
for dl_idx in range(self.num_dataloaders):
opt_metrics = self._internals_reduced[dl_idx]
if isinstance(opt_metrics, defaultdict):
for opt_metric in opt_metrics.values():
self.run_epoch_func(results, opt_metric, func_name, *args, **kwargs)
else:
self.run_epoch_func(results, opt_metrics, func_name, *args, **kwargs)
return results
def get_epoch_pbar_metrics(self, *_, **__) -> List[Dict]:
return self.get_epoch_from_func_name("get_epoch_pbar_metrics")
def get_epoch_log_metrics(self, *_, **__) -> List[Dict]:
return self.get_epoch_from_func_name("get_epoch_log_metrics")
def get_forked_metrics(self, *_, **__) -> List[Dict]:
return self.get_epoch_from_func_name("get_forked_metrics")
def append(self, result: Result, info: Dict) -> None:
dataloader_idx = info["dataloader_idx"]
self._internal_type = info["type"]
opt_idx = info["opt_idx"]
if self._internal_type == ResultStoreType.INSIDE_BATCH_TRAIN_LOOP:
if dataloader_idx not in self._internals:
self._internals_reduced[dataloader_idx] = defaultdict(dict)
self._latest_ref[dataloader_idx] = {}
self._internals.setdefault(dataloader_idx, {})
batch_idx = info["batch_idx"]
self._internals[dataloader_idx].setdefault(opt_idx, {})
self._internals[dataloader_idx][opt_idx].setdefault(batch_idx, [])
self._internals[dataloader_idx][opt_idx][batch_idx].append(result)
else:
self._internals.setdefault(dataloader_idx, [])
self._internals[dataloader_idx].append(result)
self._latest_ref.setdefault(dataloader_idx, {})
self._latest_ref[dataloader_idx].setdefault(opt_idx, {})
self._latest_ref[dataloader_idx][opt_idx] = result
def auto_reduce_results_on_epoch_end(self) -> None:
"""
        This function is called to reduce the `self._internals` Result objects.
        The reduced Result objects will be saved into `self._internals_reduced`.
        The Result objects stored in `self._internals` will then be deleted to save memory.
"""
if self.has_reduced:
return
for dl_idx in range(self.num_dataloaders):
epoch_metrics = self._internals[dl_idx]
if self._internal_type == ResultStoreType.INSIDE_BATCH_TRAIN_LOOP:
for opt_idx in list(epoch_metrics):
                    # TODO: Figure out how to reduce memory
                    # TODO: How to start training in the middle of an epoch
outputs = epoch_metrics[opt_idx]
# reduce across time first
time_reduced_outputs = []
for tbptt_outputs in outputs.values():
tbptt_outputs = type(tbptt_outputs[0]).reduce_across_time(tbptt_outputs)
if len(tbptt_outputs) > 1:
time_reduced_outputs.append(tbptt_outputs)
if len(time_reduced_outputs) == 0:
continue
# reduce across training steps
outputs = type(time_reduced_outputs[0]).reduce_on_epoch_end(time_reduced_outputs)
# with manual opt need 1 + metrics because meta is always there
if outputs.minimize is not None:
outputs.minimize = outputs.minimize.mean()
self._internals_reduced[dl_idx][opt_idx] = outputs
# free memory
del self._internals[dl_idx][opt_idx]
else:
reduced_epoch_metrics = epoch_metrics[0]
if len(epoch_metrics) != 1:
reduced_epoch_metrics = type(reduced_epoch_metrics).reduce_on_epoch_end(epoch_metrics)
self._internals_reduced[dl_idx] = reduced_epoch_metrics
# free memory
del self._internals[dl_idx]
self.has_reduced = True
def __getitem__(self, key: str) -> Any:
return self._internals.get(key, None)
def __repr__(self):
return self._internals.__repr__()
class EpochResultStore:
"""
This class is defined for internal usage.
    It holds all metrics logged using the self.log function, stored in `HookResultStore` objects.
    The internal data structure is as follows:
self._internals = {"fx_name_0": HookResultStore(), ..., "fx_name_n": HookResultStore()}
Pseudo Code Example:
```
model._current_fx_name = 'something'
model._results = Result()
model.log('a', ...)
epoch_result_store.cache_result()
```
"""
def __init__(self, trainer: 'pl.Trainer') -> None:
self.trainer = proxy(trainer)
        # Add warning only for distributed (except rpc, as the main worker is running the code).
_should_warn = trainer.accelerator_connector.is_distributed
_should_warn &= not trainer.training_type_plugin.rpc_enabled
self._should_warn = _should_warn
self.reset()
def __getitem__(self, key: str) -> Any:
return self._internals.get(key, None)
@property
def info(self):
"""
        This function provides the necessary parameters to properly configure a HookResultStore object
"""
model_ref = self.trainer.lightning_module
return {
"batch_idx": self.trainer.batch_idx,
"fx_name": model_ref._current_hook_fx_name or model_ref._current_fx_name,
"dataloader_idx": model_ref._current_dataloader_idx or 0,
"opt_idx": self._opt_idx or 0,
"split_idx": self._split_idx or 0,
"type": (
ResultStoreType.INSIDE_BATCH_TRAIN_LOOP if self._opt_idx is not None and self._split_idx is not None
else ResultStoreType.OUTSIDE_BATCH_TRAIN_LOOP
)
}
def reset_model(self):
"""
This function is used to reset model state at the end of the capture
"""
model_ref = self.trainer.lightning_module
model_ref._results = Result()
model_ref._current_hook_fx_name = None
model_ref._current_fx_name = ''
def cache_result(self) -> None:
"""
        This function is called after every hook
        and stores the result object
"""
with self.trainer.profiler.profile("cache_result"):
model_ref = self.trainer.lightning_module
# extract hook results
hook_result = model_ref._results
if len(hook_result) == 1:
model_ref._current_hook_fx_name = None
model_ref._current_fx_name = ''
return
info = self.info
fx_name = info["fx_name"]
all_gather_fn = self.trainer.lightning_module.all_gather
self._internals.setdefault(fx_name, HookResultStore(fx_name, all_gather_fn, self._should_warn))
# attach capture batch_size
Result.attach_batch_size(self._batch_size, hook_result)
hook_result = hook_result.detach()
if self.trainer.move_metrics_to_cpu:
hook_result = hook_result.cpu()
elif self.trainer._distrib_type == DistributedType.DP:
hook_result = hook_result.to(torch.device("cuda", self.trainer.root_gpu))
self._internals[fx_name].append(hook_result, info)
# update logged_metrics, progress_bar_metrics, callback_metrics
if "epoch_end" in fx_name:
self.update_logger_connector()
self.reset_model()
def update_logger_connector(self) -> Tuple[Dict, Dict]:
"""
This function is called every time we capture a hook
        It automatically updates the following logger_connector attributes:
- progress_bar_metrics with pbar_metrics
- logged_metrics with log_metrics
- callback_metrics with progress_bar_metrics + logged_metrics
"""
logger_connector = self.trainer.logger_connector
callback_metrics = {}
batch_pbar_metrics = {}
batch_log_metrics = {}
if not self._has_batch_loop_finished:
# get pbar
batch_pbar_metrics = self.get_latest_batch_pbar_metrics()
logger_connector.add_progress_bar_metrics(batch_pbar_metrics)
batch_log_metrics = self.get_latest_batch_log_metrics()
if self.trainer.training:
logger_connector._logged_metrics.update(batch_log_metrics)
callback_metrics.update(batch_pbar_metrics)
callback_metrics.update(batch_log_metrics)
else:
# get pbar
epoch_pbar_metrics = self.get_epoch_pbar_metrics()
logger_connector.add_progress_bar_metrics(epoch_pbar_metrics)
# get logged_metrics
epoch_log_metrics = self.get_epoch_log_metrics()
logger_connector._logged_metrics.update(epoch_log_metrics)
logger_connector._logged_metrics.update({"epoch": self.trainer.current_epoch})
# get forked_metrics
forked_metrics = self.get_forked_metrics()
callback_metrics.update(epoch_pbar_metrics)
callback_metrics.update(epoch_log_metrics)
callback_metrics.update(forked_metrics)
# TODO(carmocca): when we implement flushing the logger connector metrics after
# the trainer.state changes, this should check trainer.evaluating instead
if self.trainer.state in (TrainerState.TESTING, TrainerState.VALIDATING):
logger_connector.evaluation_callback_metrics.update(callback_metrics)
# update callback_metrics
logger_connector._callback_metrics.update(callback_metrics)
batch_pbar_metrics.pop("debug_epoch", None)
return batch_pbar_metrics, batch_log_metrics
def run_batch_from_func_name(self, func_name) -> Dict:
results = [getattr(hook_result, func_name) for hook_result in self._internals.values()]
results = [func(include_forked_originals=False) for func in results]
return {k: v for d in sum(results, []) for k, v in d.items()} # List[List[dict]] -> dict
def get_latest_batch_log_metrics(self) -> Dict:
batch_log_metrics = self.run_batch_from_func_name("get_batch_log_metrics")
return batch_log_metrics
def get_latest_batch_pbar_metrics(self) -> Dict:
batch_pbar_metrics = self.run_batch_from_func_name("get_batch_pbar_metrics")
return batch_pbar_metrics
@property
def has_reduced(self) -> bool:
hook_results = self._internals.values()
return len(hook_results) == sum(h.has_reduced for h in hook_results)
def auto_reduce_results_on_epoch_end(self) -> None:
if not self.has_reduced:
for hook_result in self._internals.values():
hook_result.auto_reduce_results_on_epoch_end()
@property
def has_batch_loop_finished(self) -> bool:
return self._has_batch_loop_finished
@has_batch_loop_finished.setter
def has_batch_loop_finished(self, has_batch_loop_finished):
if has_batch_loop_finished:
# If batch loop has finished, reduce metrics
self.auto_reduce_results_on_epoch_end()
# batch_size should be none as we finished batch loop
self._batch_size = None
self._has_batch_loop_finished = has_batch_loop_finished
self.update_logger_connector()
def run_epoch_by_func_name(self, func_name) -> Dict:
if not self.has_reduced:
self.auto_reduce_results_on_epoch_end()
results = [getattr(hook_result, func_name) for hook_result in self._internals.values()]
results = [func() for func in results]
return {k: v for d in sum(results, []) for k, v in d.items()} # List[List[dict]] -> dict
def get_epoch_pbar_metrics(self) -> Dict:
return self.run_epoch_by_func_name("get_epoch_pbar_metrics")
def get_epoch_log_metrics(self) -> Dict:
return self.run_epoch_by_func_name("get_epoch_log_metrics")
def get_forked_metrics(self) -> Dict:
return self.run_epoch_by_func_name("get_forked_metrics")
def reset(self):
self._internals = {}
self._dataloader_idx: Optional[int] = None
self._split_idx: Optional[int] = None
self._opt_idx: Optional[int] = None
self._batch_size: Optional[int] = None
self._has_batch_loop_finished = False
def __call__(
self,
fx_name: str,
dl_idx: Optional[int] = None,
opt_idx: Optional[int] = None,
batch_idx: Optional[int] = None,
split_idx: Optional[int] = None,
reduced: bool = False,
):
"""
        This function is a helper to access stored data.
        It accesses data from the HookResultStore. Please
        check its data structure for a better understanding.
Data can be accessed with the following chains:
IF REDUCED:
* IF accessing a fx_name defined in batch training loop:
fx_name -> dl_idx -> opt_idx -> batch_idx -> split_idx
* ELSE fx_name -> dl_idx -> batch_idx
ELSE:
* IF accessing a fx_name defined in batch training loop:
fx_name -> dl_idx -> opt_idx
* ELSE fx_name -> dl_idx
Note:
As soon as a param is None, it breaks the chain and returns associated stored data.
Example::
result: Result = self(fx_name="training_step", dl_idx=0, opt_idx=0, reduced=True)
result['train_loss_epoch'] # aggregated train_loss over one epoch.
Args:
fx_name: Hook name from ModelHooks or Callback. Example: ``"training_step"``
dl_idx: Dataloader index in short. From ``0`` to ``num_dataloaders - 1``
opt_idx: Optimizer index in short. From ``0`` to ``num_optimizers - 1``
batch_idx: Batch index seen during batch training or evaluation.
Works only with ``reduced=False``
            split_idx: Index of the split in the training loop when truncated backprop through time (tbptt) is used.
reduced: Data are being aggregated on on_epoch_end.
Indicates if we want to access the aggregated Result or not.
"""
hook_result = self[fx_name]
internal_type = hook_result._internal_type
result = hook_result._internals_reduced if reduced else hook_result._internals
if dl_idx is not None:
result = result[dl_idx]
if internal_type == ResultStoreType.INSIDE_BATCH_TRAIN_LOOP:
if opt_idx is not None:
result = result[opt_idx]
if not reduced and batch_idx is not None:
result = result[batch_idx]
if split_idx is not None:
result = result[split_idx]
elif not reduced and batch_idx is not None:
result = result[batch_idx]
return result
def __repr__(self):
return f"{self.__class__.__name__}(internals={self._internals})"
|
the-stack_106_27995 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# (c) 2013, Yeukhon Wong <[email protected]>
# (c) 2014, Nate Coraor <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: hg
short_description: Manages Mercurial (hg) repositories.
description:
- Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address.
version_added: "1.0"
author: "Yeukhon Wong (@yeukhon)"
options:
repo:
description:
- The repository address.
required: true
default: null
aliases: [ name ]
dest:
description:
- Absolute path of where the repository should be cloned to.
This parameter is required, unless clone and update are set to no
required: true
default: null
revision:
description:
- Equivalent C(-r) option in hg command which could be the changeset, revision number,
branch name or even tag.
required: false
default: null
aliases: [ version ]
force:
description:
- Discards uncommitted changes. Runs C(hg update -C). Prior to
1.9, the default was `yes`.
required: false
default: "no"
choices: [ "yes", "no" ]
purge:
description:
- Deletes untracked files. Runs C(hg purge).
required: false
default: "no"
choices: [ "yes", "no" ]
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
description:
- If C(no), do not retrieve new revisions from the origin repository
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.3"
description:
- If C(no), do not clone the repository if it does not exist locally.
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to hg executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
requirements: [ ]
'''
EXAMPLES = '''
# Ensure the current working copy is inside the stable branch and deletes untracked files if any.
- hg:
repo: https://bitbucket.org/user/repo1
dest: /home/user/repo1
revision: stable
purge: yes
# Example just get information about the repository whether or not it has
# already been cloned locally.
- hg:
repo: git://bitbucket.org/user/repo
dest: /srv/checkout
clone: no
update: no
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class Hg(object):
def __init__(self, module, dest, repo, revision, hg_path):
self.module = module
self.dest = dest
self.repo = repo
self.revision = revision
self.hg_path = hg_path
def _command(self, args_list):
(rc, out, err) = self.module.run_command([self.hg_path] + args_list)
return (rc, out, err)
def _list_untracked(self):
args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
return self._command(args)
def get_revision(self):
"""
hg id -b -i -t returns a string in the format:
"<changeset>[+] <branch_name> <tag>"
This format lists the state of the current working copy,
and indicates whether there are uncommitted changes by the
plus sign. Otherwise, the sign is omitted.
Read the full description via hg id --help
"""
(rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
if rc != 0:
self.module.fail_json(msg=err)
else:
return to_native(out).strip('\n')
def get_remote_revision(self):
(rc, out, err) = self._command(['id', self.repo])
if rc != 0:
self.module.fail_json(msg=err)
else:
return to_native(out).strip('\n')
def has_local_mods(self):
now = self.get_revision()
if '+' in now:
return True
else:
return False
def discard(self):
before = self.has_local_mods()
if not before:
return False
args = ['update', '-C', '-R', self.dest, '-r', '.']
(rc, out, err) = self._command(args)
if rc != 0:
self.module.fail_json(msg=err)
after = self.has_local_mods()
if before != after and not after: # no more local modification
return True
def purge(self):
# before purge, find out if there are any untracked files
(rc1, out1, err1) = self._list_untracked()
if rc1 != 0:
self.module.fail_json(msg=err1)
        # there are some untracked files
if out1 != '':
args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
(rc2, out2, err2) = self._command(args)
if rc2 != 0:
self.module.fail_json(msg=err2)
return True
else:
return False
def cleanup(self, force, purge):
discarded = False
purged = False
if force:
discarded = self.discard()
if purge:
purged = self.purge()
if discarded or purged:
return True
else:
return False
def pull(self):
return self._command(
['pull', '-R', self.dest, self.repo])
def update(self):
if self.revision is not None:
return self._command(['update', '-r', self.revision, '-R', self.dest])
return self._command(['update', '-R', self.dest])
def clone(self):
if self.revision is not None:
return self._command(['clone', self.repo, self.dest, '-r', self.revision])
return self._command(['clone', self.repo, self.dest])
@property
def at_revision(self):
"""
There is no point in pulling from a potentially down/slow remote site
if the desired changeset is already the current changeset.
"""
if self.revision is None or len(self.revision) < 7:
# Assume it's a rev number, tag, or branch
return False
(rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
if rc != 0:
self.module.fail_json(msg=err)
if out.startswith(self.revision):
return True
return False
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
repo = dict(required=True, aliases=['name']),
dest = dict(type='path'),
revision = dict(default=None, aliases=['version']),
force = dict(default='no', type='bool'),
purge = dict(default='no', type='bool'),
update = dict(default='yes', type='bool'),
clone = dict(default='yes', type='bool'),
executable = dict(default=None),
),
)
repo = module.params['repo']
dest = module.params['dest']
revision = module.params['revision']
force = module.params['force']
purge = module.params['purge']
update = module.params['update']
clone = module.params['clone']
hg_path = module.params['executable'] or module.get_bin_path('hg', True)
if dest is not None:
hgrc = os.path.join(dest, '.hg/hgrc')
# initial states
before = ''
changed = False
cleaned = False
if not dest and (clone or update):
module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")
hg = Hg(module, dest, repo, revision, hg_path)
# If there is no hgrc file, then assume repo is absent
# and perform clone. Otherwise, perform pull and update.
if not clone and not update:
out = hg.get_remote_revision()
module.exit_json(after=out, changed=False)
if not os.path.exists(hgrc):
if clone:
(rc, out, err) = hg.clone()
if rc != 0:
module.fail_json(msg=err)
else:
module.exit_json(changed=False)
elif not update:
# Just return having found a repo already in the dest path
before = hg.get_revision()
elif hg.at_revision:
# no update needed, don't pull
before = hg.get_revision()
# but force and purge if desired
cleaned = hg.cleanup(force, purge)
else:
# get the current state before doing pulling
before = hg.get_revision()
# can perform force and purge
cleaned = hg.cleanup(force, purge)
(rc, out, err) = hg.pull()
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = hg.update()
if rc != 0:
module.fail_json(msg=err)
after = hg.get_revision()
if before != after or cleaned:
changed = True
module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
if __name__ == '__main__':
main()
|
the-stack_106_27997 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import os
import shutil
import subprocess
import sys
from pathlib import Path
import click
from rich import print
@click.group(name="zoo")
def zoo_cli():
pass
@zoo_cli.command(name="build", help="Build a policy")
@click.argument("policy", type=click.Path(exists=True), metavar="<policy>")
def build_policy(policy):
def clean():
subprocess.check_call([sys.executable, "setup.py", "clean", "--all"])
def build():
cwd = Path(os.getcwd())
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"])
results = sorted(glob.glob("./dist/*.whl"), key=os.path.getmtime, reverse=True)
assert len(results) > 0, f"No policy package was built at path={cwd}"
wheel = Path(results[0])
dst_path = cwd / wheel.name
shutil.move(wheel.resolve(), cwd / wheel.name)
return dst_path
os.chdir(policy)
clean()
wheel_path = build()
clean()
print(
f"""
Policy built successfully and is available at,
\t[bold]{wheel_path}[/bold]
You can now add it to the policy zoo if you want to make it available to scenarios.
"""
)
@zoo_cli.command(
name="manager",
help="Start the manager process which instantiates workers. Workers execute remote agents.",
)
@click.argument("port", default=7432, type=int)
def manager(port):
from smarts.zoo import manager as zoo_manager
zoo_manager.serve(port)
@zoo_cli.command(
name="install",
help="Attempt to install the specified agents from the given paths/url",
)
@click.argument(
"agent_paths",
type=click.Path(exists=True),
metavar="<script>",
nargs=-1,
required=True,
)
def install_agents(agent_paths):
if not agent_paths:
# nargs=-1 in combination with a default value is not supported
# if agent_paths is not given, set the known two zoo agent paths as default
agent_paths = ["zoo/policies/open-agent", "zoo/policies/rl-agent"]
pip_install_cmd = [
"pip",
"install",
".",
]
def clean(exec_dir):
subprocess.check_call(
[sys.executable, "setup.py", "clean", "--all"],
cwd=exec_dir,
)
for agent_path in agent_paths:
policy_dir = os.path.join(os.getcwd(), agent_path)
clean(policy_dir)
proc = subprocess.Popen(
pip_install_cmd,
stderr=subprocess.PIPE,
cwd=policy_dir,
)
        # communicate() waits for the process and drains stderr, avoiding a
        # potential deadlock on a full pipe; stdout is not captured here.
        _, std_err = proc.communicate()
if proc.returncode != 0:
click.echo(
f"{agent_path} may be already installed. Check Error output for more details!"
)
click.echo(std_err)
else:
click.echo(f"Installed {agent_path} successfully")
zoo_cli.add_command(build_policy)
zoo_cli.add_command(manager)
zoo_cli.add_command(install_agents)
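# Illustrative usage of this command group (not part of the original file). The
# "scl zoo" entry-point name is an assumption about how SMARTS registers it:
#
#   scl zoo build zoo/policies/open-agent
#   scl zoo install zoo/policies/open-agent zoo/policies/rl-agent
#   scl zoo manager 7432
#
# The group can also be exercised programmatically, e.g. in tests:
#
#   from click.testing import CliRunner
#   runner = CliRunner()
#   result = runner.invoke(zoo_cli, ["install", "zoo/policies/open-agent"])
#   print(result.exit_code, result.output)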
|
the-stack_106_27999 | from freilanz.logging import logger
from freilanz.helper import FREILANZ_ROOT_DIR, make_dir, CONFIG_FILE_NAME
from freilanz.config import init_base_config
log = logger(__name__)
def init(click, *args, **kwargs):
log.info('start init process')
click.echo('Starting initializing process')
root_dir = FREILANZ_ROOT_DIR
if root_dir:
root_dir_question = click.confirm(
f'Should we use {FREILANZ_ROOT_DIR} as the root dir?')
if not root_dir_question:
root_dir = click.prompt('Where do you want to set the root dir?')
click.echo(f'Set up root dir {root_dir}')
make_dir(root_dir, exist_ok=True)
    config_created = init_base_config(**kwargs, root_dir=root_dir)
    if config_created:
click.echo(f'Added base config file to {root_dir}/{CONFIG_FILE_NAME}')
open_it = click.confirm('Would you like to open the config file?')
if open_it:
click.edit(filename=f'{root_dir}/{CONFIG_FILE_NAME}') |
the-stack_106_28004 | import types
from collections import deque, namedtuple
from pycsp3.classes.entities import Node, TypeNode
from pycsp3.classes.main.constraints import (
ScalarProduct, PartialConstraint, ConstraintSum, ConstraintElement, ConstraintElementMatrix, ConstraintInstantiation, ECtr, auxiliary)
from pycsp3.classes.main.variables import Variable, VariableInteger, NotVariable
from pycsp3.libs.forbiddenfruit import curse
from pycsp3.tools.inspector import checkType
from pycsp3.tools.utilities import flatten, is_containing, unique_type_in, is_1d_tuple, is_1d_list, is_2d_list, is_matrix, ANY, error_if
queue_in = deque() # To store partial constraints when using the IN operator
def cursing():
def _dict_add(self, other): # for being able to merge dictionaries
if isinstance(other, dict):
d = self.copy()
d.update(other)
return d
raise NotImplementedError # return save_dict_add(self, other)
def _tuple_mul(self, other): # for being able to use scalar products
if is_containing(self, (Variable, Node), check_first_only=True):
return ScalarProduct(self, other)
if is_containing(self, int) and isinstance(other, (list, tuple)) and is_containing(other, (Variable, Node), check_first_only=True):
return ScalarProduct(other, self)
return tuple.__mul__(self, other)
def _list_mul(self, other): # for being able to use scalar products
if is_containing(self, (Variable, Node), check_first_only=True):
return ScalarProduct(self, other)
return list.__mul__(self, other)
def _tuple_contains(self, other):
if not OpOverrider.activated:
return self.__contains__(other)
if is_containing(other, Variable) and len(self) > 0 and isinstance(self[0], (tuple, int)):
queue_in.append((list(self), other))
return True
if isinstance(other, int) and (is_1d_list(self, Variable) or is_1d_tuple(self, Variable)): # member/element constraint
queue_in.append((self, other))
return True
return self.__contains__(other)
def _list_contains(self, other): # for being able to use 'in' when expressing extension constraints
if not OpOverrider.activated:
return self.__contains__(other)
if isinstance(other, types.GeneratorType):
other = list(other)
if is_containing(other, Variable) and len(self) > 0 and isinstance(self[0], (list, tuple, int)):
queue_in.append((self, other))
return True
if is_containing(other, Variable) and len(self) == 0:
return other in set(self)
error_if(is_containing(other, Variable),
"It seems that you should use a set and not a list, as in x in {...}." + " Your arguments are " + str(other) + " " + str(self))
if isinstance(other, int) and (is_1d_list(self, Variable) or is_1d_tuple(self, Variable)): # member/element constraint
queue_in.append((self, other))
return True
return self.__contains__(other)
def _set_contains(self, other): # for being able to use 'in' when expressing intension/extension constraints
if not OpOverrider.activated:
return self.__contains__(other)
if isinstance(other, types.GeneratorType):
other = list(other)
tself = unique_type_in(self)
# if isinstance(other, Variable) and len(self) > 0 and is_containing(self, int): # unary table constraint
if isinstance(other, Variable) and tself in {int, str}: # unary table constraint
queue_in.append((list(self), other))
return True
# if isinstance(other, (Variable, PartialConstraint)) or isinstance(other, (int, str)) and is_containing(self, Variable): # intension constraint
if isinstance(other, (Variable, PartialConstraint)) or isinstance(other, (int, str)) and tself and issubclass(tself, Variable): # intension constraint
queue_in.append((self, other))
return True
# if is_1d_tuple(other, Variable) or is_1d_list(other, Variable): # non-unary table constraint
# queue_in.append((list(self), other))
# return True
if is_containing(other, Variable): # non-unary table constraint
queue_in.append((list(self), flatten(other)))
return True
return self.__contains__(other)
def _range_contains(self, other): # for being able to use 'in' when expressing conditions of constraints
if not OpOverrider.activated:
return range.__contains__(other)
if isinstance(other, ScalarProduct):
other = PartialConstraint(ConstraintSum(other.variables, other.coeffs, None)) # functions.Sum(other)
if isinstance(other, Variable): # unary table constraint (based on a range)
queue_in.append((list(self), other))
return True
if isinstance(other, PartialConstraint):
queue_in.append((self, other))
return True
return range.__contains__(self, other)
def _enumerate_contains(self, other):
if not OpOverrider.activated:
return self.__contains__(other)
if is_containing(other, Variable):
tmp = list(self)
if len(tmp) > 0 and isinstance(tmp[0], (tuple, int)):
queue_in.append((tmp, other))
return True
return self.__contains__(other)
curse(dict, "__add__", _dict_add)
curse(tuple, "__mul__", _tuple_mul)
curse(list, "__mul__", _list_mul)
curse(tuple, "__contains__", _tuple_contains)
curse(list, "__contains__", _list_contains)
curse(set, "__contains__", _set_contains)
curse(range, "__contains__", _range_contains)
curse(enumerate, "__contains__", _enumerate_contains)
cursing()
class OpOverrider:
activated = False
@staticmethod
def enable():
OpOverrider.activated = True
ListVar.__eq__ = OpOverrider.__eq__lv
ListVar.__getitem__ = OpOverrider.__getitem__lv
ListInt.__getitem__ = OpOverrider.__getitem__li
ListInt.__contains__ = OpOverrider.__contains__li
Variable.__eq__ = Node.__eq__ = OpOverrider.__eq__
Variable.__ne__ = Node.__ne__ = OpOverrider.__ne__
Variable.__lt__ = Node.__lt__ = OpOverrider.__lt__
Variable.__le__ = Node.__le__ = OpOverrider.__le__
Variable.__ge__ = Node.__ge__ = OpOverrider.__ge__
Variable.__gt__ = Node.__gt__ = OpOverrider.__gt__
Variable.__add__ = Node.__add__ = OpOverrider.__add__
Variable.__radd__ = Node.__radd__ = OpOverrider.__radd__
Variable.__sub__ = Node.__sub__ = OpOverrider.__sub__
Variable.__rsub__ = Node.__rsub__ = OpOverrider.__rsub__
Variable.__mul__ = Node.__mul__ = OpOverrider.__mul__
Variable.__rmul__ = Node.__rmul__ = OpOverrider.__rmul__
Variable.__pow__ = Node.__pow__ = OpOverrider.__pow__
Variable.__mod__ = Node.__mod__ = OpOverrider.__mod__
Variable.__floordiv__ = Node.__floordiv__ = OpOverrider.__floordiv__
Variable.__rfloordiv__ = Node.__rfloordiv__ = OpOverrider.__rfloordiv__
Variable.__and__ = Node.__and__ = OpOverrider.__and__
Variable.__or__ = Node.__or__ = OpOverrider.__or__
Variable.__invert__ = Node.__invert__ = OpOverrider.__invert__
Variable.__xor__ = Node.__xor__ = OpOverrider.__xor__
@staticmethod
def disable():
OpOverrider.activated = False
ListVar.__eq__ = list.__eq__
ListVar.__getitem__ = list.__getitem__
ListInt.__getitem__ = list.__getitem__
ListInt.__contains__ = list.__contains__
Variable.__eq__ = Node.__eq__ = object.__eq__
Variable.__ne__ = Node.__ne__ = object.__ne__
Variable.__lt__ = Node.__lt__ = object.__lt__
Variable.__le__ = Node.__le__ = object.__le__
Variable.__ge__ = Node.__ge__ = object.__ge__
Variable.__gt__ = Node.__gt__ = object.__gt__
Variable.__add__ = Node.__add__ = None
Variable.__radd__ = Node.__radd__ = None
Variable.__sub__ = Node.__sub__ = None
Variable.__rsub__ = Node.__rsub__ = None
Variable.__mul__ = Node.__mul__ = None
Variable.__rmul__ = Node.__rmul__ = None
Variable.__pow__ = Node.__pow__ = None
Variable.__mod__ = Node.__mod__ = None
Variable.__floordiv__ = Node.__floordiv__ = None
Variable.__rfloordiv__ = Node.__rfloordiv__ = None
Variable.__and__ = Node.__and__ = None
Variable.__or__ = Node.__or__ = None
Variable.__invert__ = Node.__invert__ = None
Variable.__xor__ = Node.__xor__ = None
return OpOverrider
@staticmethod
def execute(arg):
OpOverrider.enable()
return arg
@staticmethod
def project_recursive(t, indexes, dimension):
index = slice(None, None, None) if indexes[dimension] == ANY else indexes[dimension]
if isinstance(index, int):
if isinstance(t, list):
t = t[index] # to keep the shape (dimensions), we need to do that
else:
return t
elif isinstance(index, slice):
t = list.__getitem__(t, index)
else:
raise TypeError()
if isinstance(t, list) and dimension + 1 < len(indexes):
if not isinstance(index, int):
for i, element in enumerate(t):
t[i] = OpOverrider.project_recursive(element, indexes, dimension + 1)
else:
t = OpOverrider.project_recursive(t, indexes, dimension + 1)
return t
def __add__(self, other):
if isinstance(other, PartialConstraint):
return PartialConstraint.combine_partial_objects(self, TypeNode.ADD, other)
return Node.build(TypeNode.ADD, self, other)
def __radd__(self, other):
return Node.build(TypeNode.ADD, other, self)
def __sub__(self, other):
if isinstance(other, PartialConstraint):
return PartialConstraint.combine_partial_objects(self, TypeNode.SUB, other)
return Node.build(TypeNode.SUB, self, other)
def __rsub__(self, other):
return Node.build(TypeNode.SUB, other, self)
def __mul__(self, other):
return Node.build(TypeNode.MUL, self, other)
def __rmul__(self, other):
return Node.build(TypeNode.MUL, other, self)
def __mod__(self, other):
return Node.build(TypeNode.MOD, self, other)
def __pow__(self, other):
return Node.build(TypeNode.POW, self, other)
def __floordiv__(self, other):
return Node.build(TypeNode.DIV, self, other)
def __rfloordiv__(self, other):
return Node.build(TypeNode.DIV, other, self)
def __lt__(self, other):
if self is None or other is None:
return object.__lt__(self, other)
return PartialConstraint.__gt__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.LT, self, other)
def __le__(self, other):
if self is None or other is None:
return object.__le__(self, other)
return PartialConstraint.__ge__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.LE, self, other)
def __ge__(self, other):
if self is None or other is None:
return object.__ge__(self, other)
if isinstance(other, int) and other == 1 and isinstance(self, Node) and self.type == TypeNode.DIST: # we simplify the expression
return Node.build(TypeNode.NE, self.sons[0], self.sons[1])
return PartialConstraint.__le__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.GE, self, other)
def __gt__(self, other):
if self is None or other is None:
return object.__gt__(self, other)
if isinstance(other, int) and other == 0 and isinstance(self, Node) and self.type == TypeNode.DIST: # we simplify the expression
return Node.build(TypeNode.NE, self.sons[0], self.sons[1])
return PartialConstraint.__lt__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.GT, self, other)
def __eq__(self, other):
if self is None or other is None:
return object.__eq__(self, other)
if isinstance(other, int) and other == 0 and isinstance(self, Node) and self.type == TypeNode.DIST: # we simplify the expression
return Node.build(TypeNode.EQ, self.sons[0], self.sons[1])
return PartialConstraint.__eq__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.EQ, self, other)
def __ne__(self, other):
if self is None or other is None:
return object.__ne__(self, other)
if isinstance(other, int) and other == 0 and isinstance(self, Node) and self.type == TypeNode.DIST: # we simplify the expression
return Node.build(TypeNode.NE, self.sons[0], self.sons[1])
return PartialConstraint.__ne__(other, self) if isinstance(other, PartialConstraint) else Node.build(TypeNode.NE, self, other)
def __or__(self, other):
return object.__or__(self, other) if None in {self, other} else Node.disjunction(self, other)
def __and__(self, other):
return object.__and__(self, other) if None in {self, other} else Node.conjunction(self, other)
def __invert__(self):
return NotVariable(self) if isinstance(self, VariableInteger) else Node.build(TypeNode.NOT, self)
def __xor__(self, other):
return object.__xor__(self, other) if None in {self, other} else Node.build(TypeNode.XOR, self, other)
def __eq__lv(self, other): # lv for ListVar
def Instantiation(*, variables, values):
variables = flatten(variables)
values = flatten(values) if not isinstance(values, range) else list(values)
checkType(variables, [Variable])
checkType(values, (int, [int]))
if len(variables) == 0:
return None
if len(values) == 1 and len(variables) > 1:
values = [values[0]] * len(variables)
return ConstraintInstantiation(variables, values)
if isinstance(other, (list, tuple)) and any(isinstance(v, int) for v in other):
return ECtr(Instantiation(variables=self, values=other))
return list.__eq__(self, other)
def __getitem__lv(self, indexes):
if isinstance(indexes, PartialConstraint):
indexes = auxiliary().replace_partial_constraint(indexes)
if isinstance(indexes, Variable):
return PartialConstraint(ConstraintElement(self, indexes))
if isinstance(indexes, tuple) and len(indexes) > 0:
indexes = auxiliary().replace_partial_constraints(list(indexes))
if any(isinstance(i, Variable) for i in indexes): # this must be a constraint Element-Matrix
assert is_matrix(self) and len(indexes) == 2, "A matrix is expected, with two indexes"
if all(isinstance(i, Variable) for i in indexes):
return PartialConstraint(ConstraintElementMatrix(self, indexes[0], indexes[1]))
else:
if isinstance(indexes[0], Variable) and isinstance(indexes[1], int):
return PartialConstraint(ConstraintElement(self[:, indexes[1]], indexes[0]))
elif isinstance(indexes[0], int) and isinstance(indexes[1], Variable):
return PartialConstraint(ConstraintElement(self[indexes[0]], indexes[1]))
else:
assert False
result = OpOverrider.project_recursive(self, indexes, 0)
try:
return ListVar(result) # TODO are sublists also guaranteed to be ListVar?
except TypeError:
return result
result = list.__getitem__(self, indexes)
try:
return ListVar(result)
except TypeError:
return result
def __getitem__li(self, indexes): # li for ListInt
if isinstance(indexes, PartialConstraint):
indexes = auxiliary().replace_partial_constraint(indexes)
if isinstance(indexes, Variable):
return PartialConstraint(ConstraintElement(self, indexes))
if isinstance(indexes, tuple) and len(indexes) > 0:
indexes = auxiliary().replace_partial_constraints(list(indexes))
if any(isinstance(i, Variable) for i in indexes): # this must be a constraint Element-Matrix
assert is_matrix(self) and len(indexes) == 2, "A matrix is expected, with two indexes"
if all(isinstance(i, Variable) for i in indexes):
return PartialConstraint(ConstraintElementMatrix(self, indexes[0], indexes[1]))
else:
if isinstance(indexes[0], Variable) and isinstance(indexes[1], int):
return PartialConstraint(ConstraintElement(self[:, indexes[1]], indexes[0]))
elif isinstance(indexes[0], int) and isinstance(indexes[1], Variable):
return PartialConstraint(ConstraintElement(self[indexes[0]], indexes[1]))
else:
assert False
result = OpOverrider.project_recursive(self, indexes, 0)
try:
return ListVar(result) # TODO is it ListVar or ListInt ?
except TypeError:
return result
result = list.__getitem__(self, indexes)
try:
return ListInt(result)
except TypeError:
return result
def __contains__li(self, other):
if is_containing(other, Variable) and len(self) > 0 and isinstance(self[0], (tuple, int)):
queue_in.append((self, other))
return True
return list.__contains__(self, other)
class ListInt(list):
def __init__(self, integers):
super().__init__(integers) # self.extend(integers)
def __getslice__(self, i, j):
return ListInt(super().__getslice__(i, j))
def __add__(self, other):
return ListInt(super().__add__(other))
def __mul__(self, other):
if is_containing(other, (Variable, Node)):
return ScalarProduct(other, self)
assert is_containing(self, (Variable, Node))
return ScalarProduct(self, other)
def __rmul__(self, other):
return ListInt.__mul__(other, self)
def columns(m):
def column(j):
assert is_2d_list(m), "column() can only be called on 2-dimensional lists"
        assert all(len(row) > j for row in m), "some row does not have at least j+1 elements"
return ListVar(row[j] for row in m)
assert is_matrix(m), "columns() can only be called on matrices"
return ListVar(column(j) for j in range(len(m[0])))
class ListVar(list):
# def __new__(self, variables): # if we subclass tuple instead of list (while removing __init__)
# return super().__new__(ListVar, variables)
def __init__(self, variables):
super().__init__(variables)
def __getslice__(self, i, j):
return ListVar(super().__getslice__(i, j))
def __add__(self, other):
return ListVar(super().__add__(other))
def __mul__(self, other):
assert is_containing(self, (Variable, Node))
return ScalarProduct(self, list(other) if isinstance(other, (tuple, range)) else other)
def __contains__(self, other):
if isinstance(other, int) and (is_1d_list(self, Variable) or is_1d_tuple(self, Variable)): # member constraint
queue_in.append((self, other))
return True
return list.__contains__(self, other)
# def __rmul__(self, other): return ListVar.__mul__(other, self)
def columns(self):
return columns(self)
def convert_to_namedtuples(obj):
if not hasattr(convert_to_namedtuples, "cnt"):
convert_to_namedtuples.cnt = 0
if isinstance(obj, list):
if is_1d_list(obj, int):
return ListInt(obj)
if is_1d_list(obj, Variable):
return ListVar(obj)
if is_1d_list(obj, dict):
nt = namedtuple("nt" + str(convert_to_namedtuples.cnt), obj[0].keys())
convert_to_namedtuples.cnt += 1
return [nt(*(convert_to_namedtuples(v) for (k, v) in d.items())) for d in obj]
t = [convert_to_namedtuples(v) for v in obj]
return ListInt(t) if isinstance(t[0], ListInt) else ListVar(t) if isinstance(t[0], ListVar) else t
if isinstance(obj, dict):
nt = namedtuple("nt" + str(convert_to_namedtuples.cnt), obj.keys())
convert_to_namedtuples.cnt += 1
return nt(*(convert_to_namedtuples(v) for (k, v) in obj.items()))
return obj
def is_namedtuple(obj): # imperfect way of checking, but must be enough for our use (when JSON dumping Compilation.data)
t = type(obj)
if len(t.__bases__) != 1 or t.__bases__[0] != tuple:
return False
fields = getattr(t, '_fields', None)
return isinstance(fields, tuple) and all(type(field) == str for field in fields)
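# Minimal sketch of what the overriding above enables (illustrative, not part of
# the original module; `VarArray` and `satisfy` belong to the public pycsp3 API
# and are assumptions here, they are not defined in this file):
#
#   from pycsp3 import VarArray, satisfy
#
#   x = VarArray(size=3, dom=range(10))
#   satisfy(
#       (x[0], x[1]) in {(0, 1), (1, 2), (2, 3)},  # table constraint via the cursed __contains__
#       x[0] + x[1] < x[2],                        # intension constraint via __add__ / __lt__
#   )
#
# The curse(...) patches make `in`, `+`, `<`, ... on built-in types build
# constraint objects (queued in queue_in or returned as Node trees) instead of
# being evaluated immediately.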
|
the-stack_106_28005 | import argparse
import json
import requests
import stix2
def get_data_from_branch(domain, branch="master"):
dest = "https://raw.githubusercontent.com/" \
"mitre/cti/{}/{}/{}" \
".json".format(branch, domain, domain)
stix_json = requests.get(dest).json()
return stix2.MemoryStore(stix_data=stix_json["objects"])
if __name__ == '__main__':
print("Running...\n")
# Setup Arguments ...
parser = argparse.ArgumentParser()
'''
Example File:
https://raw.githubusercontent.com/scythe-io/community-threats/93f4e07c6792499153be2702f4f8ea23c3666cb9/Orangeworm/orangeworm_layer.json
'''
parser.add_argument(
'--jsonfile', required=True,
help='''
The target ATT&CK Navigator JSON file. Can be local file or URL.
''',
)
parser.add_argument(
'--mitigations', action='store_true',
help='''
Optional ability to print all the mitigations for the TID.
''',
)
args = parser.parse_args()
# Load custom layer JSON
# First, try for a local file
try:
with open(args.jsonfile) as f:
custom_layer = json.load(f)
except (FileNotFoundError, IsADirectoryError, OSError):
try:
custom_layer = requests.get(args.jsonfile).json()
        except requests.exceptions.RequestException as e:
print(
"Error: could not find '{}' local/URL!".format(args.jsonfile)
)
print(e)
print("\n ...Exiting.\n")
exit()
except ValueError as e:
# Catch JSONDecodeError too ...
print(
"Error: bad JSON via '{}' URL!".format(args.jsonfile)
)
print(e)
print("\n ...Exiting.\n")
exit()
# Load ATT&CK Data from internet
src = get_data_from_branch("enterprise-attack")
# Gather data into single object
data = {}
for technique in custom_layer['techniques']:
# Query for Technique information
cur_tec = src.query([
stix2.Filter(
"external_references.external_id", "=",
technique['techniqueID']
),
stix2.Filter("type", "=", "attack-pattern")
])[0]
# Get the Tactic
cur_tactic = cur_tec["kill_chain_phases"][0]["phase_name"]
# Sort by tactic
if data.get(cur_tactic) is None:
data[cur_tactic] = []
data[cur_tactic].append(
(
technique['techniqueID'],
cur_tec["name"],
cur_tec['x_mitre_detection']
)
)
# End FOR
# Present Data ...
for tactic in data:
# Remove Duplicates
data[tactic] = list(dict.fromkeys(data[tactic]))
# Print Results
print("\n{}".format(tactic.title()))
for technique in data[tactic]:
print("{} - {}".format(technique[0], technique[1]))
if args.mitigations:
print("Mitigations - {}\n".format(technique[2]))
# End technique FOR
# End tactic FOR
# Done!
print("\n ...Exiting.\n")
exit()
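# Example invocation (illustrative; the script filename is an assumption and the
# layer URL is the sample referenced in the comment near the top of the file):
#
#   python summarize_layer.py --mitigations \
#       --jsonfile https://raw.githubusercontent.com/scythe-io/community-threats/93f4e07c6792499153be2702f4f8ea23c3666cb9/Orangeworm/orangeworm_layer.json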
|
the-stack_106_28006 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PASCAL VOC datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import xml.etree.ElementTree
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_VOC_CITATION = """\
@misc{{pascal-voc-{year},
author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
title = "The {{PASCAL}} {{V}}isual {{O}}bject {{C}}lasses {{C}}hallenge {year} {{(VOC{year})}} {{R}}esults",
howpublished = "http://www.pascal-network.org/challenges/VOC/voc{year}/workshop/index.html"}}
"""
_VOC_DESCRIPTION = """\
This dataset contains the data from the PASCAL Visual Object Classes Challenge
{year}, a.k.a. VOC{year}, corresponding to the Classification and Detection
competitions.
A total of {num_images} images are included in this dataset, where each image
contains a set of objects, out of 20 different classes, making a total of
{num_objects} annotated objects.
In the Classification competition, the goal is to predict the set of labels
contained in the image, while in the Detection competition the goal is to
predict the bounding box and label of each individual object.
WARNING: As per the official dataset, the test set of VOC2012 does not contain
annotations.
"""
_VOC_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/"
# Original site, it is down very often.
# _VOC_DATA_URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc{year}/"
# Data mirror:
_VOC_DATA_URL = "http://pjreddie.com/media/files/"
_VOC_LABELS = (
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
)
_VOC_POSES = (
"frontal",
"rear",
"left",
"right",
"unspecified",
)
def _get_example_objects(annon_filepath):
"""Function to get all the objects from the annotation XML file."""
with tf.io.gfile.GFile(annon_filepath, "r") as f:
root = xml.etree.ElementTree.parse(f).getroot()
# Disable pytype to avoid attribute-error due to find returning
# Optional[Element]
# pytype: disable=attribute-error
size = root.find("size")
width = float(size.find("width").text)
height = float(size.find("height").text)
for obj in root.findall("object"):
# Get object's label name.
label = obj.find("name").text.lower()
# Get objects' pose name.
pose = obj.find("pose").text.lower()
is_truncated = (obj.find("truncated").text == "1")
is_difficult = (obj.find("difficult").text == "1")
bndbox = obj.find("bndbox")
xmax = float(bndbox.find("xmax").text)
xmin = float(bndbox.find("xmin").text)
ymax = float(bndbox.find("ymax").text)
ymin = float(bndbox.find("ymin").text)
yield {
"label": label,
"pose": pose,
"bbox": tfds.features.BBox(
ymin / height, xmin / width, ymax / height, xmax / width),
"is_truncated": is_truncated,
"is_difficult": is_difficult,
}
# pytype: enable=attribute-error
class VocConfig(tfds.core.BuilderConfig):
"""BuilderConfig for Voc."""
def __init__(
self, year=None, filenames=None, has_test_annotations=True, **kwargs):
self.year = year
self.filenames = filenames
self.has_test_annotations = has_test_annotations
super(VocConfig, self).__init__(
name=year,
# Version history:
# 4.0.0: Added BuildConfig and 2012 version support, deprecate Voc2007.
# 3.0.0: S3 with new hashing function (different shuffle).
# 2.0.0: S3 (new shuffling, sharding and slicing mechanism).
version=tfds.core.Version("4.0.0"),
**kwargs)
class Voc(tfds.core.GeneratorBasedBuilder):
"""Pascal VOC 2007 or 2012."""
BUILDER_CONFIGS = [
VocConfig(
year="2007",
description=_VOC_DESCRIPTION.format(
year=2007, num_images=9963, num_objects=24640),
filenames={
"trainval": "VOCtrainval_06-Nov-2007.tar",
"test": "VOCtest_06-Nov-2007.tar",
},
has_test_annotations=True,
),
VocConfig(
year="2012",
description=_VOC_DESCRIPTION.format(
year=2012, num_images=11540, num_objects=27450),
filenames={
"trainval": "VOCtrainval_11-May-2012.tar",
"test": "VOC2012test.tar",
},
has_test_annotations=False,
),
]
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=self.builder_config.description,
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(),
"image/filename": tfds.features.Text(),
"objects": tfds.features.Sequence({
"label": tfds.features.ClassLabel(names=_VOC_LABELS),
"bbox": tfds.features.BBoxFeature(),
"pose": tfds.features.ClassLabel(names=_VOC_POSES),
"is_truncated": tf.bool,
"is_difficult": tf.bool,
}),
"labels": tfds.features.Sequence(
tfds.features.ClassLabel(names=_VOC_LABELS)),
"labels_no_difficult": tfds.features.Sequence(
tfds.features.ClassLabel(names=_VOC_LABELS)),
}),
urls=[_VOC_URL.format(year=self.builder_config.year)],
citation=_VOC_CITATION.format(year=self.builder_config.year),
)
def _split_generators(self, dl_manager):
paths = dl_manager.download_and_extract({
k: os.path.join(_VOC_DATA_URL, v)
for k, v in self.builder_config.filenames.items()
})
return [
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs=dict(data_path=paths["test"], set_name="test")),
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs=dict(data_path=paths["trainval"], set_name="train")),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs=dict(data_path=paths["trainval"], set_name="val")),
]
def _generate_examples(self, data_path, set_name):
"""Yields examples."""
set_filepath = os.path.normpath(os.path.join(
data_path, "VOCdevkit/VOC{}/ImageSets/Main/{}.txt".format(
self.builder_config.year, set_name)))
load_annotations = (
self.builder_config.has_test_annotations or set_name != "test")
with tf.io.gfile.GFile(set_filepath, "r") as f:
for line in f:
image_id = line.strip()
example = self._generate_example(data_path, image_id, load_annotations)
yield image_id, example
def _generate_example(self, data_path, image_id, load_annotations):
image_filepath = os.path.normpath(os.path.join(
data_path, "VOCdevkit/VOC{}/JPEGImages/{}.jpg".format(
self.builder_config.year, image_id)))
annon_filepath = os.path.normpath(os.path.join(
data_path, "VOCdevkit/VOC{}/Annotations/{}.xml".format(
self.builder_config.year, image_id)))
if load_annotations:
objects = list(_get_example_objects(annon_filepath))
# Use set() to remove duplicates
labels = sorted(set(obj["label"] for obj in objects))
labels_no_difficult = sorted(set(
obj["label"] for obj in objects if obj["is_difficult"] == 0
))
else: # The test set of VOC2012 does not contain annotations
objects = []
labels = []
labels_no_difficult = []
return {
"image": image_filepath,
"image/filename": image_id + ".jpg",
"objects": objects,
"labels": labels,
"labels_no_difficult": labels_no_difficult,
}
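# Illustrative loading sketch (not part of the original module). Given the
# BuilderConfig names above, the builder would normally be addressed as
# "voc/2007" or "voc/2012"; treat the exact registered name as an assumption:
#
#   import tensorflow_datasets as tfds
#   ds, info = tfds.load("voc/2007", split="train", with_info=True)
#   for example in ds.take(1):
#       print(example["image/filename"], example["labels"])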
|
the-stack_106_28009 | import os
import sys
from typing import List
from urllib.parse import urlparse
import cv2
import numpy as np
import torch
from torch.hub import download_url_to_file, get_dir
def get_cache_path_by_url(url):
parts = urlparse(url)
hub_dir = get_dir()
model_dir = os.path.join(hub_dir, "checkpoints")
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir, exist_ok=True)
filename = os.path.basename(parts.path)
cached_file = os.path.join(model_dir, filename)
return cached_file
def download_model(url):
cached_file = get_cache_path_by_url(url)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = None
download_url_to_file(url, cached_file, hash_prefix, progress=True)
return cached_file
def ceil_modulo(x, mod):
if x % mod == 0:
return x
return (x // mod + 1) * mod
def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes:
data = cv2.imencode(f".{ext}", image_numpy,
[
int(cv2.IMWRITE_JPEG_QUALITY), 100,
int(cv2.IMWRITE_PNG_COMPRESSION), 0
])[1]
image_bytes = data.tobytes()
return image_bytes
def load_img(img_bytes, gray: bool = False):
alpha_channel = None
nparr = np.frombuffer(img_bytes, np.uint8)
if gray:
np_img = cv2.imdecode(nparr, cv2.IMREAD_GRAYSCALE)
else:
np_img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)
if len(np_img.shape) == 3 and np_img.shape[2] == 4:
alpha_channel = np_img[:, :, -1]
np_img = cv2.cvtColor(np_img, cv2.COLOR_BGRA2RGB)
else:
np_img = cv2.cvtColor(np_img, cv2.COLOR_BGR2RGB)
return np_img, alpha_channel
def norm_img(np_img):
if len(np_img.shape) == 2:
np_img = np_img[:, :, np.newaxis]
np_img = np.transpose(np_img, (2, 0, 1))
np_img = np_img.astype("float32") / 255
return np_img
def resize_max_size(
np_img, size_limit: int, interpolation=cv2.INTER_CUBIC
) -> np.ndarray:
# Resize image's longer size to size_limit if longer size larger than size_limit
h, w = np_img.shape[:2]
if max(h, w) > size_limit:
ratio = size_limit / max(h, w)
new_w = int(w * ratio + 0.5)
new_h = int(h * ratio + 0.5)
return cv2.resize(np_img, dsize=(new_w, new_h), interpolation=interpolation)
else:
return np_img
def pad_img_to_modulo(img: np.ndarray, mod: int):
"""
Args:
img: [H, W, C]
mod:
Returns:
"""
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
height, width = img.shape[:2]
out_height = ceil_modulo(height, mod)
out_width = ceil_modulo(width, mod)
return np.pad(
img,
((0, out_height - height), (0, out_width - width), (0, 0)),
mode="symmetric",
)
def boxes_from_mask(mask: np.ndarray) -> List[np.ndarray]:
"""
Args:
mask: (h, w, 1) 0~255
Returns:
"""
height, width = mask.shape[:2]
_, thresh = cv2.threshold(mask, 127, 255, 0)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
boxes = []
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
        box = np.array([x, y, x + w, y + h]).astype(int)
box[::2] = np.clip(box[::2], 0, width)
box[1::2] = np.clip(box[1::2], 0, height)
boxes.append(box)
return boxes
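# Small self-contained sketch of how these helpers compose (illustrative only):
#
#   import numpy as np
#
#   mask = np.zeros((100, 120), dtype=np.uint8)
#   mask[20:40, 30:70] = 255                   # one white rectangle
#   print(boxes_from_mask(mask))               # -> [array([30, 20, 70, 40])]
#
#   img = np.random.randint(0, 256, (100, 120, 3), dtype=np.uint8)
#   padded = pad_img_to_modulo(img, mod=8)     # padded to (104, 120, 3)
#   print(norm_img(padded).shape)              # (3, 104, 120), float32 in [0, 1]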
|
the-stack_106_28010 | from django.apps import AppConfig
from django.db.models.signals import post_save, pre_delete
def save_symmetric_lexical_similarity(sender, instance, created, raw, **kwargs):
if raw:
return
if created:
# Create the reflexive similarity, avoiding infinite recursion
ls = sender(
language_1=instance.language_2,
language_2=instance.language_1,
percent_low=instance.percent_low,
percent_high=instance.percent_high,
notes=instance.notes)
if not getattr(instance, '_disable_signals', False):
ls.save_without_signals(**kwargs)
else:
# Calling .update() on querysets doesn't send pre_save or post_save
# signals, so we don't need to call ls.save_without_signals() here
ls = sender.objects.filter(language_1=instance.language_2,
language_2=instance.language_1).update(
percent_low=instance.percent_low,
percent_high=instance.percent_high,
notes=instance.notes)
def delete_symmetric_lexical_similarity(sender, instance, **kwargs):
# Delete the reflexive similarity, avoiding infinite recursion
if not getattr(instance, '_disable_signals', False):
try:
sender.objects.get(
language_1=instance.language_2,
language_2=instance.language_1).delete_without_signals()
except sender.DoesNotExist:
pass
class WorldLanguagesConfig(AppConfig):
name = 'world_languages'
def ready(self):
post_save.connect(
save_symmetric_lexical_similarity,
sender=self.get_model('LexicalSimilarity'),
dispatch_uid='save_symmetric_lexical_similarity')
pre_delete.connect(
delete_symmetric_lexical_similarity,
sender=self.get_model('LexicalSimilarity'),
dispatch_uid='delete_symmetric_lexical_similarity')
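# Illustrative effect of the handlers above (not part of the original file; the
# models module path and the Language instances are assumptions):
#
#   from world_languages.models import LexicalSimilarity
#
#   LexicalSimilarity.objects.create(
#       language_1=english, language_2=frisian,   # two existing Language rows
#       percent_low=60, percent_high=70, notes="")
#   # post_save mirrors this as (frisian, english) automatically, updates keep
#   # both rows in sync, and pre_delete removes the mirror when one is deleted.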
|
the-stack_106_28013 | ###########################################################
###########################################################
### Created on Wed May 24 11:27:54 2017 ###
### Updated on Thu May 25 13:36:15 2017 ###
### By Samuel Low ###
### Atmospheric Density Model ###
### U.S. Standard Atmosphere Table 1976 ###
### Valid only for altitudes 86km to 1000km ###
###########################################################
###########################################################
import math
# Given some mean anomaly, M, find the eccentric anomaly E from the relation
# M = E - e*sin(E), where M is input in radians.
def SolveKepEqn(M,e):
E1 = M # Initialise eccentric anomaly
residual = 1.0 # Initialise convergence residual
while residual >= 0.00001:
fn = E1 - (e*math.sin(E1)) - M
fd = 1 - (e*math.cos(E1))
E2 = E1 - (fn/fd)
residual = abs(E2-E1) # Compute residual
E1 = E2 # Update the eccentric anomaly
return E2 |
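# Worked example (illustrative, not part of the original file): for a mean
# anomaly of 45 degrees and eccentricity 0.1 the Newton iteration above should
# satisfy Kepler's equation M = E - e*sin(E) to within the 1e-5 residual.
if __name__ == '__main__':
    M_test, e_test = math.radians(45.0), 0.1
    E_test = SolveKepEqn(M_test, e_test)
    print(E_test, E_test - e_test*math.sin(E_test) - M_test)  # residual ~ 0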
the-stack_106_28015 | import math
from animator import basic_func, objects
def bernstein_basis(k, n):
return lambda x: math.comb(n, k)*x**k*(1-x)**(n-k)
def bernstein(foo, n):
    return lambda x: sum([foo(k/n)*bernstein_basis(k, n)(x) for k in range(n+1)])  # sum runs over k = 0..n
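# Quick sanity check (illustrative, not part of the original file): the
# Bernstein operator reproduces the identity function exactly, B_n(x -> x) = x,
# so the line below would print 0.3 up to floating-point error:
#
#   print(bernstein(lambda x: x, 20)(0.3))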
def generate_frame(n, generate_png=False, foo=lambda x: 0 if x == 0 else x*math.sin(1/x)):
frame = basic_func.OneAxisFrame((1280, 720), 'black', 50, 50)
func = objects.Function(foo)
func2 = objects.Function(bernstein(foo, n))
settings_function = {
'sampling rate': 3,
'thickness': 8,
'blur': 3,
'color': 'gray'
}
settings_function2 = {
'sampling rate': 3,
'thickness': 8,
'blur': 3,
'color': 'white'
}
settings_axes = {
'sampling rate': 3,
'thickness': 2,
'blur': 1,
'color': 'white'
}
settings_grid = {
'sampling rate': 3,
'thickness': 3,
'blur': 2,
'color': 'white'
}
frame.add_axis_surface(x_bounds=(0, 1), y_bounds=(-2, 2))
frame.blit_axes(settings_axes, x_only=True)
frame.blit_parametric_object(func, settings_function)
frame.blit_parametric_object(func2, settings_function2)
# frame.blit_x_grid(settings_grid, interval=.1, length=.01)
frame.blit_axis_surface()
if generate_png:
frame.generate_png(f'xsin_bern{n}.png')
return frame
def render_video(n, foo=lambda x: 0 if x == 0 else x*math.sin(1/x), start=0, filename='xsin_bernstein_hd.mp4',
save_ram=True):
video = basic_func.Film(5, (1280, 720))
for i in range(start, n):
video.add_frame(generate_frame(i, generate_png=False, foo=foo), save_ram=save_ram)
print(i)
video.render(filename, save_ram=save_ram)
if __name__ == '__main__':
# generate_frame(40, True)
render_video(45, start=0, foo=lambda x: x**(1/2), filename='sqrt.mp4')
|
the-stack_106_28020 | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Start a Distributed Message Service Instance
"""
import openstack
openstack.enable_logging(True)
conn = openstack.connect(cloud='otc')
instance = "instance_id_or_name"
conn.dcs.start_instance(instance)
|
the-stack_106_28022 | """Class to perform under-sampling using balance cascade."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from collections import Counter
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import check_random_state, safe_indexing
from sklearn.model_selection import cross_val_predict
from .base import BaseEnsembleSampler
from ..under_sampling.base import BaseUnderSampler
from ..utils import check_sampling_strategy, check_target_type
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
random_state=_random_state_docstring)
class BalanceCascade(BaseEnsembleSampler):
"""Create an ensemble of balanced sets by iteratively under-sampling the
imbalanced dataset using an estimator.
    This method iteratively selects subsets and makes an ensemble of the
different sets. The selection is performed using a specific classifier.
Read more in the :ref:`User Guide <ensemble_samplers>`.
Parameters
----------
{sampling_strategy}
    return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.
{random_state}
n_max_subset : int or None, optional (default=None)
        Maximum number of subsets to generate. By default, all the data from
        the training set is used, which can lead to a large number of subsets.
        A suitable value can usually be chosen empirically.
estimator : object, optional (default=KNeighborsClassifier())
An estimator inherited from :class:`sklearn.base.ClassifierMixin` and
having an attribute :func:`predict_proba`.
bootstrap : bool, optional (default=True)
Whether to bootstrap the data before each iteration.
ratio : str, dict, or callable
.. deprecated:: 0.4
Use the parameter ``sampling_strategy`` instead. It will be removed
in 0.6.
Notes
-----
The method is described in [1]_.
Supports multi-class resampling. A one-vs.-rest scheme is used as
originally proposed in [1]_.
See :ref:`sphx_glr_auto_examples_ensemble_plot_balance_cascade.py`.
See also
--------
BalancedBaggingClassifier, EasyEnsemble
References
----------
.. [1] X. Y. Liu, J. Wu and Z. H. Zhou, "Exploratory Undersampling for
Class-Imbalance Learning," in IEEE Transactions on Systems, Man, and
Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550,
April 2009.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.ensemble import \
BalanceCascade # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> bc = BalanceCascade(random_state=42)
>>> X_res, y_res = bc.fit_sample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res[0])) \
# doctest: +ELLIPSIS
Resampled dataset shape Counter({{...}})
"""
def __init__(self,
sampling_strategy='auto',
return_indices=False,
random_state=None,
n_max_subset=None,
estimator=None,
ratio=None):
super(BalanceCascade, self).__init__(
sampling_strategy=sampling_strategy, ratio=ratio)
self.random_state = random_state
self.return_indices = return_indices
self.estimator = estimator
self.n_max_subset = n_max_subset
def fit(self, X, y):
"""Find the classes statistics before to perform sampling.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
self : object,
Return self.
"""
super(BalanceCascade, self).fit(X, y)
y = check_target_type(y)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, 'under-sampling')
return self
def _validate_estimator(self):
"""Private function to create the classifier"""
if (self.estimator is not None and
isinstance(self.estimator, ClassifierMixin) and
hasattr(self.estimator, 'predict')):
self.estimator_ = self.estimator
elif self.estimator is None:
self.estimator_ = KNeighborsClassifier()
else:
raise ValueError('Invalid parameter `estimator`. Got {}.'.format(
type(self.estimator)))
self.logger.debug(self.estimator_)
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {ndarray, sparse matrix}, shape \
(n_subset, n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_subset, n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_subset, n_samples, )
            If `return_indices` is `True`, an array containing the indices of
            the samples selected in each subset is also returned.
"""
self._validate_estimator()
random_state = check_random_state(self.random_state)
# array to know which samples are available to be taken
samples_mask = np.ones(y.shape, dtype=bool)
# where the different set will be stored
idx_under = []
n_subsets = 0
b_subset_search = True
while b_subset_search:
target_stats = Counter(
safe_indexing(y, np.flatnonzero(samples_mask)))
# store the index of the data to under-sample
index_under_sample = np.empty((0, ), dtype=y.dtype)
# value which will be picked at each round
index_constant = np.empty((0, ), dtype=y.dtype)
for target_class in target_stats.keys():
if target_class in self.sampling_strategy_.keys():
n_samples = self.sampling_strategy_[target_class]
# extract the data of interest for this round from the
# current class
index_class = np.flatnonzero(y == target_class)
index_class_interest = index_class[samples_mask[
y == target_class]]
y_class = safe_indexing(y, index_class_interest)
# select randomly the desired features
index_target_class = random_state.choice(
range(y_class.size), size=n_samples, replace=False)
index_under_sample = np.concatenate(
(index_under_sample,
index_class_interest[index_target_class]),
axis=0)
else:
index_constant = np.concatenate(
(index_constant, np.flatnonzero(y == target_class)),
axis=0)
# store the set created
n_subsets += 1
subset_indices = np.concatenate(
(index_under_sample, index_constant), axis=0)
idx_under.append(subset_indices)
# fit and predict using cross validation
X_subset = safe_indexing(X, subset_indices)
y_subset = safe_indexing(y, subset_indices)
pred = cross_val_predict(self.estimator_, X_subset, y_subset)
# extract the prediction about the targeted classes only
pred_target = pred[:index_under_sample.size]
index_classified = index_under_sample[pred_target == safe_indexing(
y_subset, range(index_under_sample.size))]
samples_mask[index_classified] = False
# check the stopping criterion
if self.n_max_subset is not None:
if n_subsets == self.n_max_subset:
b_subset_search = False
# check that there is enough samples for another round
target_stats = Counter(
safe_indexing(y, np.flatnonzero(samples_mask)))
for target_class in self.sampling_strategy_.keys():
if (target_stats[target_class] <
self.sampling_strategy_[target_class]):
b_subset_search = False
X_resampled, y_resampled = [], []
for indices in idx_under:
X_resampled.append(safe_indexing(X, indices))
y_resampled.append(safe_indexing(y, indices))
if self.return_indices:
return (np.array(X_resampled), np.array(y_resampled),
np.array(idx_under))
else:
return np.array(X_resampled), np.array(y_resampled)
|
the-stack_106_28025 | # Copyright [2021] Luis Alberto Pineda Cortés,
# Rafael Morales Gamboa.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import numpy as np
from matplotlib import pyplot as plt
# Keys for data
LOSS = 'loss'
C_LOSS = 'classification_loss'
A_LOSS = 'autoencoder_loss'
C_ACCURACY = 'classification_accuracy'
A_ACCURACY = 'autoencoder_accuracy'
VAL = 'val_'
def trplot(a_measure, b_measure, a_label, b_label, max_epoch, nn):
epoch = len(a_measure)
epoch = max_epoch if epoch > max_epoch else epoch
fig = plt.figure()
x = np.arange(0,epoch)
plt.errorbar(x, a_measure[:epoch], fmt='b-.', label=a_label)
plt.errorbar(x, b_measure[:epoch], fmt='r--,', label=b_label)
plt.legend(loc=0)
plt.suptitle(f'Neural net No. {nn}')
plt.show()
def teplot(a_measure, b_measure, a_label, b_label):
fig = plt.figure()
x = np.arange(0,len(a_measure))
plt.errorbar(x, a_measure, fmt='b-.', label=a_label)
plt.errorbar(x, b_measure, fmt='r--,', label=b_label)
plt.legend(loc=0)
plt.suptitle(f'Average results')
plt.show()
def training_stats(data, max_epoch):
""" Analyse neural nets training data.
Training stats data is a list of dictionaries with the full
set of keys declared above.
"""
n = 0
for d in data:
trplot(d[LOSS], d[VAL+LOSS], LOSS, VAL+LOSS,max_epoch,n)
trplot(d[C_LOSS], d[VAL+C_LOSS], C_LOSS, VAL+C_LOSS,max_epoch,n)
trplot(d[A_LOSS], d[VAL+A_LOSS], A_LOSS, VAL+A_LOSS,max_epoch,n)
trplot(d[C_ACCURACY], d[VAL+C_ACCURACY], C_ACCURACY, VAL+C_ACCURACY,max_epoch,n)
trplot(d[A_ACCURACY], d[VAL+A_ACCURACY], A_ACCURACY, VAL+A_ACCURACY,max_epoch,n)
n += 1
def testing_stats(data):
""" Analyse neural nets testing data.
"""
n = len(data)
m = {LOSS: [],
C_LOSS: [],
A_LOSS: [],
C_ACCURACY: [],
A_ACCURACY: []}
for d in data:
m[LOSS].append(d[LOSS])
m[C_LOSS].append(d[C_LOSS])
m[A_LOSS].append(d[A_LOSS])
m[C_ACCURACY].append(d[C_ACCURACY])
m[A_ACCURACY].append(d[A_ACCURACY])
teplot(m[C_LOSS],m[A_LOSS],C_LOSS,A_LOSS)
teplot(m[C_ACCURACY],m[A_ACCURACY],C_ACCURACY,A_ACCURACY)
if __name__== "__main__" :
if len(sys.argv) != 3:
print(f'Usage: {sys.argv[0]} file.json epochs.')
sys.exit(1)
fname = sys.argv[1]
max_epoch = int(sys.argv[2])
history = {}
with open(fname) as json_file:
history = json.load(json_file)
history = history['history']
# Now, history contains a list with the statistics from the neural nets.
# Odd elements have statistics from training and validation, while
# even elements have statistics from testing.
training = []
testing = []
odd = True
for s in history:
if odd:
training.append(s)
else:
testing.append(s)
odd = not odd
testing_stats(testing)
training_stats(training, max_epoch)
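# Sketch of the expected JSON layout (an assumption inferred from the parsing
# above, not taken from a real results file): the top-level "history" list
# alternates per-net training/validation dicts (lists per epoch) and testing
# dicts (scalars), e.g.
#
#   {"history": [
#       {"loss": [...], "val_loss": [...], "classification_loss": [...],
#        "val_classification_loss": [...], "autoencoder_loss": [...],
#        "val_autoencoder_loss": [...], "classification_accuracy": [...],
#        "val_classification_accuracy": [...], "autoencoder_accuracy": [...],
#        "val_autoencoder_accuracy": [...]},                 # net 0: training
#       {"loss": 0.4, "classification_loss": 0.3, "autoencoder_loss": 0.1,
#        "classification_accuracy": 0.9, "autoencoder_accuracy": 0.8}   # net 0: testing
#   ]}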
|
the-stack_106_28026 | from flask import Flask, request
from flask_restful import Resource, Api
from datetime import datetime
from uuid import uuid4
from logger import Logger
from models.advanced import from_files as advanced_from_files
from models.basic import from_file as basic_from_file
basic_recommender_fp = "../models/basic/recommendations.json"
advanced_user_to_group_fp = "../models/advanced/user_to_group.json"
advanced_group_recommendations_fp = "../models/advanced/group_recommendations.json"
logs_fp = "logs/logs.txt"
############################################################################
# NOTE #
# Models should be already created before running the service. #
# Code will crash, if files specified in paths above (ending with _fp) #
# do not exist or are corrupted! #
############################################################################
advanced_model = advanced_from_files(
user_to_group_fp=advanced_user_to_group_fp,
group_recommendations_fp=advanced_group_recommendations_fp
)
basic_model = basic_from_file(
recommendations_fp=basic_recommender_fp
)
logger = Logger(logging_fp=logs_fp)
# Setting up the flask application.
app = Flask(__name__)
api = Api(app)
class Recommender(Resource):
@staticmethod
def get():
response = {
"id": str(uuid4()),
"date": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
try:
query_param_dict = Recommender.__query_args()
except RuntimeError as err:
response["message"] = str(err)
return Recommender.__send_response(response, 400)
response["user_id"] = int(query_param_dict["user_id"])
response["model"] = query_param_dict["model"]
if query_param_dict["model"] == "advanced":
recommendations = advanced_model.recommend(
user_id=int(query_param_dict["user_id"]),
category=str(query_param_dict["category_path"])
)
elif query_param_dict["model"] == "basic":
recommendations = basic_model.recommend(
user_id=int(query_param_dict["user_id"]),
category=str(query_param_dict["category_path"])
)
else:
response["message"] = "unknown model type!"
return Recommender.__send_response(response, 400)
response["recommendations"] = recommendations
return Recommender.__send_response(response)
@staticmethod
def __query_args() -> dict:
args = request.args
needed_keys = ["user_id", "category_path", "model"]
for key in needed_keys:
if key not in args:
raise RuntimeError("{} value is missing!".format(key))
return args.to_dict()
@staticmethod
def __send_response(response, code: int = 200):
logger.log(response)
return response, code
api.add_resource(Recommender, '/')
if __name__ == '__main__':
app.run()
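# Example request once the service is running on Flask's default port 5000
# (illustrative; the user id and category value are placeholders):
#
#   curl "http://127.0.0.1:5000/?user_id=102&category_path=some%20category&model=advanced"
#
# A missing query parameter or an unknown model name yields HTTP 400 with a
# "message" field, as handled in Recommender.get above.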
|
the-stack_106_28028 | # Written by: Nick Gerend, @dataoutsider
# Viz: "", enjoy!
import pandas as pd
import numpy as np
import os
from datetime import datetime
from math import pi, cos, sin, sqrt, exp
def circle(diam, points):
x = []
y = []
path = []
angle = 0.
path_i = 1
for i in range(points):
x.append(diam/2.*sin(angle*pi/180.))
y.append(diam/2.*cos(angle*pi/180.))
path.append(path_i)
angle += 1./(points-1)*360.
path_i += 1
return x,y,path
count = 500
circ = circle(4.4, count)
import csv
with open(os.path.dirname(__file__) + '/background.csv', 'w',) as csvfile:
writer = csv.writer(csvfile, lineterminator = '\n')
writer.writerow(['index', 'x', 'y', 'path'])
for i in range(count):
writer.writerow([i, circ[0][i], circ[1][i], circ[2][i]])
writer.writerow([i, 0, 2.5, count+1])
writer.writerow([i, 2.5, 2.5, count+2])
writer.writerow([i, 2.5, -2.5, count+3])
writer.writerow([i, -2.5, -2.5, count+4])
writer.writerow([i, -2.5, 2.5, count+5])
writer.writerow([i, 0, 2.5, count+6])
print('finished') |
the-stack_106_28029 | import sys
sys.path.append('../tytus/parser/team27/G-27/execution/abstract')
sys.path.append('../tytus/parser/team27/G-27/execution/symbol')
sys.path.append('../tytus/parser/team27/G-27/execution/querie')
sys.path.append('../tytus/storage')
from querie import *
from environment import *
from typ import *
from add_column import *
from drop_column import *
class Alter_Table(Querie):
def __init__(self, tableName,operacion, row, column):
Querie.__init__(self, row, column)
self.tableName = tableName
self.operacion = operacion
def execute(self, environment):
if not isinstance(self.tableName,str):
return {'Error': 'El nombre indicado de la tabla no es una cadena.', 'Fila':self.row, 'Columna': self.column }
if isinstance(self.operacion,list):
print('')
else:
return self.operacion.execute(environment,self.tableName) |
the-stack_106_28031 | import os
import sys
import mxnet as mx
from random import shuffle
import numpy as np
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
output_dir_path = patch_path('output')
model_dir_path = patch_path('models')
from mxnet_gan.library.pixel2pixel import Pixel2PixelGan
from mxnet_gan.data.facades_data_set import load_image_pairs
from mxnet_gan.library.image_utils import load_image, visualize, save_image
img_pairs = load_image_pairs(patch_path('data/facades'))
gan = Pixel2PixelGan(model_ctx=mx.gpu(0), data_ctx=mx.gpu(0))
gan.load_model(model_dir_path)
shuffle(img_pairs)
for i, (source_img_path, _) in enumerate(img_pairs[:20]):
source_img = load_image(source_img_path, gan.img_width, gan.img_height)
target_img = gan.generate(source_image=source_img)
img = mx.nd.concat(source_img.as_in_context(gan.model_ctx), target_img, dim=2)
visualize(img)
img = ((img.asnumpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
save_image(img, os.path.join(output_dir_path, Pixel2PixelGan.model_name + '-generated-' + str(i) + '.png'))
if __name__ == '__main__':
main()
|
the-stack_106_28034 | # Python implementation of the MySQL client-server protocol
# http://dev.mysql.com/doc/internals/en/client-server-protocol.html
# Error codes:
# https://dev.mysql.com/doc/refman/5.5/en/error-handling.html
import errno
import os
import socket
import struct
import sys
import traceback
import warnings
from . import _auth
from .charset import charset_by_name, charset_by_id
from .constants import CLIENT, COMMAND, CR, ER, FIELD_TYPE, SERVER_STATUS
from . import converters
from .cursors import Cursor
from .optionfile import Parser
from .protocol import (
dump_packet,
MysqlPacket,
FieldDescriptorPacket,
OKPacketWrapper,
EOFPacketWrapper,
LoadLocalPacketWrapper,
)
from . import err, VERSION_STRING
try:
import ssl
SSL_ENABLED = True
except ImportError:
ssl = None
SSL_ENABLED = False
try:
import getpass
DEFAULT_USER = getpass.getuser()
del getpass
except (ImportError, KeyError):
# KeyError occurs when there's no entry in OS database for a current user.
DEFAULT_USER = None
DEBUG = False
TEXT_TYPES = {
FIELD_TYPE.BIT,
FIELD_TYPE.BLOB,
FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB,
FIELD_TYPE.STRING,
FIELD_TYPE.TINY_BLOB,
FIELD_TYPE.VAR_STRING,
FIELD_TYPE.VARCHAR,
FIELD_TYPE.GEOMETRY,
}
DEFAULT_CHARSET = "utf8mb4"
MAX_PACKET_LEN = 2**24 - 1
def _pack_int24(n):
return struct.pack("<I", n)[:3]
# https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger
def _lenenc_int(i):
if i < 0:
raise ValueError(
"Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i
)
elif i < 0xFB:
return bytes([i])
elif i < (1 << 16):
return b"\xfc" + struct.pack("<H", i)
elif i < (1 << 24):
return b"\xfd" + struct.pack("<I", i)[:3]
elif i < (1 << 64):
return b"\xfe" + struct.pack("<Q", i)
else:
raise ValueError(
"Encoding %x is larger than %x - no representation in LengthEncodedInteger"
% (i, (1 << 64))
)
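# A quick sketch of the length-encoded integer format implemented above
# (values worked out from the branches of _lenenc_int, not captured from a
# live server): integers below 0xFB are a single byte, larger values get a
# one-byte marker followed by a fixed-width little-endian integer.
#
#   _lenenc_int(250)      -> b'\xfa'
#   _lenenc_int(251)      -> b'\xfc\xfb\x00'      (0xFC marker + 2-byte LE)
#   _lenenc_int(1 << 20)  -> b'\xfd\x00\x00\x10'  (0xFD marker + 3-byte LE)
#   _lenenc_int(1 << 32)  -> b'\xfe' + 8 bytes LE (0xFE marker + 8-byte LE)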
class Connection:
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
Establish a connection to the MySQL database. Accepts several
arguments:
:param host: Host where the database server is located.
:param user: Username to log in as.
:param password: Password to use.
:param database: Database to use, None to not use a particular one.
:param port: MySQL port to use, default is usually OK. (default: 3306)
:param bind_address: When the client has multiple network interfaces, specify
the interface from which to connect to the host. Argument can be
a hostname or an IP address.
:param unix_socket: Use a unix socket rather than TCP/IP.
:param read_timeout: The timeout for reading from the connection in seconds (default: None - no timeout)
:param write_timeout: The timeout for writing to the connection in seconds (default: None - no timeout)
:param charset: Charset to use.
:param sql_mode: Default SQL_MODE to use.
:param read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
:param conv:
Conversion dictionary to use instead of the default one.
This is used to provide custom marshalling and unmarshalling of types.
See converters.
:param use_unicode:
Whether or not to default to unicode strings.
This option defaults to true.
:param client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
:param cursorclass: Custom cursor class to use.
:param init_command: Initial SQL statement to run when connection is established.
:param connect_timeout: The timeout for connecting to the database in seconds.
(default: 10, min: 1, max: 31536000)
:param ssl: A dict of arguments similar to mysql_ssl_set()'s parameters.
:param ssl_ca: Path to the file that contains a PEM-formatted CA certificate.
:param ssl_cert: Path to the file that contains a PEM-formatted client certificate.
:param ssl_disabled: A boolean value that disables usage of TLS.
:param ssl_key: Path to the file that contains a PEM-formatted private key for the client certificate.
:param ssl_verify_cert: Set to true to check the server certificate's validity.
:param ssl_verify_identity: Set to true to check the server's identity.
:param read_default_group: Group to read from in the configuration file.
:param autocommit: Autocommit mode. None means use server default. (default: False)
:param local_infile: Boolean to enable the use of LOAD DATA LOCAL command. (default: False)
:param max_allowed_packet: Max size of packet sent to server in bytes. (default: 16MB)
Only used to limit size of "LOAD LOCAL INFILE" data packet smaller than default (16KB).
:param defer_connect: Don't explicitly connect on construction - wait for connect call.
(default: False)
:param auth_plugin_map: A dict of plugin names to a class that processes that plugin.
The class will take the Connection object as the argument to the constructor.
The class needs an authenticate method taking an authentication packet as
an argument. For the dialog plugin, a prompt(echo, prompt) method can be used
(if no authenticate method) for returning a string from the user. (experimental)
:param server_public_key: SHA256 authentication plugin public key value. (default: None)
:param binary_prefix: Add _binary prefix on bytes and bytearray. (default: False)
:param compress: Not supported.
:param named_pipe: Not supported.
:param db: **DEPRECATED** Alias for database.
:param passwd: **DEPRECATED** Alias for password.
See `Connection <https://www.python.org/dev/peps/pep-0249/#connection-objects>`_ in the
specification.
"""
_sock = None
_auth_plugin_name = ""
_closed = False
_secure = False
def __init__(
self,
*,
user=None, # The first four arguments is based on DB-API 2.0 recommendation.
password="",
host=None,
database=None,
unix_socket=None,
port=0,
charset="",
sql_mode=None,
read_default_file=None,
conv=None,
use_unicode=True,
client_flag=0,
cursorclass=Cursor,
init_command=None,
connect_timeout=10,
read_default_group=None,
autocommit=False,
local_infile=False,
max_allowed_packet=16 * 1024 * 1024,
defer_connect=False,
auth_plugin_map=None,
read_timeout=None,
write_timeout=None,
bind_address=None,
binary_prefix=False,
program_name=None,
server_public_key=None,
ssl=None,
ssl_ca=None,
ssl_cert=None,
ssl_disabled=None,
ssl_key=None,
ssl_verify_cert=None,
ssl_verify_identity=None,
compress=None, # not supported
named_pipe=None, # not supported
passwd=None, # deprecated
db=None, # deprecated
):
if db is not None and database is None:
# We will raise warning in 2022 or later.
# See https://github.com/PyMySQL/PyMySQL/issues/939
# warnings.warn("'db' is deprecated, use 'database'", DeprecationWarning, 3)
database = db
if passwd is not None and not password:
# We will raise warning in 2022 or later.
# See https://github.com/PyMySQL/PyMySQL/issues/939
# warnings.warn(
# "'passwd' is deprecated, use 'password'", DeprecationWarning, 3
# )
password = passwd
if compress or named_pipe:
raise NotImplementedError(
"compress and named_pipe arguments are not supported"
)
self._local_infile = bool(local_infile)
if self._local_infile:
client_flag |= CLIENT.LOCAL_FILES
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = Parser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, arg):
if arg:
return arg
try:
return cfg.get(read_default_group, key)
except Exception:
return arg
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
bind_address = _config("bind-address", bind_address)
charset = _config("default-character-set", charset)
if not ssl:
ssl = {}
if isinstance(ssl, dict):
for key in ["ca", "capath", "cert", "key", "cipher"]:
value = _config("ssl-" + key, ssl.get(key))
if value:
ssl[key] = value
self.ssl = False
if not ssl_disabled:
if ssl_ca or ssl_cert or ssl_key or ssl_verify_cert or ssl_verify_identity:
ssl = {
"ca": ssl_ca,
"check_hostname": bool(ssl_verify_identity),
"verify_mode": ssl_verify_cert
if ssl_verify_cert is not None
else False,
}
if ssl_cert is not None:
ssl["cert"] = ssl_cert
if ssl_key is not None:
ssl["key"] = ssl_key
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
self.ctx = self._create_ssl_ctx(ssl)
self.host = host or "localhost"
self.port = port or 3306
if type(self.port) is not int:
raise ValueError("port should be of type int")
self.user = user or DEFAULT_USER
self.password = password or b""
if isinstance(self.password, str):
self.password = self.password.encode("latin1")
self.db = database
self.unix_socket = unix_socket
self.bind_address = bind_address
if not (0 < connect_timeout <= 31536000):
raise ValueError("connect_timeout should be >0 and <=31536000")
self.connect_timeout = connect_timeout or None
if read_timeout is not None and read_timeout <= 0:
raise ValueError("read_timeout should be > 0")
self._read_timeout = read_timeout
if write_timeout is not None and write_timeout <= 0:
raise ValueError("write_timeout should be > 0")
self._write_timeout = write_timeout
self.charset = charset or DEFAULT_CHARSET
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
# specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
if conv is None:
conv = converters.conversions
# Need for MySQLdb compatibility.
self.encoders = {k: v for (k, v) in conv.items() if type(k) is not int}
self.decoders = {k: v for (k, v) in conv.items() if type(k) is int}
self.sql_mode = sql_mode
self.init_command = init_command
self.max_allowed_packet = max_allowed_packet
self._auth_plugin_map = auth_plugin_map or {}
self._binary_prefix = binary_prefix
self.server_public_key = server_public_key
self._connect_attrs = {
"_client_name": "pymysql",
"_pid": str(os.getpid()),
"_client_version": VERSION_STRING,
}
if program_name:
self._connect_attrs["program_name"] = program_name
if defer_connect:
self._sock = None
else:
self.connect()
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def _create_ssl_ctx(self, sslp):
if isinstance(sslp, ssl.SSLContext):
return sslp
ca = sslp.get("ca")
capath = sslp.get("capath")
hasnoca = ca is None and capath is None
ctx = ssl.create_default_context(cafile=ca, capath=capath)
ctx.check_hostname = not hasnoca and sslp.get("check_hostname", True)
verify_mode_value = sslp.get("verify_mode")
if verify_mode_value is None:
ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED
elif isinstance(verify_mode_value, bool):
ctx.verify_mode = ssl.CERT_REQUIRED if verify_mode_value else ssl.CERT_NONE
else:
if isinstance(verify_mode_value, str):
verify_mode_value = verify_mode_value.lower()
if verify_mode_value in ("none", "0", "false", "no"):
ctx.verify_mode = ssl.CERT_NONE
elif verify_mode_value == "optional":
ctx.verify_mode = ssl.CERT_OPTIONAL
elif verify_mode_value in ("required", "1", "true", "yes"):
ctx.verify_mode = ssl.CERT_REQUIRED
else:
ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED
if "cert" in sslp:
ctx.load_cert_chain(sslp["cert"], keyfile=sslp.get("key"))
if "cipher" in sslp:
ctx.set_ciphers(sslp["cipher"])
ctx.options |= ssl.OP_NO_SSLv2
ctx.options |= ssl.OP_NO_SSLv3
return ctx
def close(self):
"""
Send the quit message and close the socket.
See `Connection.close() <https://www.python.org/dev/peps/pep-0249/#Connection.close>`_
in the specification.
:raise Error: If the connection is already closed.
"""
if self._closed:
raise err.Error("Already closed")
self._closed = True
if self._sock is None:
return
send_data = struct.pack("<iB", 1, COMMAND.COM_QUIT)
try:
self._write_bytes(send_data)
except Exception:
pass
finally:
self._force_close()
@property
def open(self):
"""Return True if the connection is open."""
return self._sock is not None
def _force_close(self):
"""Close connection without QUIT message."""
if self._sock:
try:
self._sock.close()
except: # noqa
pass
self._sock = None
self._rfile = None
__del__ = _force_close
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status & SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
def _read_ok_packet(self):
pkt = self._read_packet()
if not pkt.is_ok_packet():
raise err.OperationalError(
CR.CR_COMMANDS_OUT_OF_SYNC,
"Command Out of Sync",
)
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
return ok
def _send_autocommit_mode(self):
"""Set whether or not to commit after every execute()."""
self._execute_command(
COMMAND.COM_QUERY, "SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode)
)
self._read_ok_packet()
def begin(self):
"""Begin transaction."""
self._execute_command(COMMAND.COM_QUERY, "BEGIN")
self._read_ok_packet()
def commit(self):
"""
Commit changes to stable storage.
See `Connection.commit() <https://www.python.org/dev/peps/pep-0249/#commit>`_
in the specification.
"""
self._execute_command(COMMAND.COM_QUERY, "COMMIT")
self._read_ok_packet()
def rollback(self):
"""
Roll back the current transaction.
See `Connection.rollback() <https://www.python.org/dev/peps/pep-0249/#rollback>`_
in the specification.
"""
self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
self._read_ok_packet()
def show_warnings(self):
"""Send the "SHOW WARNINGS" SQL command."""
self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
result.read()
return result.rows
def select_db(self, db):
"""
Set current db.
:param db: The name of the db.
"""
self._execute_command(COMMAND.COM_INIT_DB, db)
self._read_ok_packet()
def escape(self, obj, mapping=None):
"""Escape whatever value is passed.
Non-standard, for internal use; do not use this in your applications.
"""
if isinstance(obj, str):
return "'" + self.escape_string(obj) + "'"
if isinstance(obj, (bytes, bytearray)):
ret = self._quote_bytes(obj)
if self._binary_prefix:
ret = "_binary" + ret
return ret
return converters.escape_item(obj, self.charset, mapping=mapping)
def literal(self, obj):
"""Alias for escape().
Non-standard, for internal use; do not use this in your applications.
"""
return self.escape(obj, self.encoders)
def escape_string(self, s):
if self.server_status & SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES:
return s.replace("'", "''")
return converters.escape_string(s)
def _quote_bytes(self, s):
if self.server_status & SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES:
return "'%s'" % (s.replace(b"'", b"''").decode("ascii", "surrogateescape"),)
return converters.escape_bytes(s)
def cursor(self, cursor=None):
"""
Create a new cursor to execute queries with.
:param cursor: The type of cursor to create. None means use Cursor.
:type cursor: :py:class:`Cursor`, :py:class:`SSCursor`, :py:class:`DictCursor`, or :py:class:`SSDictCursor`.
"""
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
def query(self, sql, unbuffered=False):
# if DEBUG:
# print("DEBUG: sending query:", sql)
if isinstance(sql, str):
sql = sql.encode(self.encoding, "surrogateescape")
self._execute_command(COMMAND.COM_QUERY, sql)
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def next_result(self, unbuffered=False):
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def affected_rows(self):
return self._affected_rows
def kill(self, thread_id):
arg = struct.pack("<I", thread_id)
self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
return self._read_ok_packet()
def ping(self, reconnect=True):
"""
Check if the server is alive.
:param reconnect: If the connection is closed, reconnect.
:type reconnect: boolean
:raise Error: If the connection is closed and reconnect=False.
"""
if self._sock is None:
if reconnect:
self.connect()
reconnect = False
else:
raise err.Error("Already closed")
try:
self._execute_command(COMMAND.COM_PING, "")
self._read_ok_packet()
except Exception:
if reconnect:
self.connect()
self.ping(False)
else:
raise
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
self._read_packet()
self.charset = charset
self.encoding = encoding
def connect(self, sock=None):
self._closed = False
try:
if sock is None:
if self.unix_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.connect_timeout)
sock.connect(self.unix_socket)
self.host_info = "Localhost via UNIX socket"
self._secure = True
if DEBUG:
print("connected using unix_socket")
else:
kwargs = {}
if self.bind_address is not None:
kwargs["source_address"] = (self.bind_address, 0)
while True:
try:
sock = socket.create_connection(
(self.host, self.port), self.connect_timeout, **kwargs
)
break
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
self.host_info = "socket %s:%d" % (self.host, self.port)
if DEBUG:
print("connected using socket")
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.settimeout(None)
self._sock = sock
self._rfile = sock.makefile("rb")
self._next_seq_id = 0
self._get_server_information()
self._request_authentication()
if self.sql_mode is not None:
c = self.cursor()
c.execute("SET sql_mode=%s", (self.sql_mode,))
if self.init_command is not None:
c = self.cursor()
c.execute(self.init_command)
c.close()
self.commit()
if self.autocommit_mode is not None:
self.autocommit(self.autocommit_mode)
except BaseException as e:
self._rfile = None
if sock is not None:
try:
sock.close()
except: # noqa
pass
if isinstance(e, (OSError, IOError, socket.error)):
exc = err.OperationalError(
CR.CR_CONN_HOST_ERROR,
"Can't connect to MySQL server on %r (%s)" % (self.host, e),
)
# Keep original exception and traceback to investigate error.
exc.original_exception = e
exc.traceback = traceback.format_exc()
if DEBUG:
print(exc.traceback)
raise exc
            # If e is neither DatabaseError nor IOError, it's a bug.
# But raising AssertionError hides original error.
# So just reraise it.
raise
def write_packet(self, payload):
"""Writes an entire "mysql packet" in its entirety to the network
adding its length and sequence number.
"""
# Internal note: when you build packet manually and calls _write_bytes()
# directly, you should set self._next_seq_id properly.
data = _pack_int24(len(payload)) + bytes([self._next_seq_id]) + payload
if DEBUG:
dump_packet(data)
self._write_bytes(data)
self._next_seq_id = (self._next_seq_id + 1) % 256
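    # Sketch of the framing built above (derived from _pack_int24 and the
    # sequence-id handling in this method, not from a packet capture):
    #
    #   +------------------+--------+-------------------+
    #   | 3-byte LE length | seq id | payload bytes ... |
    #   +------------------+--------+-------------------+
    #
    # e.g. write_packet(b"ping") emits b"\x04\x00\x00" + bytes([seq]) + b"ping".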
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError: If the packet sequence number is wrong.
"""
buff = bytearray()
while True:
packet_header = self._read_bytes(4)
# if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack("<HBB", packet_header)
bytes_to_read = btrl + (btrh << 16)
if packet_number != self._next_seq_id:
self._force_close()
if packet_number == 0:
# MariaDB sends error packet with seqno==0 when shutdown
raise err.OperationalError(
CR.CR_SERVER_LOST,
"Lost connection to MySQL server during query",
)
raise err.InternalError(
"Packet sequence number wrong - got %d expected %d"
% (packet_number, self._next_seq_id)
)
self._next_seq_id = (self._next_seq_id + 1) % 256
recv_data = self._read_bytes(bytes_to_read)
if DEBUG:
dump_packet(recv_data)
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 0xFFFFFF:
continue
if bytes_to_read < MAX_PACKET_LEN:
break
packet = packet_type(bytes(buff), self.encoding)
if packet.is_error_packet():
if self._result is not None and self._result.unbuffered_active is True:
self._result.unbuffered_active = False
packet.raise_for_error()
return packet
def _read_bytes(self, num_bytes):
self._sock.settimeout(self._read_timeout)
while True:
try:
data = self._rfile.read(num_bytes)
break
except (IOError, OSError) as e:
if e.errno == errno.EINTR:
continue
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_LOST,
"Lost connection to MySQL server during query (%s)" % (e,),
)
except BaseException:
# Don't convert unknown exception to MySQLError.
self._force_close()
raise
if len(data) < num_bytes:
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_LOST, "Lost connection to MySQL server during query"
)
return data
def _write_bytes(self, data):
self._sock.settimeout(self._write_timeout)
try:
self._sock.sendall(data)
except IOError as e:
self._force_close()
raise err.OperationalError(
CR.CR_SERVER_GONE_ERROR, "MySQL server has gone away (%r)" % (e,)
)
def _read_query_result(self, unbuffered=False):
self._result = None
if unbuffered:
try:
result = MySQLResult(self)
result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
result.read()
self._result = result
if result.server_status is not None:
self.server_status = result.server_status
return result.affected_rows
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
def _execute_command(self, command, sql):
"""
:raise InterfaceError: If the connection is closed.
:raise ValueError: If no username was specified.
"""
if not self._sock:
raise err.InterfaceError(0, "")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None:
if self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
self._result._finish_unbuffered_query()
while self._result.has_next:
self.next_result()
self._result = None
if isinstance(sql, str):
sql = sql.encode(self.encoding)
packet_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
# tiny optimization: build first packet manually instead of
        # calling self.write_packet()
prelude = struct.pack("<iB", packet_size, command)
packet = prelude + sql[: packet_size - 1]
self._write_bytes(packet)
if DEBUG:
dump_packet(packet)
self._next_seq_id = 1
if packet_size < MAX_PACKET_LEN:
return
sql = sql[packet_size - 1 :]
while True:
packet_size = min(MAX_PACKET_LEN, len(sql))
self.write_packet(sql[:packet_size])
sql = sql[packet_size:]
if not sql and packet_size < MAX_PACKET_LEN:
break
def _request_authentication(self):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
if int(self.server_version.split(".", 1)[0]) >= 5:
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, str):
self.user = self.user.encode(self.encoding)
data_init = struct.pack(
"<iIB23s", self.client_flag, MAX_PACKET_LEN, charset_id, b""
)
if self.ssl and self.server_capabilities & CLIENT.SSL:
self.write_packet(data_init)
self._sock = self.ctx.wrap_socket(self._sock, server_hostname=self.host)
self._rfile = self._sock.makefile("rb")
self._secure = True
data = data_init + self.user + b"\0"
authresp = b""
plugin_name = None
if self._auth_plugin_name == "":
plugin_name = b""
authresp = _auth.scramble_native_password(self.password, self.salt)
elif self._auth_plugin_name == "mysql_native_password":
plugin_name = b"mysql_native_password"
authresp = _auth.scramble_native_password(self.password, self.salt)
elif self._auth_plugin_name == "caching_sha2_password":
plugin_name = b"caching_sha2_password"
if self.password:
if DEBUG:
print("caching_sha2: trying fast path")
authresp = _auth.scramble_caching_sha2(self.password, self.salt)
else:
if DEBUG:
print("caching_sha2: empty password")
elif self._auth_plugin_name == "sha256_password":
plugin_name = b"sha256_password"
if self.ssl and self.server_capabilities & CLIENT.SSL:
authresp = self.password + b"\0"
elif self.password:
authresp = b"\1" # request public key
else:
authresp = b"\0" # empty password
if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
data += _lenenc_int(len(authresp)) + authresp
elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
data += struct.pack("B", len(authresp)) + authresp
else: # pragma: no cover - not testing against servers without secure auth (>=5.0)
data += authresp + b"\0"
if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
if isinstance(self.db, str):
self.db = self.db.encode(self.encoding)
data += self.db + b"\0"
if self.server_capabilities & CLIENT.PLUGIN_AUTH:
data += (plugin_name or b"") + b"\0"
if self.server_capabilities & CLIENT.CONNECT_ATTRS:
connect_attrs = b""
for k, v in self._connect_attrs.items():
k = k.encode("utf-8")
connect_attrs += _lenenc_int(len(k)) + k
v = v.encode("utf-8")
connect_attrs += _lenenc_int(len(v)) + v
data += _lenenc_int(len(connect_attrs)) + connect_attrs
self.write_packet(data)
auth_packet = self._read_packet()
# if authentication method isn't accepted the first byte
# will have the octet 254
if auth_packet.is_auth_switch_request():
if DEBUG:
print("received auth switch")
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
auth_packet.read_uint8() # 0xfe packet identifier
plugin_name = auth_packet.read_string()
if (
self.server_capabilities & CLIENT.PLUGIN_AUTH
and plugin_name is not None
):
auth_packet = self._process_auth(plugin_name, auth_packet)
else:
raise err.OperationalError("received unknown auth swich request")
elif auth_packet.is_extra_auth_data():
if DEBUG:
print("received extra data")
# https://dev.mysql.com/doc/internals/en/successful-authentication.html
if self._auth_plugin_name == "caching_sha2_password":
auth_packet = _auth.caching_sha2_password_auth(self, auth_packet)
elif self._auth_plugin_name == "sha256_password":
auth_packet = _auth.sha256_password_auth(self, auth_packet)
else:
raise err.OperationalError(
"Received extra packet for auth method %r", self._auth_plugin_name
)
if DEBUG:
print("Succeed to auth")
def _process_auth(self, plugin_name, auth_packet):
handler = self._get_auth_plugin_handler(plugin_name)
if handler:
try:
return handler.authenticate(auth_packet)
except AttributeError:
if plugin_name != b"dialog":
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_CANNOT_LOAD,
"Authentication plugin '%s'"
" not loaded: - %r missing authenticate method"
% (plugin_name, type(handler)),
)
if plugin_name == b"caching_sha2_password":
return _auth.caching_sha2_password_auth(self, auth_packet)
elif plugin_name == b"sha256_password":
return _auth.sha256_password_auth(self, auth_packet)
elif plugin_name == b"mysql_native_password":
data = _auth.scramble_native_password(self.password, auth_packet.read_all())
elif plugin_name == b"client_ed25519":
data = _auth.ed25519_password(self.password, auth_packet.read_all())
elif plugin_name == b"mysql_old_password":
data = (
_auth.scramble_old_password(self.password, auth_packet.read_all())
+ b"\0"
)
elif plugin_name == b"mysql_clear_password":
# https://dev.mysql.com/doc/internals/en/clear-text-authentication.html
data = self.password + b"\0"
elif plugin_name == b"dialog":
pkt = auth_packet
while True:
flag = pkt.read_uint8()
echo = (flag & 0x06) == 0x02
last = (flag & 0x01) == 0x01
prompt = pkt.read_all()
if prompt == b"Password: ":
self.write_packet(self.password + b"\0")
elif handler:
resp = "no response - TypeError within plugin.prompt method"
try:
resp = handler.prompt(echo, prompt)
self.write_packet(resp + b"\0")
except AttributeError:
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_CANNOT_LOAD,
"Authentication plugin '%s'"
" not loaded: - %r missing prompt method"
% (plugin_name, handler),
)
except TypeError:
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_ERR,
"Authentication plugin '%s'"
" %r didn't respond with string. Returned '%r' to prompt %r"
% (plugin_name, handler, resp, prompt),
)
else:
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_CANNOT_LOAD,
"Authentication plugin '%s' not configured" % (plugin_name,),
)
pkt = self._read_packet()
pkt.check_error()
if pkt.is_ok_packet() or last:
break
return pkt
else:
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_CANNOT_LOAD,
"Authentication plugin '%s' not configured" % plugin_name,
)
self.write_packet(data)
pkt = self._read_packet()
pkt.check_error()
return pkt
def _get_auth_plugin_handler(self, plugin_name):
plugin_class = self._auth_plugin_map.get(plugin_name)
if not plugin_class and isinstance(plugin_name, bytes):
plugin_class = self._auth_plugin_map.get(plugin_name.decode("ascii"))
if plugin_class:
try:
handler = plugin_class(self)
except TypeError:
raise err.OperationalError(
CR.CR_AUTH_PLUGIN_CANNOT_LOAD,
"Authentication plugin '%s'"
" not loaded: - %r cannot be constructed with connection object"
% (plugin_name, plugin_class),
)
else:
handler = None
return handler
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
def _get_server_information(self):
i = 0
packet = self._read_packet()
data = packet.get_all_data()
self.protocol_version = data[i]
i += 1
server_end = data.find(b"\0", i)
self.server_version = data[i:server_end].decode("latin1")
i = server_end + 1
self.server_thread_id = struct.unpack("<I", data[i : i + 4])
i += 4
self.salt = data[i : i + 8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack("<H", data[i : i + 2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack("<BHHB", data[i : i + 6])
i += 6
# TODO: deprecate server_language and server_charset.
# mysqlclient-python doesn't provide it.
self.server_language = lang
try:
self.server_charset = charset_by_id(lang).name
except KeyError:
# unknown collation
self.server_charset = None
self.server_status = stat
if DEBUG:
print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG:
print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i : i + salt_len]
i += salt_len
i += 1
# AUTH PLUGIN NAME may appear here.
if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i:
# Due to Bug#59453 the auth-plugin-name is missing the terminating
# NUL-char in versions prior to 5.5.10 and 5.6.2.
# ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
# didn't use version checks as mariadb is corrected and reports
# earlier than those two.
server_end = data.find(b"\0", i)
if server_end < 0: # pragma: no cover - very specific upstream bug
# not found \0 and last field so take it all
self._auth_plugin_name = data[i:].decode("utf-8")
else:
self._auth_plugin_name = data[i:server_end].decode("utf-8")
def get_server_info(self):
return self.server_version
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class MySQLResult:
def __init__(self, connection):
"""
:type connection: Connection
"""
self.connection = connection
self.affected_rows = None
self.insert_id = None
self.server_status = None
self.warning_count = 0
self.message = None
self.field_count = 0
self.description = None
self.rows = None
self.has_next = None
self.unbuffered_active = False
def __del__(self):
if self.unbuffered_active:
self._finish_unbuffered_query()
def read(self):
try:
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
else:
self._read_result_packet(first_packet)
finally:
self.connection = None
def init_unbuffered_query(self):
"""
:raise OperationalError: If the connection to the MySQL server is lost.
:raise InternalError:
"""
self.unbuffered_active = True
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
self.unbuffered_active = False
self.connection = None
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
self.unbuffered_active = False
self.connection = None
else:
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
# Apparently, MySQLdb picks this number because it's the maximum
# value of a 64bit unsigned integer. Since we're emulating MySQLdb,
# we set it to this instead of None, which would be preferred.
self.affected_rows = 18446744073709551615
def _read_ok_packet(self, first_packet):
ok_packet = OKPacketWrapper(first_packet)
self.affected_rows = ok_packet.affected_rows
self.insert_id = ok_packet.insert_id
self.server_status = ok_packet.server_status
self.warning_count = ok_packet.warning_count
self.message = ok_packet.message
self.has_next = ok_packet.has_next
def _read_load_local_packet(self, first_packet):
if not self.connection._local_infile:
raise RuntimeError(
"**WARN**: Received LOAD_LOCAL packet but local_infile option is false."
)
load_packet = LoadLocalPacketWrapper(first_packet)
sender = LoadLocalFile(load_packet.filename, self.connection)
try:
sender.send_data()
except:
self.connection._read_packet() # skip ok packet
raise
ok_packet = self.connection._read_packet()
if (
not ok_packet.is_ok_packet()
): # pragma: no cover - upstream induced protocol error
raise err.OperationalError(
CR.CR_COMMANDS_OUT_OF_SYNC,
"Commands Out of Sync",
)
self._read_ok_packet(ok_packet)
def _check_packet_is_eof(self, packet):
if not packet.is_eof_packet():
return False
# TODO: Support CLIENT.DEPRECATE_EOF
# 1) Add DEPRECATE_EOF to CAPABILITIES
# 2) Mask CAPABILITIES with server_capabilities
# 3) if server_capabilities & CLIENT.DEPRECATE_EOF: use OKPacketWrapper instead of EOFPacketWrapper
wp = EOFPacketWrapper(packet)
self.warning_count = wp.warning_count
self.has_next = wp.has_next
return True
def _read_result_packet(self, first_packet):
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
self._read_rowdata_packet()
def _read_rowdata_packet_unbuffered(self):
# Check if in an active query
if not self.unbuffered_active:
return
# EOF
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None
self.rows = None
return
row = self._read_row_from_packet(packet)
self.affected_rows = 1
        self.rows = (row,)  # rows should be a tuple of rows for MySQL-python compatibility.
return row
def _finish_unbuffered_query(self):
# After much reading on the MySQL protocol, it appears that there is,
# in fact, no way to stop MySQL from sending all the data after
# executing a query, so we just spin, and wait for an EOF packet.
while self.unbuffered_active:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None # release reference to kill cyclic reference.
def _read_rowdata_packet(self):
"""Read a rowdata packet for each data row in the result set."""
rows = []
while True:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.connection = None # release reference to kill cyclic reference.
break
rows.append(self._read_row_from_packet(packet))
self.affected_rows = len(rows)
self.rows = tuple(rows)
def _read_row_from_packet(self, packet):
row = []
for encoding, converter in self.converters:
try:
data = packet.read_length_coded_string()
except IndexError:
# No more columns in this row
# See https://github.com/PyMySQL/PyMySQL/pull/434
break
if data is not None:
if encoding is not None:
data = data.decode(encoding)
if DEBUG:
print("DEBUG: DATA = ", data)
if converter is not None:
data = converter(data)
row.append(data)
return tuple(row)
def _get_descriptions(self):
"""Read a column descriptor packet for each column in the result."""
self.fields = []
self.converters = []
use_unicode = self.connection.use_unicode
conn_encoding = self.connection.encoding
description = []
for i in range(self.field_count):
field = self.connection._read_packet(FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
field_type = field.type_code
if use_unicode:
if field_type == FIELD_TYPE.JSON:
# When SELECT from JSON column: charset = binary
# When SELECT CAST(... AS JSON): charset = connection encoding
# This behavior is different from TEXT / BLOB.
# We should decode result by connection encoding regardless charsetnr.
# See https://github.com/PyMySQL/PyMySQL/issues/488
encoding = conn_encoding # SELECT CAST(... AS JSON)
elif field_type in TEXT_TYPES:
if field.charsetnr == 63: # binary
# TEXTs with charset=binary means BINARY types.
encoding = None
else:
encoding = conn_encoding
else:
# Integers, Dates and Times, and other basic data is encoded in ascii
encoding = "ascii"
else:
encoding = None
converter = self.connection.decoders.get(field_type)
if converter is converters.through:
converter = None
if DEBUG:
print(f"DEBUG: field={field}, converter={converter}")
self.converters.append((encoding, converter))
eof_packet = self.connection._read_packet()
assert eof_packet.is_eof_packet(), "Protocol error, expecting EOF"
self.description = tuple(description)
class LoadLocalFile:
def __init__(self, filename, connection):
self.filename = filename
self.connection = connection
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection._sock:
raise err.InterfaceError(0, "")
conn = self.connection
try:
with open(self.filename, "rb") as open_file:
packet_size = min(
conn.max_allowed_packet, 16 * 1024
) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(
ER.FILE_NOT_FOUND,
f"Can't find file '{self.filename}'",
)
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b"")
|
the-stack_106_28035 |
import os
import requests
class FBProfileBuilder:
def __init__(self, userId, stateService):
self.userId = userId
self.stateService = stateService
def _get_user_info(self):
"""
Get facebook user info for profile building
:returns: user info
:rtype: json/dictionary
"""
URL = 'https://graph.facebook.com/v3.3/' + self.userId\
+ '?fields=first_name,last_name&access_token' + os.environ['FB_ACCESS_TOKEN']
resp = requests.get(URL)
return resp.json()
def __call__(self):
userInfo = self._get_user_info()
self.stateService.init_user_session({
'userId' : self.userId,
'first_name' : userInfo['first_name'],
'last_name' : userInfo['last_name']
})
return userInfo
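
# A hypothetical usage sketch (the stateService object and the FB_ACCESS_TOKEN
# environment variable are assumed to be provided elsewhere, they are not
# defined in this module):
#
#   builder = FBProfileBuilder(user_id, state_service)
#   user_info = builder()   # fetches the Graph profile and seeds the session
#   print(user_info['first_name'], user_info['last_name'])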
|
the-stack_106_28037 | #!/usr/bin/env python3
#
# O.MG Cable firware extraction and analysis tool
# Copyright (C) 2021 Kevin Breen, Immersive Labs
# https://github.com/Immersive-Labs-Sec/OMG-Extractor
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys
import pprint
import esptool
import argparse
from io import StringIO
MODE_PATTERN = b'MODE ([1-2])\x00'
SSID_PATTERN = b'SSID (.*)\x00PASS'
PASS_PATTERN = b'PASS (.*)\x00MODE'
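# Rough sketch of what these patterns pull out of the raw flash dump; the
# sample bytes below are made up for illustration only:
#
#   blob = b'...SSID HomeWifi\x00PASS hunter2\x00MODE 2\x00...'
#   re.search(SSID_PATTERN, blob).group(1)  -> b'HomeWifi'
#   re.search(PASS_PATTERN, blob).group(1)  -> b'hunter2'
#   re.search(MODE_PATTERN, blob).group(1)  -> b'2'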
FLASH_SIZE = "0x1A7DDB"
# Firmware 1.5.3
PAYLOAD_OFFSETS = [
0xB0000,
0xB4000,
0xB8000,
0xBC000,
0xC0000,
0xC4000,
0xC8000
]
NAME_OFFSET = 0x17c004
DESCRIPTION_OFFSET = 0x17c024
MAC_OFFSETS = [0xFE492, 0xFD429]
class Capturing(list):
"""
We use this like a context manager to get the output from esptool
"""
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio
sys.stdout = self._stdout
def dump_firmware(dev_name, firmware_bin, verbose=False):
device_info = {
"hardware": {
"ChipID": "Unknown",
"MAC": "Unknown"
},
"MAC": "Unknown",
"Name": "Unknown",
"Description": "Unknown",
"Mode": "Unknown",
"SSID": "Unknown",
"Pass": "Unknown",
"scripts": []
}
if dev_name:
# Connect to the programmer dump ~ 2mb of data.
print(f"[+] Connecting to {dev_name}")
# Read Device Info
with Capturing() as esp_results:
command = ['--baud', '115200', '--port', dev_name, '--no-stub', 'chip_id' ]
try:
esptool.main(command)
except Exception as err:
print(err)
print(f"[!] Unable to find an OMG Cable on {dev_name}")
for element in esp_results:
if element.startswith("MAC"):
device_info['hardware']['MAC'] = element.split(": ")[-1]
if element.startswith("Chip ID: "):
device_info['hardware']['ChipID'] = element.split(": ")[-1]
print("[+] Found Device")
print(f" [-] Chip ID: {device_info['hardware']['ChipID']}")
print(f" [-] MAC: {device_info['hardware']['MAC']}")
print(f"[+] Dumping firmware to {firmware_bin}")
print(" [!] You will have to reconnect the cable to try again")
print(" [-] This will take a minute or 2")
try:
command = ['--baud', '115200', '--port', dev_name, 'read_flash', '0x0', FLASH_SIZE, firmware_bin]
if verbose:
esptool.main(command)
else:
with Capturing() as esp_results:
esptool.main(command)
print(" [-] Success")
except Exception as err:
print(f"[!] Error reading firmware: {err}")
return device_info
print("[+] Reading Firmware from {firmware_bin}")
with open(firmware_bin, "rb") as firmware_dump:
raw_firmware = firmware_dump.read()
print(" [-] Searching for Cable Mode")
cable_mode = re.search(MODE_PATTERN, raw_firmware)
# Find the WiFi Mode
if cable_mode:
        if cable_mode.group(1) == b"2":
wifi_mode = "Access Point"
else:
wifi_mode = "Station Mode"
device_info['Mode'] = wifi_mode
# Search for SSID Details
print(" [-] Searching for SSID Details")
ssid = re.search(SSID_PATTERN, raw_firmware).group(1)
if ssid:
device_info['SSID'] = ssid
ssid_pass = re.search(PASS_PATTERN, raw_firmware).group(1)
if ssid_pass:
device_info["Pass"] = ssid_pass
# Find MAC at offset
mac_bytes = raw_firmware[MAC_OFFSETS[0]:MAC_OFFSETS[0]+6]
mac_string = ":".join([hex(x)[2:] for x in mac_bytes])
device_info['MAC'] = mac_string.upper()
# User Set Name and Description
device_info['Name'] = raw_firmware[NAME_OFFSET:NAME_OFFSET+20].rstrip(b'\x00')
device_info['Description'] = raw_firmware[DESCRIPTION_OFFSET:DESCRIPTION_OFFSET+20].rstrip(b'\x00')
# Extract Payloads
print(" [-] Searching for Payloads")
# Make this a single regex with multiple matches?
payload_counter = 0
for offset in PAYLOAD_OFFSETS:
raw_script = raw_firmware[offset: offset+4000]
filtered_script = raw_script.rstrip(b'\xff')
device_info['scripts'].append(filtered_script)
payload_counter += 1
return device_info
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract Payloads from an O.mg Cable')
parser.add_argument('-d', '--device', help='USB Device to read "/dev/ttyUSB0"',
default="/dev/ttyUSB0")
parser.add_argument('-o', '--output', help="Firmware bin file", default="cable.bin")
parser.add_argument('-f', '--file', help="Read an existing firware dump")
args = parser.parse_args()
if args.file:
dev_name = None
bin_file = args.file
else:
dev_name = args.device
bin_file = args.output
device_info = dump_firmware(dev_name, bin_file)
pprint.pprint(device_info)
|
the-stack_106_28041 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import mmcv
import numpy as np
import torch.multiprocessing as mp
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals)
def load_video_infos(ann_file):
"""Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos.
"""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
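# A hypothetical shape for the annotation file consumed above (only the
# video-name -> dict mapping is taken from the code; the inner keys are an
# assumption based on typical ActivityNet-style annotations):
#
#   {
#       "v_abc123": {"duration_second": 120.5, "annotations": [...], ...},
#       "v_def456": {"duration_second": 98.0, "annotations": [...], ...}
#   }
#
# load_video_infos() flattens this mapping into a list and stores each key on
# its entry as ``video_name``.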
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update()
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, pgm_features_thread, **kwargs):
"""Generate proposals features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_features_thread
processes = []
manager = mp.Manager()
feature_return_dict = manager.dict()
kwargs['result_dict'] = feature_return_dict
for tid in range(pgm_features_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_features_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_features_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
for video_name in feature_return_dict.keys():
bsp_feature = feature_return_dict[video_name]
feature_path = osp.join(pgm_features_dir, video_name + '.npy')
np.save(feature_path, bsp_feature)
prog_bar.update()
def parse_args():
parser = argparse.ArgumentParser(description='Proposal generation module')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--mode',
choices=['train', 'test'],
default='test',
help='train or test')
args = parser.parse_args()
return args
def main():
print('Begin Proposal Generation Module')
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
tem_results_dir = cfg.tem_results_dir
pgm_proposals_dir = cfg.pgm_proposals_dir
pgm_features_dir = cfg.pgm_features_dir
if args.mode == 'test':
generate_proposals(cfg.ann_file_val, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_val, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, **cfg.pgm_features_test_cfg)
print('\nFinish feature generation')
elif args.mode == 'train':
generate_proposals(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, pgm_features_dir,
**cfg.pgm_features_train_cfg)
print('\nFinish feature generation')
print('Finish Proposal Generation Module')
if __name__ == '__main__':
main()
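
# Typical invocation (the script and config paths are placeholders; --mode
# defaults to "test" per the argparse setup above):
#   python <this_script>.py <bsn_config>.py --mode train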
|
the-stack_106_28042 | """Support remote entity for Xiaomi Miot."""
import logging
import time
from functools import partial
from homeassistant.const import * # noqa: F401
from homeassistant.components import remote
from homeassistant.components.remote import (
DOMAIN as ENTITY_DOMAIN,
RemoteEntity,
)
from miio.chuangmi_ir import (
ChuangmiIr,
DeviceException,
)
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotEntity,
async_setup_config_entry,
bind_services_to_entries,
TRANSLATION_LANGUAGES,
)
from .core.miot_spec import (
MiotSpec,
)
from .core.xiaomi_cloud import (
MiotCloud,
MiCloudException,
)
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
hass.data.setdefault(DATA_KEY, {})
hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
model = str(config.get(CONF_MODEL) or '')
entities = []
miot = config.get('miot_type')
if miot:
spec = await MiotSpec.async_from_type(hass, miot)
if spec.name in ['remote_control', 'ir_remote_control']:
if 'chuangmi.remote.' in model or 'chuangmi.ir.' in model:
entities.append(MiotRemoteEntity(config, spec))
elif model in [
'xiaomi.wifispeaker.l05c',
'xiaomi.wifispeaker.lx5a',
'xiaomi.wifispeaker.lx06',
'lumi.acpartner.mcn04',
]:
entities.append(MiotRemoteEntity(config, spec))
for entity in entities:
hass.data[DOMAIN]['entities'][entity.unique_id] = entity
async_add_entities(entities, update_before_add=True)
bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotRemoteEntity(MiotEntity, RemoteEntity):
def __init__(self, config, miot_spec: MiotSpec):
self._miot_spec = miot_spec
super().__init__(miot_service=None, config=config, logger=_LOGGER)
host = config.get(CONF_HOST)
token = config.get(CONF_TOKEN)
self._device = ChuangmiIr(host, token)
self._attr_should_poll = False
self._translations = {
**TRANSLATION_LANGUAGES,
**(TRANSLATION_LANGUAGES.get('_globals', {})),
**(TRANSLATION_LANGUAGES.get('ir_devices', {})),
}
async def async_added_to_hass(self):
await super().async_added_to_hass()
did = self.miot_did
mic = self.miot_cloud
irs = []
if did and isinstance(mic, MiotCloud):
dls = await mic.async_get_devices() or []
for d in dls:
if did != d.get('parent_id'):
continue
ird = d.get('did')
rdt = await mic.async_request_api('v2/irdevice/controller/keys', {'did': ird}) or {}
kys = (rdt.get('result') or {}).get('keys', {})
irs.append({
'did': ird,
'name': d.get('name'),
'keys': kys,
})
add_selects = self._add_entities.get('select')
if not kys:
                    self.logger.info('%s: IR device %s(%s) has no keys: %s', self.name, ird, d.get('name'), rdt)
elif add_selects and ird not in self._subs:
from .select import SelectSubEntity
ols = []
for k in kys:
nam = k.get('display_name') or k.get('name')
if not nam:
continue
nam = self._translations.get(nam, nam)
ols.append(nam)
self._subs[ird] = SelectSubEntity(self, ird, option={
'name': d.get('name'),
'entity_id': f'remote_{ird}'.replace('.', '_'),
'options': ols,
'select_option': self.press_ir_key,
})
add_selects([self._subs[ird]], update_before_add=False)
if irs:
self._state_attrs['ir_devices'] = irs
def is_on(self):
return True
def send_remote_command(self, command, **kwargs):
"""Send commands to a device."""
repeat = kwargs.get(remote.ATTR_NUM_REPEATS, remote.DEFAULT_NUM_REPEATS)
delays = kwargs.get(remote.ATTR_DELAY_SECS, remote.DEFAULT_DELAY_SECS)
did = kwargs.get(remote.ATTR_DEVICE)
for _ in range(repeat):
for cmd in command:
try:
if f'{cmd}'[:4] == 'key:':
ret = self.send_cloud_command(did, cmd)
else:
ret = self._device.play(cmd)
self.logger.info('%s: Send IR command %s(%s) result: %s', self.name, cmd, kwargs, ret)
except (DeviceException, MiCloudException) as exc:
self.logger.error('%s: Send IR command %s(%s) failed: %s', self.name, cmd, kwargs, exc)
time.sleep(delays)
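    # Dispatch sketch for the commands handled above (the key id below is a
    # made-up example): entries shaped like "key:123" are forwarded to the
    # Xiaomi cloud key-click API for the IR child device passed via the
    # standard ``device`` field of remote.send_command, while anything else is
    # treated as a raw ChuangmiIr code and played locally, e.g.
    #
    #   service: remote.send_command
    #   data:
    #     command: ["key:123"]
    #     device: "<ir child did>"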
def send_cloud_command(self, did, command):
key = f'{command}'
if key[:4] == 'key:':
key = key[4:]
try:
key = int(key)
except (TypeError, ValueError):
key = None
if not did or not key:
self.logger.warning('%s: IR command %s to %s invalid for cloud.', self.name, command, did)
return False
mic = self.miot_cloud
if not mic:
return False
res = mic.request_miot_api('v2/irdevice/controller/key/click', {
'did': did,
'key_id': key,
}) or {}
if res.get('code'):
self.logger.warning('%s: Send IR command %s(%s) failed: %s', self.name, command, did, res)
return res
async def async_send_command(self, command, **kwargs):
"""Send commands to a device."""
await self.hass.async_add_executor_job(
partial(self.send_remote_command, command, **kwargs)
)
def learn_command(self, **kwargs):
"""Learn a command from a device."""
try:
key = int(kwargs.get(remote.ATTR_DEVICE))
return self._device.learn(key)
except (TypeError, ValueError, DeviceException) as exc:
self.logger.warning('%s: Learn command failed: %s, the device ID is used to store command '
'and must between 1 and 1000000.', self.name, exc)
return False
def delete_command(self, **kwargs):
"""Delete commands from the database."""
raise NotImplementedError()
def press_ir_key(self, select, **kwargs):
key = None
did = kwargs.get('attr')
for d in self._state_attrs.get('ir_devices', []):
if did and did != d.get('did'):
continue
for k in d.get('keys', []):
if select not in [
k.get('display_name'),
k.get('name'),
self._translations.get(k.get('display_name') or k.get('name')),
]:
continue
key = k.get('id')
if key:
return self.send_cloud_command(did, key)
return False
|
the-stack_106_28044 | # -*- coding: utf-8 -*-
import re
import subprocess
from collections import namedtuple
from typing import Any
from typing import Optional
from poetry.core.utils._compat import PY36
from poetry.core.utils._compat import WINDOWS
from poetry.core.utils._compat import Path
from poetry.core.utils._compat import decode
pattern_formats = {
"protocol": r"\w+",
"user": r"[a-zA-Z0-9_.-]+",
"resource": r"[a-zA-Z0-9_.-]+",
"port": r"\d+",
"path": r"[\w~.\-/\\]+",
"name": r"[\w~.\-]+",
"rev": r"[^@#]+",
}
PATTERNS = [
re.compile(
r"^(git\+)?"
r"(?P<protocol>https?|git|ssh|rsync|file)://"
r"(?:(?P<user>{user})@)?"
r"(?P<resource>{resource})?"
r"(:(?P<port>{port}))?"
r"(?P<pathname>[:/\\]({path}[/\\])?"
r"((?P<name>{name}?)(\.git|[/\\])?)?)"
r"([@#](?P<rev>{rev}))?"
r"$".format(
user=pattern_formats["user"],
resource=pattern_formats["resource"],
port=pattern_formats["port"],
path=pattern_formats["path"],
name=pattern_formats["name"],
rev=pattern_formats["rev"],
)
),
re.compile(
r"(git\+)?"
r"((?P<protocol>{protocol})://)"
r"(?:(?P<user>{user})@)?"
r"(?P<resource>{resource}:?)"
r"(:(?P<port>{port}))?"
r"(?P<pathname>({path})"
r"(?P<name>{name})(\.git|/)?)"
r"([@#](?P<rev>{rev}))?"
r"$".format(
protocol=pattern_formats["protocol"],
user=pattern_formats["user"],
resource=pattern_formats["resource"],
port=pattern_formats["port"],
path=pattern_formats["path"],
name=pattern_formats["name"],
rev=pattern_formats["rev"],
)
),
re.compile(
r"^(?:(?P<user>{user})@)?"
r"(?P<resource>{resource})"
r"(:(?P<port>{port}))?"
r"(?P<pathname>([:/]{path}/)"
r"(?P<name>{name})(\.git|/)?)"
r"([@#](?P<rev>{rev}))?"
r"$".format(
user=pattern_formats["user"],
resource=pattern_formats["resource"],
port=pattern_formats["port"],
path=pattern_formats["path"],
name=pattern_formats["name"],
rev=pattern_formats["rev"],
)
),
re.compile(
r"((?P<user>{user})@)?"
r"(?P<resource>{resource})"
r"[:/]{{1,2}}"
r"(?P<pathname>({path})"
r"(?P<name>{name})(\.git|/)?)"
r"([@#](?P<rev>{rev}))?"
r"$".format(
user=pattern_formats["user"],
resource=pattern_formats["resource"],
path=pattern_formats["path"],
name=pattern_formats["name"],
rev=pattern_formats["rev"],
)
),
]
class GitError(RuntimeError):
pass
class ParsedUrl:
def __init__(
self,
protocol, # type: Optional[str]
resource, # type: Optional[str]
pathname, # type: Optional[str]
user, # type: Optional[str]
port, # type: Optional[str]
name, # type: Optional[str]
rev, # type: Optional[str]
):
self.protocol = protocol
self.resource = resource
self.pathname = pathname
self.user = user
self.port = port
self.name = name
self.rev = rev
@classmethod
def parse(cls, url): # type: (str) -> ParsedUrl
for pattern in PATTERNS:
m = pattern.match(url)
if m:
groups = m.groupdict()
return ParsedUrl(
groups.get("protocol"),
groups.get("resource"),
groups.get("pathname"),
groups.get("user"),
groups.get("port"),
groups.get("name"),
groups.get("rev"),
)
raise ValueError('Invalid git url "{}"'.format(url))
@property
def url(self): # type: () -> str
return "{}{}{}{}{}".format(
"{}://".format(self.protocol) if self.protocol else "",
"{}@".format(self.user) if self.user else "",
self.resource,
":{}".format(self.port) if self.port else "",
"/" + self.pathname.lstrip(":/"),
)
def format(self): # type: () -> str
return self.url
def __str__(self): # type: () -> str
return self.format()
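# Illustrative sketch (not part of the original module): how ParsedUrl handles a
# typical "git+ssh" requirement string; the URL below is an assumption for demo only.
#
#   >>> p = ParsedUrl.parse("git+ssh://git@github.com/org/repo.git@v1.0")
#   >>> (p.protocol, p.user, p.resource, p.name, p.rev)
#   ('ssh', 'git', 'github.com', 'repo', 'v1.0')
#   >>> p.url
#   'ssh://git@github.com/org/repo.git'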
GitUrl = namedtuple("GitUrl", ["url", "revision"])
_executable = None
def executable():
global _executable
if _executable is not None:
return _executable
if WINDOWS and PY36:
# Finding git via where.exe
where = "%WINDIR%\\System32\\where.exe"
paths = decode(
subprocess.check_output([where, "git"], shell=True, encoding="oem")
).split("\n")
for path in paths:
if not path:
continue
path = Path(path.strip())
try:
path.relative_to(Path.cwd())
except ValueError:
_executable = str(path)
break
else:
_executable = "git"
if _executable is None:
raise RuntimeError("Unable to find a valid git executable")
return _executable
def _reset_executable():
global _executable
_executable = None
class GitConfig:
def __init__(self, requires_git_presence=False): # type: (bool) -> None
self._config = {}
try:
config_list = decode(
subprocess.check_output(
[executable(), "config", "-l"], stderr=subprocess.STDOUT
)
)
m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list)
if m:
for group in m:
self._config[group[0]] = group[1]
except (subprocess.CalledProcessError, OSError):
if requires_git_presence:
raise
def get(self, key, default=None): # type: (Any, Optional[Any]) -> Any
return self._config.get(key, default)
def __getitem__(self, item): # type: (Any) -> Any
return self._config[item]
class Git:
def __init__(self, work_dir=None): # type: (Optional[Path]) -> None
self._config = GitConfig(requires_git_presence=True)
self._work_dir = work_dir
@classmethod
def normalize_url(cls, url): # type: (str) -> GitUrl
parsed = ParsedUrl.parse(url)
formatted = re.sub(r"^git\+", "", url)
if parsed.rev:
formatted = re.sub(r"[#@]{}$".format(parsed.rev), "", formatted)
altered = parsed.format() != formatted
if altered:
if re.match(r"^git\+https?", url) and re.match(
r"^/?:[^0-9]", parsed.pathname
):
normalized = re.sub(r"git\+(.*:[^:]+):(.*)", "\\1/\\2", url)
elif re.match(r"^git\+file", url):
normalized = re.sub(r"git\+", "", url)
else:
normalized = re.sub(r"^(?:git\+)?ssh://", "", url)
else:
normalized = parsed.format()
return GitUrl(re.sub(r"#[^#]*$", "", normalized), parsed.rev)
@property
def config(self): # type: () -> GitConfig
return self._config
def clone(self, repository, dest): # type: (str, Path) -> str
self._check_parameter(repository)
return self.run("clone", "--recurse-submodules", "--", repository, str(dest))
def checkout(self, rev, folder=None): # type: (str, Optional[Path]) -> str
args = []
if folder is None and self._work_dir:
folder = self._work_dir
if folder:
args += [
"--git-dir",
(folder / ".git").as_posix(),
"--work-tree",
folder.as_posix(),
]
self._check_parameter(rev)
args += ["checkout", rev]
return self.run(*args)
def rev_parse(self, rev, folder=None): # type: (str, Optional[Path]) -> str
args = []
if folder is None and self._work_dir:
folder = self._work_dir
if folder:
args += [
"--git-dir",
(folder / ".git").as_posix(),
"--work-tree",
folder.as_posix(),
]
self._check_parameter(rev)
# We need "^0" (an alternative to "^{commit}") to ensure that the
# commit SHA of the commit the tag points to is returned, even in
# the case of annotated tags.
#
# We deliberately avoid the "^{commit}" syntax itself as on some
# platforms (cygwin/msys to be specific), the braces are interpreted
# as special characters and would require escaping, while on others
# they should not be escaped.
args += ["rev-parse", rev + "^0"]
return self.run(*args)
def get_ignored_files(self, folder=None): # type: (Optional[Path]) -> list
args = []
if folder is None and self._work_dir:
folder = self._work_dir
if folder:
args += [
"--git-dir",
(folder / ".git").as_posix(),
"--work-tree",
folder.as_posix(),
]
args += ["ls-files", "--others", "-i", "--exclude-standard"]
output = self.run(*args)
return output.strip().split("\n")
def remote_urls(self, folder=None): # type: (Optional[Path]) -> dict
output = self.run(
"config", "--get-regexp", r"remote\..*\.url", folder=folder
).strip()
urls = {}
for url in output.splitlines():
name, url = url.split(" ", 1)
urls[name.strip()] = url.strip()
return urls
def remote_url(self, folder=None): # type: (Optional[Path]) -> str
urls = self.remote_urls(folder=folder)
return urls.get("remote.origin.url", urls[list(urls.keys())[0]])
def run(self, *args, **kwargs): # type: (*Any, **Any) -> str
folder = kwargs.pop("folder", None)
if folder:
args = (
"--git-dir",
(folder / ".git").as_posix(),
"--work-tree",
folder.as_posix(),
) + args
return decode(
subprocess.check_output(
[executable()] + list(args), stderr=subprocess.STDOUT
)
).strip()
def _check_parameter(self, parameter): # type: (str) -> None
"""
Checks a git parameter to avoid unwanted code execution.
"""
if parameter.strip().startswith("-"):
raise GitError("Invalid Git parameter: {}".format(parameter))
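# Illustrative sketch (not part of the original module): Git.normalize_url splits a
# pip/poetry-style VCS URL into a plain clone URL plus the requested revision.
#
#   >>> Git.normalize_url("git+https://github.com/org/repo.git#main")
#   GitUrl(url='https://github.com/org/repo.git', revision='main')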
|
the-stack_106_28045 | from mongoengine.errors import FieldDoesNotExist
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.name)
|
the-stack_106_28046 | import threading
import discord
import json
import os
import async_cleverbot as ac
import cogs
from discord.ext import commands
import aiozaneapi
import asyncio
from datetime import datetime
import aiosqlite
from discord.ext.buttons import Paginator
from helpe import Help
from asyncdagpi import Client
import time
import mystbin
class Pag(Paginator):
async def teardown(self):
try:
await self.page.clear_reactions()
except discord.HTTPException:
pass
intents = discord.Intents.all()
intents.members = True
intents.reactions = True
intents.guilds = True
async def get_prefix(bot, message):
if message.guild is None:
prefixes = ["th,", "th.", "th ", "please dont find this one, "]
elif message.author.id == 787800565512929321:
prefixes = ["th,", "th.", "th ", ""]
else:
prefixes = ["th,", "th.", "th ", "please dont find this one,"]
return commands.when_mentioned_or(*prefixes)(bot, message)
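# Hypothetical sanity check (the message object is assumed, not part of the bot):
#   resolved = await get_prefix(bot, some_guild_message)
#   # commands.when_mentioned_or() always prepends the bot mention, so this yields
#   # ['<@!bot_id> ', '<@bot_id> ', 'th,', 'th.', 'th ', 'please dont find this one,']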
bot = commands.Bot(command_prefix=get_prefix, intents=intents, help_command=Help(), allowed_mentions=discord.AllowedMentions(users=True, roles=False, everyone=False, replied_user=True),case_insensitive=True)
bot.db = aiosqlite.connect("main.sqlite")
bot.mystbin_client = mystbin.Client()
bot.version = "15"
START_BAL = 250
token = open("toke.txt", "r").read()
bot.load_extension("jishaku")
hce = bot.get_command("help")
hce.hidden = True
dagpitoken = open("asy.txt", "r").read()
robloxcookie = open("roblox.txt", "r").read()
topastoken = open("top.txt", "r").read()
chatbottoken = open("chat.txt", "r").read()
hypixel = open("hypixel.txt", "r").read()
bot.robloxc = f"{robloxcookie}"
bot.hypixel = f"{hypixel}"
bot.topken = f"{topastoken}"
bot.chatbot = ac.Cleverbot(f"{chatbottoken}")
bot.se = aiozaneapi.Client(f'{open("zane.txt", "r").read()}')
bot.dagpi = Client(dagpitoken)
bot.start_time = time.time()
bot.thresholds = (10, 25, 50, 100)
@bot.event
async def on_connect():
print('bot connected')
@bot.event
async def on_ready():
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
await bot.db
current_time = time.time()
difference = int(round(current_time - bot.start_time))
bot.stats = bot.get_channel(804496786038980618)
cursor = await bot.db.cursor()
await cursor.execute("""CREATE TABLE IF NOT EXISTS mail(num INTEGER NOT NULL PRIMARY KEY, user_name TEXT, balance INTEGER, user_id INTEGER)""")
await bot.db.commit()
await cursor.execute("""CREATE TABLE IF NOT EXISTS warns1(num INTEGER NOT NULL PRIMARY KEY, warns INTEGER, user_id INTEGER)""")
await bot.db.commit()
bot.description = f"Multi-Purpose Discord.py bot used in {len(bot.guilds)} guilds!"
print(f'Bot ready, running on {discord.__version__} and connected to {len(bot.guilds)}')
e = discord.Embed(title=f"Bot Loaded!", description=f"Bot ready, loaded all cogs perfectly! Time to load is {difference} secs :)")
await bot.stats.send(embed=e)
@bot.event
async def on_message(message):
if bot.user.mentioned_in(message) and message.mention_everyone is False:
if 'prefix' in message.content.lower():
await message.channel.send('A full list of all commands is available by typing ```th,help```')
await bot.process_commands(message)
@bot.listen()
async def on_invite_update(member, invite):
await bot.wait_for_invites()
print(f"{member} joined {member.guild} with invite {invite}")
can_send = member.guild.system_channel is not None
if invite.uses in bot.thresholds and can_send:
try:
# I am sorry that rocky-wocks was all that came to mind
await member.guild.system_channel.send(
f"**Congratulations** to {invite.inviter} for reaching the "
f"**{invite.uses}** invite threshold! They will be "
f"rewarded with **{1000*invite.uses:,}** shiny rocky-wocks!"
)
except discord.Forbidden:
print(f"[FAILED] {invite.code} @ {invite.uses} by "
f"{invite.inviter}")
@bot.event
async def on_member_join(member : discord.Member):
feedback = bot.get_channel(794164790368796672)
role = member.guild.get_role(794135439497101323)
if member.guild.id == 787825469391241217:
await member.add_roles(role)
else:
pass
@bot.event
async def on_guild_join(guild):
await guild.system_channel.send(f'Hey there! do th,help or <@787820448913686539> help for commands!')
@bot.command()
async def prefix(ctx):
pass
@bot.event
async def on_command_error(ctx, error):
guild = ctx.guild
if ctx.guild.id == 336642139381301249:
pass
if 787800565512929321 == ctx.author.id:
pass
else:
if isinstance(error, commands.CommandOnCooldown):
e1 = discord.Embed(title="Command Error!", description=f"`{error}`")
e1.set_footer(text=f"{ctx.author.name}")
await ctx.send(embed=e1)
elif isinstance(error, commands.CommandNotFound):
e2 = discord.Embed(title="Command Error!", description=f"`{error}`")
e2.set_footer(text=f"{ctx.author.name}")
await ctx.send(embed=e2)
elif isinstance(error, commands.MissingPermissions):
e3 = discord.Embed(title="Command Error!", description=f"`{error}`")
e3.set_footer(text=f"{ctx.author.name}")
await ctx.send(embed=e3)
elif isinstance(error, commands.MissingRequiredArgument):
e4 = discord.Embed(title="Command Error!", description=f"`{error}`")
e4.set_footer(text=f"{ctx.author.name}")
await ctx.send(embed=e4)
elif isinstance(error, commands.CommandInvokeError):
haha = ctx.author.avatar_url
e7 = discord.Embed(title="Oh no green you fucked up", description=f"`{error}`")
e7.add_field(name="Command Caused By?", value=f"{ctx.command}")
e7.add_field(name="By?", value=f"ID : {ctx.author.id}, Name : {ctx.author.name}")
e7.set_thumbnail(url=f"{haha}")
e7.set_footer(text=f"{ctx.author.name}")
await ctx.send("New Error, Sending to devs straight away!")
await bot.stats.send(embed=e7)
else:
haaha = ctx.author.avatar_url
e9 = discord.Embed(title="Oh no green you fucked up", description=f"`{error}`")
e9.add_field(name="Command Caused By?", value=f"{ctx.command}")
e9.add_field(name="By?", value=f"ID : {ctx.author.id}, Name : {ctx.author.name}")
e9.set_thumbnail(url=f"{haaha}")
e9.set_footer(text=f"{ctx.author.name}")
await ctx.send("New Error, Sending to devs straight away!")
await bot.stats.send(embed=e9)
bot.run(token)
|
the-stack_106_28048 | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2019-2020 Terbau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
from typing import TYPE_CHECKING, List, Optional
from aioxmpp import JID
from .user import UserBase, ExternalAuth
from .errors import PartyError, Forbidden, HTTPException
from .presence import Presence
from .enums import Platform
if TYPE_CHECKING:
from .client import Client
from .party import ClientParty
# Type defs
Datetime = datetime.datetime
class FriendBase(UserBase):
__slots__ = UserBase.__slots__ + \
('_status', '_direction', '_favorite', '_created_at')
def __init__(self, client: 'Client', data: dict) -> None:
super().__init__(client, data)
def _update(self, data: dict) -> None:
super()._update(data)
self._status = data['status']
self._direction = data['direction']
self._created_at = self.client.from_iso(data['created'])
@property
def display_name(self) -> str:
""":class:`str`: The friend's displayname"""
return super().display_name
@property
def id(self) -> str:
""":class:`str`: The friend's id"""
return self._id
@property
def external_auths(self) -> List[ExternalAuth]:
""":class:`list`: List containing information about external auths.
Might be empty if the friend does not have any external auths"""
return self._external_auths
@property
def jid(self) -> JID:
""":class:`aioxmpp.JID`: The jid of the friend."""
return super().jid
@property
def status(self) -> str:
""":class:`str`: The friends status to the client. E.g. if the friend
is friends with the bot it will be ``ACCEPTED``.
.. warning::
This is not the same as status from presence!
"""
return self._status
@property
def direction(self) -> str:
""":class:`str`: The direction of the friendship. ``INBOUND`` if the friend
added :class:`ClientUser` else ``OUTGOING``.
"""
return self._direction
@property
def inbound(self) -> bool:
""":class:`bool`: ``True`` if this friend was the one to send the
friend request else ``False``.
"""
return self._direction == 'INBOUND'
@property
def outgoing(self) -> bool:
""":class:`bool`: ``True`` if the bot was the one to send the friend
request else ``False``.
"""
return self._direction == 'OUTGOING'
@property
def created_at(self) -> Datetime:
""":class:`datetime.datetime`: The UTC time of when the friendship was
created.
"""
return self._created_at
async def block(self) -> None:
"""|coro|
Blocks this friend.
Raises
------
HTTPException
Something went wrong when trying to block this user.
"""
await self.client.block_user(self.id)
def get_raw(self) -> dict:
return {
**(super().get_raw()),
'status': self.status,
'direction': self.direction,
'created': self.created_at
}
class Friend(FriendBase):
"""Represents a friend on Fortnite"""
__slots__ = FriendBase.__slots__ + ('_nickname', '_note', '_last_logout')
def __init__(self, client: 'Client', data: dict) -> None:
super().__init__(client, data)
self._last_logout = None
self._nickname = None
self._note = None
def __repr__(self) -> str:
return ('<Friend id={0.id!r} display_name={0.display_name!r} '
'epicgames_account={0.epicgames_account!r}>'.format(self))
def _update(self, data: dict) -> None:
super()._update(data)
self._favorite = data.get('favorite')
def _update_last_logout(self, dt: Datetime) -> None:
self._last_logout = dt
def _update_summary(self, data: dict) -> None:
_alias = data['alias']
self._nickname = _alias if _alias != '' else None
_note = data['note']
self._note = _note if _note != '' else None
@property
def display_name(self) -> str:
""":class:`str`: The friends displayname"""
return super().display_name
@property
def id(self) -> str:
""":class:`str`: The friends id"""
return self._id
@property
def favorite(self) -> bool:
""":class:`bool`: ``True`` if the friend is favorited by :class:`ClientUser`
else ``False``.
"""
return self._favorite
@property
def nickname(self) -> Optional[str]:
""":class:`str`: The friend's nickname. ``None`` if no nickname is set
for this friend.
"""
return self._nickname
@property
def note(self) -> Optional[str]:
""":class:`str`: The friend's note. ``None`` if no note is set."""
return self._note
@property
def external_auths(self) -> List[ExternalAuth]:
""":class:`list`: List containing information about external auths.
Might be empty if the friend does not have any external auths
"""
return self._external_auths
@property
def last_presence(self) -> Presence:
""":class:`Presence`: The last presence retrieved by the
friend. Might be ``None`` if no presence has been
received by this friend yet.
"""
return self.client.get_presence(self.id)
@property
def last_logout(self) -> Optional[Datetime]:
""":class:`datetime.datetime`: The UTC time of the last time this
friend logged off.
``None`` if this friend has never logged into fortnite or because
the friend was added after the client was started. If the latter is the
case, you can fetch the friends last logout with
:meth:`Friend.fetch_last_logout()`.
"""
return self._last_logout
@property
def platform(self) -> Optional[Platform]:
""":class:`Platform`: The platform the friend is currently online on.
``None`` if the friend is offline.
"""
pres = self.client.get_presence(self.id)
if pres is not None:
return pres.platform
def is_online(self) -> bool:
"""Method to check if a user is currently online.
.. warning::
This method uses the last received presence from this user to
determine if the friend is online or not. Therefore, this method
will most likely not return True when calling it in
:func:`event_friend_add()`. You could use :meth:`Client.wait_for()`
to wait for the presence to be received but remember that if the
            friend is in fact offline, no presence will be received. You can add
            a timeout to the method to make sure it won't wait forever.
Returns
-------
:class:`bool`
``True`` if the friend is currently online else ``False``.
"""
pres = self.client.get_presence(self.id)
if pres is None:
return False
return pres.available
async def fetch_last_logout(self):
"""|coro|
Fetches the last time this friend logged out.
Raises
------
HTTPException
An error occured while requesting.
Returns
-------
Optional[:class:`datetime.datetime`]
The last UTC datetime of this friends last logout. Could be
``None`` if the friend has never logged into fortnite.
"""
presences = await self.client.http.presence_get_last_online()
presence = presences.get(self.id)
if presence is not None:
self._update_last_logout(
self.client.from_iso(presence[0]['last_online'])
)
return self.last_logout
async def fetch_mutual_friends_count(self) -> int:
"""|coro|
Gets how many mutual friends the client and this friend have in common.
Returns
-------
:class:`int`
            The number of friends you have in common.
Raises
------
HTTPException
An error occured while requesting.
"""
data = await self.client.http.friends_get_summary()
for friend in data['friends']:
if friend['accountId'] == self.id:
return friend['mutual']
async def set_nickname(self, nickname: str) -> None:
"""|coro|
Sets the nickname of this friend.
Parameters
----------
nickname: :class:`str`
| The nickname you want to set.
| Min length: ``3``
| Max length: ``16``
Raises
------
ValueError
The nickname contains too few/many characters or contains invalid
characters.
HTTPException
An error occured while requesting.
"""
if not (3 <= len(nickname) <= 16):
raise ValueError('Invalid nickname length')
try:
await self.client.http.friends_set_nickname(self.id, nickname)
except HTTPException as e:
ignored = ('errors.com.epicgames.common.unsupported_media_type',
'errors.com.epicgames.validation.validation_failed')
if e.message_code in ignored:
raise ValueError('Invalid nickname')
e.reraise()
self._nickname = nickname
async def remove_nickname(self) -> None:
"""|coro|
Removes the friend's nickname.
Raises
------
HTTPException
An error occured while requesting.
"""
await self.client.http.friends_remove_nickname(self.id)
self._nickname = None
async def set_note(self, note: str) -> None:
"""|coro|
Pins a note to this friend.
        Parameters
        ----------
        note: :class:`str`
| The note you want to set.
| Min length: ``3``
| Max length: ``255``
Raises
------
ValueError
The note contains too few/many characters or contains invalid
characters.
HTTPException
An error occured while requesting.
"""
if not (3 <= len(note) <= 255):
raise ValueError('Invalid note length')
try:
await self.client.http.friends_set_note(self.id, note)
except HTTPException as e:
ignored = ('errors.com.epicgames.common.unsupported_media_type',
'errors.com.epicgames.validation.validation_failed')
if e.message_code in ignored:
raise ValueError('Invalid note')
e.reraise()
self._note = note
async def remove_note(self) -> None:
"""|coro|
Removes the friend's note.
Raises
------
HTTPException
An error occured while requesting.
"""
await self.client.http.friends_remove_note(self.id)
self._note = None
async def remove(self) -> None:
"""|coro|
Removes the friend from your friendlist.
Raises
------
HTTPException
Something went wrong when trying to remove this friend.
"""
await self.client.remove_or_decline_friend(self.id)
async def send(self, content: str) -> None:
"""|coro|
Sends a :class:`FriendMessage` to this friend.
Parameters
----------
content: :class:`str`
The content of the message.
"""
await self.client.xmpp.send_friend_message(self.jid, content)
async def join_party(self) -> 'ClientParty':
"""|coro|
Attempts to join this friends' party.
Raises
------
PartyError
Party was not found.
Forbidden
The party you attempted to join was private.
HTTPException
Something else went wrong when trying to join the party.
Returns
-------
:class:`ClientParty`
The clients new party.
"""
_pre = self.last_presence
if _pre is None:
raise PartyError('Could not join party. Reason: Party not found')
if _pre.party.private:
raise Forbidden('Could not join party. Reason: Party is private')
return await _pre.party.join()
async def invite(self) -> None:
"""|coro|
Invites this friend to your party.
Raises
------
PartyError
Friend is already in your party.
PartyError
The party is full.
HTTPException
Something went wrong when trying to invite this friend.
"""
await self.client.user.party.invite(self.id)
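# Illustrative sketch (not part of fortnitepy): how the helpers above might be used
# from an async event handler; obtaining the Friend instance is assumed elsewhere.
#
#   async def greet(friend):
#       if friend.is_online() and friend.nickname is None:
#           await friend.set_nickname('Bestie')
#           await friend.send('Welcome back!')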
class PendingFriend(FriendBase):
"""Represents a pending friend from Fortnite."""
__slots__ = FriendBase.__slots__
def __init__(self, client: 'Client', data: dict) -> None:
super().__init__(client, data)
def __repr__(self) -> str:
return ('<PendingFriend id={0.id!r} display_name={0.display_name!r} '
'epicgames_account={0.epicgames_account!r}>'.format(self))
@property
def created_at(self) -> Datetime:
""":class:`datetime.datetime`: The UTC time of when the request was
created
"""
return self._created_at
async def accept(self) -> Friend:
"""|coro|
        Accepts this user's friend request.
Raises
------
HTTPException
Something went wrong when trying to accept this request.
Returns
-------
:class:`Friend`
Object of the friend you just added.
"""
friend = await self.client.accept_friend(self.id)
return friend
async def decline(self) -> None:
"""|coro|
        Declines this user's friend request.
Raises
------
HTTPException
Something went wrong when trying to decline this request.
"""
await self.client.remove_or_decline_friend(self.id)
|
the-stack_106_28049 | #!/usr/bin/env python3
import os
import numpy as np
from common.realtime import sec_since_boot
from common.numpy_fast import clip, interp
from selfdrive.swaglog import cloudlog
from selfdrive.modeld.constants import index_function
from selfdrive.controls.lib.radar_helpers import _LEAD_ACCEL_TAU
from selfdrive.config import Conversions as CV
if __name__ == '__main__': # generating code
from pyextra.acados_template import AcadosModel, AcadosOcp, AcadosOcpSolver
else:
# from pyextra.acados_template import AcadosOcpSolver as AcadosOcpSolverFast
from selfdrive.controls.lib.longitudinal_mpc_lib.c_generated_code.acados_ocp_solver_pyx import AcadosOcpSolverFast # pylint: disable=no-name-in-module, import-error
from casadi import SX, vertcat
LONG_MPC_DIR = os.path.dirname(os.path.abspath(__file__))
EXPORT_DIR = os.path.join(LONG_MPC_DIR, "c_generated_code")
JSON_FILE = "acados_ocp_long.json"
SOURCES = ['lead0', 'lead1', 'cruise']
X_DIM = 3
U_DIM = 1
PARAM_DIM = 5
COST_E_DIM = 5
COST_DIM = COST_E_DIM + 1
CONSTR_DIM = 4
X_EGO_OBSTACLE_COST = 3.
X_EGO_COST = 0.
V_EGO_COST = 0.
A_EGO_COST = 0.
J_EGO_COST = 5.0
A_CHANGE_COST = .125
DANGER_ZONE_COST = 100.
CRASH_DISTANCE = .5
LIMIT_COST = 1e6
CRUISE_GAP_BP = [1., 2., 3., 4.]
CRUISE_GAP_V = [1.2, 1.35, 1.5, 1.7]
AUTO_TR_BP = [0., 10.*CV.KPH_TO_MS, 70.*CV.KPH_TO_MS, 110.*CV.KPH_TO_MS]
AUTO_TR_V = [1., 1.2, 1.35, 1.45]
AUTO_TR_CRUISE_GAP = 4
# Fewer timestamps don't hurt performance and lead to
# much better convergence of the MPC with low iterations
N = 12
MAX_T = 10.0
T_IDXS_LST = [index_function(idx, max_val=MAX_T, max_idx=N+1) for idx in range(N+1)]
T_IDXS = np.array(T_IDXS_LST)
T_DIFFS = np.diff(T_IDXS, prepend=[0.])
MIN_ACCEL = -3.5
T_FOLLOW = 1.45
COMFORT_BRAKE = 2.5
STOP_DISTANCE = 6.0
def get_stopped_equivalence_factor(v_lead):
return (v_lead**2) / (2 * COMFORT_BRAKE)
def get_safe_obstacle_distance(v_ego, tr):
return (v_ego**2) / (2 * COMFORT_BRAKE) + tr * v_ego + STOP_DISTANCE
def desired_follow_distance(v_ego, v_lead, tr):
return get_safe_obstacle_distance(v_ego, tr) - get_stopped_equivalence_factor(v_lead)
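# Worked example (illustrative only): with v_ego = v_lead = 20 m/s and tr = T_FOLLOW,
#   get_safe_obstacle_distance(20, 1.45)  = 20**2/(2*2.5) + 1.45*20 + 6 = 115.0 m
#   get_stopped_equivalence_factor(20)    = 20**2/(2*2.5)               =  80.0 m
#   desired_follow_distance(20, 20, 1.45) = 115.0 - 80.0                =  35.0 m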
def gen_long_model():
model = AcadosModel()
model.name = 'long'
# set up states & controls
x_ego = SX.sym('x_ego')
v_ego = SX.sym('v_ego')
a_ego = SX.sym('a_ego')
model.x = vertcat(x_ego, v_ego, a_ego)
# controls
j_ego = SX.sym('j_ego')
model.u = vertcat(j_ego)
# xdot
x_ego_dot = SX.sym('x_ego_dot')
v_ego_dot = SX.sym('v_ego_dot')
a_ego_dot = SX.sym('a_ego_dot')
model.xdot = vertcat(x_ego_dot, v_ego_dot, a_ego_dot)
# live parameters
a_min = SX.sym('a_min')
a_max = SX.sym('a_max')
x_obstacle = SX.sym('x_obstacle')
prev_a = SX.sym('prev_a')
tr = SX.sym('tr')
model.p = vertcat(a_min, a_max, x_obstacle, prev_a, tr)
# dynamics model
f_expl = vertcat(v_ego, a_ego, j_ego)
model.f_impl_expr = model.xdot - f_expl
model.f_expl_expr = f_expl
return model
def gen_long_mpc_solver():
ocp = AcadosOcp()
ocp.model = gen_long_model()
Tf = T_IDXS[-1]
# set dimensions
ocp.dims.N = N
# set cost module
ocp.cost.cost_type = 'NONLINEAR_LS'
ocp.cost.cost_type_e = 'NONLINEAR_LS'
QR = np.zeros((COST_DIM, COST_DIM))
Q = np.zeros((COST_E_DIM, COST_E_DIM))
ocp.cost.W = QR
ocp.cost.W_e = Q
x_ego, v_ego, a_ego = ocp.model.x[0], ocp.model.x[1], ocp.model.x[2]
j_ego = ocp.model.u[0]
a_min, a_max = ocp.model.p[0], ocp.model.p[1]
x_obstacle = ocp.model.p[2]
prev_a = ocp.model.p[3]
tr = ocp.model.p[4]
ocp.cost.yref = np.zeros((COST_DIM, ))
ocp.cost.yref_e = np.zeros((COST_E_DIM, ))
desired_dist_comfort = get_safe_obstacle_distance(v_ego, tr)
# The main cost in normal operation is how close you are to the "desired" distance
# from an obstacle at every timestep. This obstacle can be a lead car
# or other object. In e2e mode we can use x_position targets as a cost
# instead.
costs = [((x_obstacle - x_ego) - (desired_dist_comfort)) / (v_ego + 10.),
x_ego,
v_ego,
a_ego,
20*(a_ego - prev_a),
j_ego]
ocp.model.cost_y_expr = vertcat(*costs)
ocp.model.cost_y_expr_e = vertcat(*costs[:-1])
# Constraints on speed, acceleration and desired distance to
# the obstacle, which is treated as a slack constraint so it
# behaves like an asymmetrical cost.
constraints = vertcat(v_ego,
(a_ego - a_min),
(a_max - a_ego),
((x_obstacle - x_ego) - (3/4) * (desired_dist_comfort)) / (v_ego + 10.))
ocp.model.con_h_expr = constraints
ocp.model.con_h_expr_e = vertcat(np.zeros(CONSTR_DIM))
x0 = np.zeros(X_DIM)
ocp.constraints.x0 = x0
ocp.parameter_values = np.array([-1.2, 1.2, 0.0, 0.0, T_FOLLOW])
# We put all constraint cost weights to 0 and only set them at runtime
cost_weights = np.zeros(CONSTR_DIM)
ocp.cost.zl = cost_weights
ocp.cost.Zl = cost_weights
ocp.cost.Zu = cost_weights
ocp.cost.zu = cost_weights
ocp.constraints.lh = np.zeros(CONSTR_DIM)
ocp.constraints.lh_e = np.zeros(CONSTR_DIM)
ocp.constraints.uh = 1e4*np.ones(CONSTR_DIM)
ocp.constraints.uh_e = 1e4*np.ones(CONSTR_DIM)
ocp.constraints.idxsh = np.arange(CONSTR_DIM)
# The HPIPM solver can give decent solutions even when it is stopped early
# Which is critical for our purpose where compute time is strictly bounded
# We use HPIPM in the SPEED_ABS mode, which ensures fastest runtime. This
# does not cause issues since the problem is well bounded.
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
ocp.solver_options.nlp_solver_type = 'SQP_RTI'
ocp.solver_options.qp_solver_cond_N = N//4
  # More iterations take too much time and fewer lead to inaccurate convergence in
# some situations. Ideally we would run just 1 iteration to ensure fixed runtime.
ocp.solver_options.qp_solver_iter_max = 10
# set prediction horizon
ocp.solver_options.tf = Tf
ocp.solver_options.shooting_nodes = T_IDXS
ocp.code_export_directory = EXPORT_DIR
return ocp
class LongitudinalMpc:
def __init__(self, e2e=False):
self.e2e = e2e
self.reset()
self.source = SOURCES[2]
def reset(self):
self.solver = AcadosOcpSolverFast('long', N, EXPORT_DIR)
self.v_solution = np.zeros(N+1)
self.a_solution = np.zeros(N+1)
self.prev_a = np.array(self.a_solution)
self.j_solution = np.zeros(N)
self.yref = np.zeros((N+1, COST_DIM))
for i in range(N):
self.solver.cost_set(i, "yref", self.yref[i])
self.solver.cost_set(N, "yref", self.yref[N][:COST_E_DIM])
self.x_sol = np.zeros((N+1, X_DIM))
self.u_sol = np.zeros((N,1))
self.params = np.zeros((N+1, PARAM_DIM))
self.param_tr = T_FOLLOW
for i in range(N+1):
self.solver.set(i, 'x', np.zeros(X_DIM))
self.last_cloudlog_t = 0
self.status = False
self.crash_cnt = 0.0
self.solution_status = 0
self.solve_time = 0.0
self.x0 = np.zeros(X_DIM)
self.set_weights()
def set_weights(self, prev_accel_constraint=True):
if self.e2e:
self.set_weights_for_xva_policy()
self.params[:,0] = -10.
self.params[:,1] = 10.
self.params[:,2] = 1e5
else:
self.set_weights_for_lead_policy(prev_accel_constraint)
def set_weights_for_lead_policy(self, prev_accel_constraint=True):
a_change_cost = A_CHANGE_COST if prev_accel_constraint else 0
W = np.asfortranarray(np.diag([X_EGO_OBSTACLE_COST, X_EGO_COST, V_EGO_COST, A_EGO_COST, a_change_cost, J_EGO_COST]))
for i in range(N):
W[4,4] = a_change_cost * np.interp(T_IDXS[i], [0.0, 1.0, 2.0], [1.0, 1.0, 0.0])
self.solver.cost_set(i, 'W', W)
    # Setting the slice without the copy makes the array not contiguous,
# causing issues with the C interface.
self.solver.cost_set(N, 'W', np.copy(W[:COST_E_DIM, :COST_E_DIM]))
# Set L2 slack cost on lower bound constraints
Zl = np.array([LIMIT_COST, LIMIT_COST, LIMIT_COST, DANGER_ZONE_COST])
for i in range(N):
self.solver.cost_set(i, 'Zl', Zl)
def set_weights_for_xva_policy(self):
W = np.asfortranarray(np.diag([0., 10., 1., 10., 0.0, 1.]))
for i in range(N):
self.solver.cost_set(i, 'W', W)
    # Setting the slice without the copy makes the array not contiguous,
# causing issues with the C interface.
self.solver.cost_set(N, 'W', np.copy(W[:COST_E_DIM, :COST_E_DIM]))
# Set L2 slack cost on lower bound constraints
Zl = np.array([LIMIT_COST, LIMIT_COST, LIMIT_COST, 0.0])
for i in range(N):
self.solver.cost_set(i, 'Zl', Zl)
def set_cur_state(self, v, a):
if abs(self.x0[1] - v) > 2.:
self.x0[1] = v
self.x0[2] = a
for i in range(0, N+1):
self.solver.set(i, 'x', self.x0)
else:
self.x0[1] = v
self.x0[2] = a
@staticmethod
def extrapolate_lead(x_lead, v_lead, a_lead, a_lead_tau):
a_lead_traj = a_lead * np.exp(-a_lead_tau * (T_IDXS**2)/2.)
v_lead_traj = np.clip(v_lead + np.cumsum(T_DIFFS * a_lead_traj), 0.0, 1e8)
x_lead_traj = x_lead + np.cumsum(T_DIFFS * v_lead_traj)
lead_xv = np.column_stack((x_lead_traj, v_lead_traj))
return lead_xv
def process_lead(self, lead):
v_ego = self.x0[1]
if lead is not None and lead.status:
x_lead = lead.dRel
v_lead = lead.vLead
a_lead = lead.aLeadK
a_lead_tau = lead.aLeadTau
else:
# Fake a fast lead car, so mpc can keep running in the same mode
x_lead = 50.0
v_lead = v_ego + 10.0
a_lead = 0.0
a_lead_tau = _LEAD_ACCEL_TAU
# MPC will not converge if immediate crash is expected
# Clip lead distance to what is still possible to brake for
min_x_lead = ((v_ego + v_lead)/2) * (v_ego - v_lead) / (-MIN_ACCEL * 2)
x_lead = clip(x_lead, min_x_lead, 1e8)
v_lead = clip(v_lead, 0.0, 1e8)
a_lead = clip(a_lead, -10., 5.)
lead_xv = self.extrapolate_lead(x_lead, v_lead, a_lead, a_lead_tau)
return lead_xv
def set_accel_limits(self, min_a, max_a):
self.cruise_min_a = min_a
self.cruise_max_a = max_a
def update(self, carstate, radarstate, v_cruise):
v_ego = self.x0[1]
self.status = radarstate.leadOne.status or radarstate.leadTwo.status
lead_xv_0 = self.process_lead(radarstate.leadOne)
lead_xv_1 = self.process_lead(radarstate.leadTwo)
# set accel limits in params
self.params[:,0] = interp(float(self.status), [0.0, 1.0], [self.cruise_min_a, MIN_ACCEL])
self.params[:,1] = self.cruise_max_a
# neokii
cruise_gap = int(clip(carstate.cruiseGap, 1., 4.))
if cruise_gap == AUTO_TR_CRUISE_GAP:
tr = interp(carstate.vEgo, AUTO_TR_BP, AUTO_TR_V)
else:
tr = interp(float(cruise_gap), CRUISE_GAP_BP, CRUISE_GAP_V)
self.param_tr = tr
# To estimate a safe distance from a moving lead, we calculate how much stopping
# distance that lead needs as a minimum. We can add that to the current distance
# and then treat that as a stopped car/obstacle at this new distance.
lead_0_obstacle = lead_xv_0[:,0] + get_stopped_equivalence_factor(lead_xv_0[:,1])
lead_1_obstacle = lead_xv_1[:,0] + get_stopped_equivalence_factor(lead_xv_1[:,1])
# Fake an obstacle for cruise, this ensures smooth acceleration to set speed
# when the leads are no factor.
v_lower = v_ego + (T_IDXS * self.cruise_min_a * 1.05)
v_upper = v_ego + (T_IDXS * self.cruise_max_a * 1.05)
v_cruise_clipped = np.clip(v_cruise * np.ones(N+1),
v_lower,
v_upper)
cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped, tr)
x_obstacles = np.column_stack([lead_0_obstacle, lead_1_obstacle, cruise_obstacle])
self.source = SOURCES[np.argmin(x_obstacles[0])]
self.params[:,2] = np.min(x_obstacles, axis=1)
self.params[:,3] = np.copy(self.prev_a)
self.params[:,4] = self.param_tr
self.run()
if (np.any(lead_xv_0[:,0] - self.x_sol[:,0] < CRASH_DISTANCE) and
radarstate.leadOne.modelProb > 0.9):
self.crash_cnt += 1
else:
self.crash_cnt = 0
def update_with_xva(self, x, v, a):
self.yref[:,1] = x
self.yref[:,2] = v
self.yref[:,3] = a
for i in range(N):
self.solver.cost_set(i, "yref", self.yref[i])
self.solver.cost_set(N, "yref", self.yref[N][:COST_E_DIM])
self.params[:,3] = np.copy(self.prev_a)
self.params[:,4] = self.param_tr
self.run()
def run(self):
for i in range(N+1):
self.solver.set(i, 'p', self.params[i])
self.solver.constraints_set(0, "lbx", self.x0)
self.solver.constraints_set(0, "ubx", self.x0)
t = sec_since_boot()
self.solution_status = self.solver.solve()
self.solve_time = sec_since_boot() - t
for i in range(N+1):
self.x_sol[i] = self.solver.get(i, 'x')
for i in range(N):
self.u_sol[i] = self.solver.get(i, 'u')
self.v_solution = self.x_sol[:,1]
self.a_solution = self.x_sol[:,2]
self.j_solution = self.u_sol[:,0]
self.prev_a = np.interp(T_IDXS + 0.05, T_IDXS, self.a_solution)
if self.solution_status != 0:
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning(f"Long mpc reset, solution_status: {self.solution_status}")
self.reset()
if __name__ == "__main__":
ocp = gen_long_mpc_solver()
AcadosOcpSolver.generate(ocp, json_file=JSON_FILE, build=False)
|
the-stack_106_28050 | #!/usr/bin/env python3
# coding: utf-8
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import re
def get_scale_fs(timescale):
"""Convert sdf timescale to scale factor to femtoseconds as int
>>> get_scale_fs('1.0 fs')
1
>>> get_scale_fs('1ps')
1000
>>> get_scale_fs('10 ns')
10000000
>>> get_scale_fs('10.0 us')
10000000000
>>> get_scale_fs('100.0ms')
100000000000000
>>> get_scale_fs('100 s')
100000000000000000
>>> try:
... get_scale_fs('2s')
... except AssertionError as e:
... print(e)
Invalid SDF timescale 2s
"""
mm = re.match(r'(10{0,2})(\.0)? *([munpf]?s)', timescale)
sc_lut = {
's': 1e15,
'ms': 1e12,
'us': 1e9,
'ns': 1e6,
'ps': 1e3,
'fs': 1,
}
assert mm is not None, "Invalid SDF timescale {}".format(timescale)
base, _, sc = mm.groups()
return int(base) * int(sc_lut[sc])
def get_scale_seconds(timescale):
"""Convert sdf timescale to scale factor to floating point seconds
>>> get_scale_seconds('1.0 fs')
1e-15
>>> get_scale_seconds('1ps')
1e-12
>>> get_scale_seconds('10 ns')
1e-08
>>> get_scale_seconds('10.0 us')
1e-05
>>> get_scale_seconds('100.0ms')
0.1
>>> round(get_scale_seconds('100 s'), 6)
100.0
"""
return 1e-15 * get_scale_fs(timescale)
def prepare_entry(name=None,
type=None,
from_pin=None,
to_pin=None,
from_pin_edge=None,
to_pin_edge=None,
delay_paths=None,
cond_equation=None,
is_timing_check=False,
is_timing_env=False,
is_absolute=False,
is_incremental=False,
is_cond=False):
entry = dict()
entry['name'] = name
entry['type'] = type
entry['from_pin'] = from_pin
entry['to_pin'] = to_pin
entry['from_pin_edge'] = from_pin_edge
entry['to_pin_edge'] = to_pin_edge
entry['delay_paths'] = delay_paths
entry['is_timing_check'] = is_timing_check
entry['is_timing_env'] = is_timing_env
entry['is_absolute'] = is_absolute
entry['is_incremental'] = is_incremental
entry['is_cond'] = is_cond
entry['cond_equation'] = cond_equation
return entry
def add_port(portname, paths):
name = "port_" + portname['port']
return prepare_entry(name=name,
type='port',
from_pin=portname['port'],
to_pin=portname['port'],
delay_paths=paths)
def add_interconnect(pfrom, pto, paths):
name = "interconnect_"
name += pfrom['port'] + "_" + pto['port']
return prepare_entry(name=name,
type='interconnect',
from_pin=pfrom['port'],
to_pin=pto['port'],
from_pin_edge=pfrom['port_edge'],
to_pin_edge=pto['port_edge'],
delay_paths=paths)
def add_iopath(pfrom, pto, paths):
name = "iopath_"
name += pfrom['port'] + "_" + pto['port']
return prepare_entry(name=name,
type='iopath',
from_pin=pfrom['port'],
to_pin=pto['port'],
from_pin_edge=pfrom['port_edge'],
to_pin_edge=pto['port_edge'],
delay_paths=paths)
def add_device(port, paths):
name = "device_"
name += port['port']
return prepare_entry(name=name,
type='device',
from_pin=port['port'],
to_pin=port['port'],
delay_paths=paths)
def add_tcheck(type, pto, pfrom, paths):
name = type + "_"
name += pfrom['port'] + "_" + pto['port']
return prepare_entry(name=name,
type=type,
is_timing_check=True,
is_cond=pfrom['cond'],
cond_equation=pfrom['cond_equation'],
from_pin=pfrom['port'],
to_pin=pto['port'],
from_pin_edge=pfrom['port_edge'],
to_pin_edge=pto['port_edge'],
delay_paths=paths)
def add_constraint(type, pto, pfrom, paths):
name = type + "_"
name += pfrom['port'] + "_" + pto['port']
return prepare_entry(name=name,
type=type,
is_timing_env=True,
from_pin=pfrom['port'],
to_pin=pto['port'],
from_pin_edge=pfrom['port_edge'],
to_pin_edge=pto['port_edge'],
delay_paths=paths)
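# Illustrative sketch (not part of the original module): the port arguments are plain
# dicts carrying at least 'port' and 'port_edge' keys; the delay dict shape is assumed.
#
#   entry = add_iopath({'port': 'CLK', 'port_edge': 'posedge'},
#                      {'port': 'Q', 'port_edge': None},
#                      {'nominal': {'min': 1.0, 'avg': 1.5, 'max': 2.0}})
#   entry['name']  # -> 'iopath_CLK_Q'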
|
the-stack_106_28052 | import numpy as np
from ..objects import ServerDataSource
try:
import scipy
import scipy.misc
except ImportError as e:
print(e)
def source(**kwargs):
kwargs['transform'] = {'resample':'heatmap'}
kwargs['data'] = {'x': [0],
'y': [0],
'global_x_range' : [0, 10],
'global_y_range' : [0, 10],
'global_offset_x' : [0],
'global_offset_y' : [0],
'dw' : [10],
'dh' : [10],
'palette': ["Spectral-11"]
}
return ServerDataSource(**kwargs)
def downsample(image, image_x_axis, image_y_axis,
x_bounds, y_bounds, x_resolution, y_resolution):
x_resolution, y_resolution = int(round(x_resolution)), int(round(y_resolution))
x_bounds = [x_bounds.start, x_bounds.end]
y_bounds = [y_bounds.start, y_bounds.end]
x_bounds = np.searchsorted(image_x_axis, x_bounds)
y_bounds = np.searchsorted(image_y_axis, y_bounds)
#y_bounds = image.shape[0] + 1 - y_bounds[::-1]
subset = image[y_bounds[0]:y_bounds[1],
x_bounds[0]:x_bounds[1]]
x_downsample_factor = max(round(subset.shape[1] / x_resolution / 3.), 1)
y_downsample_factor = max(round(subset.shape[0] / y_resolution / 3.), 1)
subset = subset[::x_downsample_factor, ::y_downsample_factor]
image = scipy.misc.imresize(subset, (x_resolution, y_resolution),
interp='nearest')
bounds = image_x_axis[x_bounds[0]:x_bounds[1]]
dw = np.max(bounds) - np.min(bounds)
bounds = image_y_axis[y_bounds[0]:y_bounds[1]]
dh = np.max(bounds) - np.min(bounds)
return {'data': image,
'offset_x': image_x_axis[x_bounds[0]],
'offset_y': image_y_axis[y_bounds[0]],
'dw': dw,
'dh': dh,
'subset': subset,
}
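# Illustrative sketch (not part of the original module): the bound arguments only need
# .start/.end attributes, as Bokeh range objects provide; a namedtuple stands in here.
#
#   from collections import namedtuple
#   Range1d = namedtuple('Range1d', 'start end')
#   img = np.random.rand(1000, 1000)
#   axis = np.linspace(0, 10, 1000)
#   tile = downsample(img, axis, axis, Range1d(2, 5), Range1d(2, 5), 256, 256)
#   tile['data'].shape  # -> (256, 256)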
|
the-stack_106_28054 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_finance import candlestick_ohlc
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
def load_csv_with_dates(file):
'''
Loads csv files with first column dates
'''
return pd.read_csv(file,
index_col=0,
parse_dates=True,
infer_datetime_format=True)
def get_apple_stock(corrected=True):
'''
Loads apple stock prices from Yahoo! finance
params:
corrected: if True it'll correct the missing row on 1981-08-10
'''
apple_stock = load_csv_with_dates('datasets/AAPL_yahoo-finance_19801212-20190531.csv')
# for the sake of simplicity I'm gonna drop Adj Close column
apple_stock.drop(columns='Adj Close', inplace=True)
if corrected == True:
apple_stock.loc['1981-08-10'] = (apple_stock.loc['1981-08-07'] + apple_stock.loc['1981-08-11']) / 2
return apple_stock
def get_apple_close_price():
'''
Will return a pandas Series with just Close price
'''
apple_stock = get_apple_stock()
return apple_stock['Close']
def get_range(series, start, end=None):
'''
Returns a range between start and end
params:
series: pandas DataFrame
start: string - starting date
end: string - end date
'''
if end is not None:
return series[(series.index >= start) & (series.index <= end)]
else:
return series[series.index >= start]
def train_test_split(series, day_split):
'''
Train/test split on a specific day
params:
series: pandas DataFrame
day_split: string - when to split
'''
train = series[series.index <= day_split]
test = series[series.index > day_split]
return train, test
def plot_field_over_time(series,
y='Close',
xlabel='Year',
ylabel=None,
ylegend=None,
title='',
figsize=(15, 6)):
'''
Plots a field (y) over time
'''
ax = series.reset_index().plot(x='Date',
y=y,
title=title,
figsize=figsize)
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if ylegend is not None:
ax.legend([ylegend])
def plot_candlestick(series, xlabel, ylabel, title='', figsize=(15, 6)):
fig, ax = plt.subplots(figsize=figsize)
candlestick_ohlc(ax,
zip(mdates.date2num(series.index.to_pydatetime()),
                         series['Open'],
                         series['High'],
                         series['Low'],
                         series['Close']),
width=0.6,
colorup='g')
ax.xaxis_date()
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def adf_test(series):
'''
Perform Dickey-Fuller test
see: https://www.analyticsvidhya.com/blog/2018/09/non-stationary-time-series-python/
'''
print ('Results of Dickey-Fuller Test:')
dftest = adfuller(series, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=[
'Test Statistic',
'p-value',
'Lags Used',
'Number of Observations Used'
])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' % key] = value
print (dfoutput)
def plot_series(series, title='', legend=None, figsize=(15, 6)):
fig, ax = plt.subplots(figsize=figsize)
plt.plot(series)
ax.set_title(title)
if legend is not None:
ax.legend(legend)
def difference(series):
'''
Calculate the n-th order discrete difference
'''
return np.diff(series), series[0]
def inverse_difference(series, first_value):
'''
Does the inverse of difference
'''
return np.hstack((first_value, first_value+np.cumsum(series)))
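# Worked round-trip example (illustrative): difference() keeps the first value so the
# original series can be rebuilt exactly by inverse_difference().
#
#   diffed, first = difference(np.array([5, 7, 10]))   # -> (array([2, 3]), 5)
#   inverse_difference(diffed, first)                  # -> array([ 5,  7, 10])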
def log_transform(series):
return np.log(series)
def inverse_log_transform(series):
return np.exp(series)
def rmse(preds, targets):
'''
Calculates Root Mean Square Error
preds: Series of predictions
targets: Series of real values
'''
return np.sqrt(((preds - targets)**2).mean())
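# Worked example (illustrative): a 2-unit miss on one of two points.
#   rmse(np.array([1.0, 2.0]), np.array([1.0, 4.0]))   # -> sqrt((0 + 4) / 2) ~ 1.414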
def plot_walk_forward_validation(test, predictions, model_name='Model', size=1, steps=1):
fig, ax = plt.subplots(figsize=(15, 6))
plt.plot(test[:size])
plt.plot(predictions)
ax.set_title('{} - Walk forward validation - {} days, {} days prediction'.format(model_name,
size,
steps))
ax.legend(['Expected', 'Predicted'])
def split_sequence(seq, look_back, n_outputs=1):
'''
split a sequence into samples.
Example:
seq = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
look_back = 3
n_outputs = 2
X y
-------------------
[1, 2, 3] [4, 5]
[2, 3, 4] [5, 6]
[3, 4, 5] [6, 7]
[4, 5, 6] [7, 8]
[6, 7, 8] [9, 10]
'''
X, y = list(), list()
seq_len = len(seq)
for i in range(seq_len):
# find the end of this pattern
target_i = i + look_back
# check if we are beyond the sequence
if target_i + n_outputs > seq_len: break
# gather input and output parts of the pattern
seq_x, seq_y = seq[i:target_i], seq[target_i:target_i+n_outputs]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
def ARIMA_walk_forward_validation(train, test, order, size=1, steps=1, debug=True):
'''
Performs a walk-forward validation on ARIMA model
params:
train: Series - train set
test: Series - test set
order: Tuple - order parameter of ARIMA model
size: Integer - amount of days we're gonna walk
steps: Integer - how many days we're gonna forecast
debug: Bool - prints debug prediction vs expected prices
'''
history = [x for x in train]
pred = list()
limit_range = len(test[:size])
for t in range(0, limit_range, steps):
model = ARIMA(history, order=order)
model_fit = model.fit(disp=0) # trains the model with the new history
output = model_fit.forecast(steps=steps) # make predictions
yhat = output[0]
pred = pred + yhat.tolist()
obs = test[t:t+steps]
history = history + obs.values.tolist()
history = history[len(obs.values):] # shift to forget the oldest prices
if debug == True:
print('predicted={}, expected={}'.format(yhat, obs.values))
return pred[:limit_range]
def NN_walk_forward_validation(model,
train, test,
size=1, look_back=1, n_outputs=1):
'''
Performs a walk-forward validation on a NN model
params:
model: NN model
train: Series - train set
test: Series - test set
size: Integer - amount of days we're gonna walk
look_back: Integer - amount of past days to forecast future ones
n_outputs: Integer - amount of days predicted (output of predictor)
'''
past = train.reshape(-1,).copy()
future = test.reshape(-1,)[:size]
predictions = list()
limit_range = len(future)
for t in range(0, limit_range, n_outputs):
x_input = past[-look_back:] # grab the last look_back days from the past
x_input = x_input.reshape(1, look_back, 1)
# predict the next n_outputs days
y_hat = model.predict(x_input)
predictions.append(y_hat.reshape(n_outputs,))
# add the next real days to the past
past = np.concatenate((past, future[t:t+n_outputs]))
if len(future[t:t+n_outputs]) == n_outputs:
X_batch = x_input
y_batch = future[t:t+n_outputs].reshape(-1, n_outputs)
# Time to re-train the model with the new non-seen days
model.train_on_batch(X_batch, y_batch)
return np.array(predictions).reshape(-1,)[:limit_range]
def NN_walk_forward_validation_v2(model,
train, test,
size=1,
look_back=1, n_features=1, n_outputs=1):
'''
Performs a walk-forward validation on a NN model
when there are multiple features
'''
past = train.copy()
future = test[:size]
predictions = list()
limit_range = len(future)
for t in range(0, limit_range, n_outputs):
x_input = past[-look_back:] # grab the last look_back days from the past
x_input = x_input.reshape(1, look_back, n_features)
# predict the next n_outputs days
y_hat = model.predict(x_input)
predictions.append(y_hat.reshape(n_outputs,))
# add the next real days to the past
past = np.concatenate((past, future[t:t+n_outputs]))
if len(future[t:t+n_outputs]) == n_outputs:
X_batch = x_input
y_batch = future[t:t+n_outputs]
y_batch = y_batch[:, 3].reshape(-1, n_outputs)
# Time to re-train the model with the new non-seen days
model.train_on_batch(X_batch, y_batch)
return np.array(predictions).reshape(-1,)[:limit_range]
def plot_columns(series, columns, fromTo=None, figsize=(15, 6)):
fig, ax = plt.subplots(figsize=figsize)
if fromTo is not None:
series = get_range(series, fromTo[0], fromTo[1])
for column in columns:
series.reset_index().plot(ax=ax, x='Date', y=column)
def freeze_layers(model, freeze=True):
for layer in model.layers:
layer.trainable = freeze
def calculate_forecast_error(preds, targets):
preds, targets = np.array(preds), np.array(targets)
return targets - preds
def plot_residual_forecast_error(preds, targets, figsize=(15, 6)):
forecast_errors = calculate_forecast_error(preds, targets)
fig, ax = plt.subplots(figsize=figsize)
plt.plot(forecast_errors)
plt.axhline(y=0, color='grey', linestyle='--', )
ax.set_title('Residual Forecast Error')
def mape(preds, targets):
return np.mean(np.abs((targets - preds)/targets)) * 100
def print_performance_metrics(preds, targets, total_days=21, steps=1, model_name=''):
'''
Prints a report with different metrics:
Inspired by
https://machinelearningmastery.com/time-series-forecasting-performance-measures-with-python/
'''
preds, targets = np.array(preds), np.array(targets)
forecast_errors = calculate_forecast_error(preds, targets)
print('%s[%d days, %d days forecast]:\n' % (model_name, total_days, steps))
print('Forecast Bias: %.3f' % (np.sum(forecast_errors)*1.0/len(targets)))
print('MAE: %.3f' % (np.mean(np.abs(forecast_errors))))
print('MSE: %.3f' % (np.mean(forecast_errors**2)))
print('RMSE: %.3f' % (rmse(preds, targets)))
print('MAPE: %.3f' % (mape(preds, targets)))
def descale_with_features(predictions,
test,
n_features,
scaler=None,
transformer=None,
pos_to_fillin=3):
'''
In order to be able to de-scale the price, we need to
create a table with n_features columns and place the
predicted Close price in position 3
'''
ret_preds = np.zeros((predictions.shape[0], n_features))
ret_preds[:, pos_to_fillin] = predictions
ret_test = test
if scaler is not None:
ret_preds = scaler.inverse_transform(ret_preds)
ret_test = scaler.inverse_transform(ret_test)
if transformer is not None:
ret_preds = transformer.inverse_transform(ret_preds)
ret_test = transformer.inverse_transform(ret_test)
return ret_preds[:, pos_to_fillin], ret_test |
the-stack_106_28056 | # -*- coding: utf-8 -*-
'''
Tools for Web Flayer
'''
# Python
import os
import re
import sys
import time
import random
import pprint
import urllib
# 3rd party
import requests
from termcolor import colored
import psycopg2
from psycopg2.extras import Json
from bs4 import BeautifulSoup
# Internal
import flayer.event
class Output(object):
'''
Used for outputting data
'''
def __init__(self, opts):
'''
Initialize
'''
self.opts = opts
def action(self, msg, force=False):
'''
Something is currently happening
'''
if not self.opts['daemon'] or force is True:
print(colored(msg, self.opts.get('action_color', 'green')))
def info(self, msg, force=False):
'''
Informational only
'''
if not self.opts['daemon'] or force is True:
print(colored(msg, self.opts.get('info_color', 'cyan')))
def warn(self, msg, force=False):
'''
Something is possibly wrong, but not enough to stop running
'''
if not self.opts['daemon'] or force is True:
print(colored(msg, self.opts.get('warn_color', 'yellow')))
def error(self, msg, force=False):
'''
Something is wrong enough to halt execution
'''
if not self.opts['daemon'] or force is True:
print(colored(msg, self.opts.get('error_color', 'red'), attrs=['bold']))
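# Illustrative usage sketch (the opts values are assumptions): Output only needs the
# 'daemon' flag plus optional *_color overrides from the Web Flayer config.
#
#   out = Output({'daemon': False})
#   out.action('Crawling started')       # green by default
#   out.warn('Retrying in 10 seconds')   # yellow by default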
def process_url(url_uuid, url, content, parsers):
'''
Process a URL
'''
fun = None
for mod in parsers:
if fun is not None:
break
if not mod.endswith('.func_map'):
continue
fun = parsers[mod](url)
fun(url_uuid, url, content)
def get_url(
url,
parent=None,
referer=None,
dbclient=None,
client=requests,
opts=None,
context=None,
):
'''
Download a URL (if necessary) and store it
'''
out = Output(opts)
headers = opts['headers'].copy()
data = opts.get('data', None)
if referer:
headers['referer'] = referer
if flayer.db.check_domain_wait(dbclient, url) is False:
# We need to put this URL back into the queue
queue_urls([url], dbclient, opts)
flayer.db.pattern_wait(dbclient, url)
flayer.db.set_domain_wait(dbclient, opts, url)
wait = 0
if opts.get('no_db_cache') is True:
# Skip all the DB stuff and just download the URL
req = client.request(
opts['method'],
url,
headers=headers,
data=data,
verify=bool(opts.get('verify', True)),
)
req.raise_for_status()
if opts.get('include_headers') is True:
out.info(pprint.pformat(dict(req.headers)))
content = req.text
if opts['random_wait'] is True:
wait = int(opts.get('wait', 10))
time.sleep(random.randrange(1, wait))
if url not in opts['warned']:
opts['warned'].append(url)
return 0, content
cur = dbclient.cursor()
exists = False
# Check for URL in DB
cur.execute('''
SELECT uuid, url, last_retrieved
FROM urls
WHERE url = %s
''', [url])
if cur.rowcount < 1:
# URL has never been retrieved
cur.execute('''
INSERT INTO urls
(url) VALUES (%s)
RETURNING uuid
''', [url])
dbclient.commit()
url_uuid = cur.fetchone()[0]
out.action('{} has not been retrieved before, new UUID is {}'.format(url, url_uuid))
else:
# URL has been retrieved, get its UUID
url_uuid = cur.fetchone()[0]
out.warn('{} exists, UUID is {}'.format(url, url_uuid))
exists = True
if url not in opts['warned']:
opts['warned'].append(url)
# Save referer relationships
if parent:
try:
cur.execute('''
INSERT INTO referers
(url_uuid, referer_uuid)
VALUES
(%s, %s)
''', [url_uuid, parent])
dbclient.commit()
except psycopg2.IntegrityError:
# This relationship already exists
dbclient.rollback()
if opts['force_directories'] and not opts['save_path']:
opts['save_path'] = '.'
# Check for content
cur.execute('''
SELECT data, uuid
FROM content
WHERE url_uuid = %s
ORDER BY retrieved
LIMIT 1
''', [url_uuid])
if cur.rowcount < 1:
try:
if opts['save_path']:
req = client.request(
opts['method'],
url,
headers=headers,
data=data,
verify=bool(opts.get('verify', True)),
stream=True,
)
content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
else:
req = client.request(
opts['method'],
url,
headers=headers,
data=data,
verify=bool(opts.get('verify', True)),
)
content = req.text
req_headers = req.headers
except requests.exceptions.ConnectionError as exc:
out.error('Error downloading {}:'.format(url))
out.error(exc)
return 0, ''
except requests.exceptions.InvalidSchema as exc:
out.error('Error downloading {}:'.format(url))
out.error(exc)
return 0, ''
if url not in opts['warned']:
opts['warned'].append(url)
if opts.get('include_headers') is True:
out.info(pprint.pformat(dict(req_headers)))
if content:
cur.execute('''
INSERT INTO content
(url_uuid, data) VALUES (%s, %s)
''',
[
url_uuid,
Json({
'content': content.replace('\x00', ''),
'status': req.status_code,
})
]
)
dbclient.commit()
else:
if opts['force'] is True:
row_id = cur.fetchone()[1]
if opts['save_path']:
req = client.request(
opts['method'],
url,
headers=headers,
data=data,
verify=bool(opts.get('verify', True)),
stream=True,
)
content, req_headers = _save_path(url, url_uuid, req, wait, opts, context, dbclient)
else:
req = client.request(
opts['method'],
url,
headers=headers,
data=data,
verify=bool(opts.get('verify', True)),
)
content = req.text
req_headers = req.headers
if url not in opts['warned']:
opts['warned'].append(url)
if opts.get('include_headers') is True:
out.info(pprint.pformat(dict(req_headers)))
if content:
cur.execute('''
UPDATE content
SET url_uuid = %s, data = %s
WHERE uuid = %s
''',
[
url_uuid,
Json({'content': content}),
row_id
]
)
dbclient.commit()
else:
content = cur.fetchone()[0]['content']
flayer.db.pattern_wait(dbclient, url)
flayer.db.set_domain_wait(dbclient, opts, url)
if exists is False:
if opts['random_wait'] is True:
wait = int(opts.get('wait', 10))
time.sleep(random.randrange(1, wait))
return url_uuid, content
def _save_path(url, url_uuid, req, wait, opts, context, dbclient):
'''
Save the URL to a path
'''
urlcomps = urllib.parse.urlparse(url)
if opts['force_directories']:
newpath = urlcomps[2].lstrip('/')
file_name = os.path.join(opts['save_path'], urlcomps[1], newpath)
else:
file_name = os.path.join(opts['save_path'], urlcomps[2].split('/')[-1])
return status(req, url, url_uuid, file_name, wait, opts, context, dbclient)
def status(
req,
media_url,
url_uuid,
file_name,
wait=0,
opts=None,
context=None,
dbclient=None,
):
'''
    Download the file, showing the progress of the download
'''
out = Output(opts)
if opts is None:
opts = {}
if context is None:
context = {}
file_name = _rename(media_url, file_name, opts)
cache_dir = '/'.join(file_name.split('/')[:-1])
try:
os.makedirs(cache_dir, mode=0o0755, exist_ok=True)
except PermissionError as exc:
out.error('Cannot create directory {}: {}'.format(cache_dir, exc))
is_text = False
req_headers = req.headers
for header in list(req_headers):
if header.lower().startswith('content-type'):
if req_headers[header].startswith('text'):
is_text = True
content = ''
cur = dbclient.cursor()
agent_id = opts.get('id', 'unknown')
cur.execute(
'INSERT INTO active_dl (url_uuid, started_by) VALUES (%s, %s)',
[url_uuid, agent_id]
)
cur.execute('SELECT url FROM urls WHERE uuid = %s', [url_uuid])
root_url = cur.fetchone()[0]
flayer.db.pattern_wait(dbclient, media_url)
flayer.db.set_domain_wait(dbclient, opts, media_url)
out.action('Downloading: {}'.format(media_url))
if os.path.exists(file_name):
if opts['overwrite']:
out.warn('... {} exists, overwriting'.format(file_name))
else:
out.warn('... {} exists, skipping'.format(file_name))
return None, {}
if not opts['daemon']:
sys.stdout.write(colored('...Saving to: ', 'green'))
out.info(file_name)
buffer_size = 4096
total = int(req.headers.get('Content-Length', 0))
count = 0
try:
point = int(total / 100)
#increment = int(total / buffer_size)
except ZeroDivisionError:
out.error('Divide by zero error, status not available')
point = 0
#increment = 0
start_time = time.time()
last_time = time.time()
delay_blocks = 0
delay_count = 0
context['dl_data'] = {
'url': root_url,
'media_url': media_url,
'url_uuid': url_uuid,
'bytes_total': '',
'bytes_elapsed': '',
'time_total': '',
'time_left': '',
'kbsec': 0,
}
flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'started'}, opts)
try:
with open(file_name, 'wb') as fhp:
#old_time = time.time()
try:
for block in req.iter_content(buffer_size):
if opts.get('hard_stop'):
queue_urls([media_url], dbclient, opts)
break
if opts.get('abort'):
break
if is_text is True:
content += str(block)
fhp.write(block)
count += buffer_size
delay_blocks += buffer_size
delay_count += 1
#old_time = time.time()
time_delay = time.time() - last_time
if time_delay >= float(1):
last_time = time.time()
try:
blocks_left = int((total - count) / buffer_size)
except ZeroDivisionError:
blocks_left = 0
kbsec = (buffer_size / 1024) * delay_count
try:
seconds_left = ((blocks_left * buffer_size) / 1024) / kbsec
except ZeroDivisionError:
seconds_left = 0
minutes_left = int(seconds_left / 60)
minsecs_left = seconds_left % 60
time_left = '%d:%02d' % (minutes_left, minsecs_left)
seconds_elapsed = time.time() - start_time
seconds_total = seconds_elapsed + seconds_left
minutes_total = int(seconds_total / 60)
minsecs_total = int(seconds_total % 60)
time_total = '%d:%02d' % (minutes_total, minsecs_total)
try:
percent = int(count / point)
except ZeroDivisionError:
percent = 0
context['dl_data']['bytes_total'] = total # pylint: disable=bad-whitespace
context['dl_data']['bytes_elapsed'] = count # pylint: disable=bad-whitespace
context['dl_data']['time_total'] = time_total # pylint: disable=bad-whitespace
context['dl_data']['time_left'] = time_left # pylint: disable=bad-whitespace
context['dl_data']['kbsec'] = kbsec # pylint: disable=bad-whitespace
if not opts['daemon']:
sys.stdout.write('\x1b[2K\r')
sys.stdout.write(
colored('Total size is {} '.format(sizeof_fmt(total)), 'green'))
sys.stdout.write(colored('({} bytes), '.format(total), 'green'))
sys.stdout.write(colored('{}%, '.format(str(percent)), 'cyan'))
sys.stdout.write(colored(kbsec, 'cyan'))
sys.stdout.write(colored(' KiB/s, ', 'cyan'))
sys.stdout.write(colored('{}/{} left'.format(time_left, time_total), 'cyan'))
sys.stdout.flush()
delay_blocks = 0
delay_count = 0
except OSError as exc:
out.error('OS Error: {}'.format(exc))
out.error('Media URL: {}'.format(media_url))
except ProtocolError as exc:
out.error('Protocol Error: {}'.format(exc))
out.error('Media URL: {}'.format(media_url))
except Exception as exc:
out.error('Exception: {}'.format(exc))
out.error('Media URL: {}'.format(media_url))
except OSError as exc:
out.error('There was an error opening {}: {}'.format(file_name, exc))
del context['dl_data']
if opts.get('hard_stop') or opts.get('abort'):
os.remove(file_name)
if is_text is True and opts.get('save_html', True) is False:
os.remove(file_name)
if not content:
content = None
cur.execute('DELETE FROM active_dl WHERE url_uuid = %s', [url_uuid])
flayer.event.fire('flayer/{}/download'.format(opts['id']), {root_url: 'complete'}, opts)
flayer.db.pattern_wait(dbclient, media_url)
flayer.db.set_domain_wait(dbclient, opts, media_url)
if not opts['daemon']:
print()
time.sleep(wait)
return content, req_headers
def sizeof_fmt(num, suffix='B'):
'''
Show human-readable sizes
'''
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s " % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s " % (num, 'Yi', suffix)
def dbsave_media(cur, media_url, url_uuid, file_name, dbclient):
'''
Save a media item into the database, once it's been downloaded
cur: Database cursor
media_url: The URL of the image/video that was downloaded
url_uuid: The UUID of the parent of the media_url
file_name: The place where the media_url was downloaded to
'''
try:
cur.execute('''
INSERT INTO urls (url) values (%s) RETURNING uuid
''', [media_url])
dbclient.commit()
new_id = cur.fetchone()[0]
except psycopg2.IntegrityError:
# This relationship already exists
dbclient.rollback()
cur.execute('''
SELECT uuid FROM urls WHERE url = %s
''', [media_url])
new_id = cur.fetchone()[0]
try:
cur.execute('''
INSERT INTO referers (url_uuid, referer_uuid) values (%s, %s)
''', [new_id, url_uuid])
dbclient.commit()
except psycopg2.IntegrityError:
# This relationship already exists
dbclient.rollback()
cur.execute('''
SELECT COUNT(*) FROM content WHERE url_uuid = %s
''', [new_id])
if cur.fetchone()[0] < 1:
cur.execute('''
INSERT INTO content
(url_uuid, cache_path)
VALUES
(%s, %s)
''', [new_id, file_name])
dbclient.commit()
def queue_urls(links, dbclient, opts):
'''
    Add the URLs to the download queue (skipping any already downloaded, unless forced) and return the queue size
'''
out = Output(opts)
cur = dbclient.cursor()
if isinstance(links, str):
links = [links]
for url in links:
if opts.get('force') is not True and not opts.get('queue_id'):
# Check for URL in DB
cur.execute('''
SELECT uuid
FROM urls
WHERE url = %s
''', [url])
            if cur.rowcount > 0:
                if url not in opts['warned']:
                    out.info('URL has already been downloaded; use --force if necessary')
                    opts['warned'].append(url)
                continue
fields = ['url']
args = [url]
if opts.get('queue_id') is not None:
fields.append('uuid')
args.append(opts['queue_id'])
if 'refresh_interval' in opts:
fields.append('refresh_interval')
args.append(opts['refresh_interval'])
if 'overwrite' not in opts:
opts['overwrite'] = False
fields.append('overwrite')
args.append(opts['overwrite'])
query = 'INSERT INTO dl_queue ({}) VALUES ({})'.format(
', '.join(fields),
', '.join(['%s' for arg in range(len(args))])
)
try:
cur.execute(query, args)
dbclient.commit()
except psycopg2.IntegrityError:
# This URL is already queued
dbclient.rollback()
cur.execute('SELECT count(*) FROM dl_queue')
return cur.fetchone()[0]
def reprocess_urls(urls, patterns, dbclient=None):
'''
    Reprocess the cached URLs that match the pattern(s)
'''
if not urls:
urls = []
if isinstance(patterns, str):
patterns = [patterns]
cur = dbclient.cursor()
wheres = ['url~%s'] * len(patterns)
query = 'SELECT url FROM urls WHERE {}'.format(' OR '.join(wheres))
cur.execute(query, patterns)
for row in cur.fetchall():
urls.append(row[0])
return urls
def queue_regexp(urls, pattern, dbclient, opts):
'''
Add the URLs matching the pattern to the download queue
'''
expr = re.compile(pattern)
links = []
for url in urls:
if expr.search(url):
links.append(url)
queue_urls(links, dbclient, opts)
def _rename(media_url, file_name, opts):
'''
When files are downloaded using status, rename as per a template
'''
out = Output(opts)
template = opts.get('rename_template', '')
if not template:
return file_name
urlcomps = urllib.parse.urlparse(media_url)
replacements = {
'host': urlcomps[1].split(':')[0],
'path': '/'.join(urlcomps[2].split('/')[:-2])
}
# File extensions
if '.' in urlcomps[2].split('/')[-1]:
replacements['ext'] = urlcomps[2].split('/')[-1].split('.')[-1]
else:
replacements['ext'] = ''
if not opts.get('rename_count'):
opts['rename_count'] = opts.get('rename_count_start', 0)
if opts.get('rename_count_padding'):
try:
opts['rename_count_padding'] = int(opts['rename_count_padding'])
except ValueError:
out.warn('--rename-count-padding must be an integer, using 0')
opts['rename_count_padding'] = 0
template = template.replace('{count}', '{count:0>{rename_count_padding}}')
replacements['rename_count_padding'] = opts['rename_count_padding']
replacements['count'] = str(opts['rename_count'])
opts['rename_count'] += 1
file_name = os.path.join(opts['save_path'], template.format(**replacements))
return file_name
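# Illustrative sketch, not part of the original module: with a hypothetical
# opts dict, a rename template such as 'img_{count}.{ext}' numbers downloads
# sequentially, zero-padded when rename_count_padding is set. For example:
#
#     opts = {
#         'save_path': '/tmp/dl',
#         'rename_template': 'img_{count}.{ext}',
#         'rename_count_start': 1,
#         'rename_count_padding': 3,
#     }
#     _rename('http://example.com/a/b/photo.jpg', 'ignored', opts)
#     # -> '/tmp/dl/img_001.jpg'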
def parse_links(url, content, level, opts):
'''
Return the links from an HTML page
'''
out = Output(opts)
hrefs = []
try:
# Get ready to do some html parsing
soup = BeautifulSoup(content, 'html.parser')
# Generate absolute URLs for every link on the page
url_comps = urllib.parse.urlparse(url)
tags = soup.find_all('a')
if opts['search_src'] is True:
tags = tags + soup.find_all(src=True)
for link in tags:
if level > int(opts['level']):
continue
href = urllib.parse.urljoin(url, link.get('href'))
if opts['search_src'] is True and not link.get('href'):
href = urllib.parse.urljoin(url, link.get('src'))
link_comps = urllib.parse.urlparse(href)
if link.text.startswith('javascript'):
continue
if int(opts.get('level', 0)) > 0 and int(opts.get('level', 0)) < 2:
continue
if opts['span_hosts'] is not True:
if not link_comps[1].startswith(url_comps[1].split(':')[0]):
continue
hrefs.append(href.split('#')[0])
# Render the page, and print it along with the links
if opts.get('render', False) is True:
out.info(soup.get_text())
return hrefs
except TypeError:
# This URL probably isn't HTML
return []
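# Illustrative sketch, not part of the original module: parse_links() only
# needs the source URL, the page content, the current depth and an opts dict.
# With a minimal (hypothetical) opts dict it resolves relative links against
# the page URL:
#
#     html = '<a href="/next">next</a>'
#     opts = {'level': 2, 'search_src': False, 'span_hosts': False}
#     parse_links('http://example.com/start', html, 1, opts)
#     # -> ['http://example.com/next']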
|
the-stack_106_28059 | """Unit test for user usage."""
import os
from unittest import TestCase
import requests_mock
from src.app import send_usage_statistics
class TestUserUsage(TestCase):
"""Unit test class to test method send_user_usage."""
TEST_URL = 'https://test.zalan.do'
@requests_mock.Mocker()
def test_send_usage_statistics(self, mocked_request):
os.environ['SEND_ANONYMOUS_USAGE_INFO'] = str(True)
os.environ['DL_IMAGE_VERSION'] = '1.0'
platforms = {
'APPLICATION_ID': 'tip-locust',
'BUILD_URL': 'https://tip.ci.zalan.do/job/test-tracker/1/',
'CDP_TARGET_REPOSITORY': 'github.bus.zalan.do/butomo/test-tracker',
'Local': 'fakeenv'
}
for k, v in platforms.items():
os.environ[k] = v
mocked_request.post(url='https://www.google-analytics.com/collect', text='ok')
send_usage_statistics(self.TEST_URL)
self.assertTrue(mocked_request.called)
del os.environ[k]
def tearDown(self):
env_keys = ['SEND_ANONYMOUS_USAGE_INFO', 'DL_IMAGE_VERSION']
for k in env_keys:
if os.getenv(k):
del os.environ[k]
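# Illustrative note, not part of the original test module: the test can be run
# with the standard unittest runner from the project root, e.g.
# ``python -m unittest discover -v`` (the exact invocation depends on the
# project layout, which is an assumption here).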
|
the-stack_106_28060 | from past.builtins import basestring
from django.db.models.signals import post_delete, post_save
from django.http import Http404, HttpResponseBadRequest
from celery.result import AsyncResult
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.viewsets import ModelViewSet
from onadata.apps.api.permissions import DataViewViewsetPermissions
from onadata.apps.api.tools import get_baseviewset_class
from onadata.apps.logger.models.data_view import DataView
from onadata.apps.viewer.models.export import Export
from onadata.libs.mixins.authenticate_header_mixin import \
AuthenticateHeaderMixin
from onadata.libs.mixins.cache_control_mixin import CacheControlMixin
from onadata.libs.mixins.etags_mixin import ETagsMixin
from onadata.libs.renderers import renderers
from onadata.libs.serializers.data_serializer import JsonDataSerializer
from onadata.libs.serializers.dataview_serializer import DataViewSerializer
from onadata.libs.serializers.xform_serializer import XFormSerializer
from onadata.libs.utils import common_tags
from onadata.libs.utils.api_export_tools import (custom_response_handler,
export_async_export_response,
include_hxl_row,
process_async_export,
response_for_format)
from onadata.libs.utils.cache_tools import (PROJECT_LINKED_DATAVIEWS,
safe_delete)
from onadata.libs.utils.chart_tools import (get_chart_data_for_field,
get_field_from_field_name)
from onadata.libs.utils.export_tools import str_to_bool
from onadata.libs.utils.model_tools import get_columns_with_hxl
BaseViewset = get_baseviewset_class()
def get_form_field_chart_url(url, field):
return u'%s?field_name=%s' % (url, field)
class DataViewViewSet(AuthenticateHeaderMixin,
CacheControlMixin, ETagsMixin, BaseViewset,
ModelViewSet):
"""
A simple ViewSet for viewing and editing DataViews.
"""
queryset = DataView.objects.select_related()
serializer_class = DataViewSerializer
permission_classes = [DataViewViewsetPermissions]
lookup_field = 'pk'
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [
renderers.XLSRenderer,
renderers.XLSXRenderer,
renderers.CSVRenderer,
renderers.CSVZIPRenderer,
renderers.SAVZIPRenderer,
renderers.ZipRenderer,
]
def get_serializer_class(self):
if self.action == 'data':
serializer_class = JsonDataSerializer
else:
serializer_class = self.serializer_class
return serializer_class
@action(methods=['GET'], detail=True)
def data(self, request, format='json', **kwargs):
"""Retrieve the data from the xform using this dataview"""
start = request.GET.get("start")
limit = request.GET.get("limit")
count = request.GET.get("count")
sort = request.GET.get("sort")
query = request.GET.get("query")
export_type = self.kwargs.get('format', request.GET.get("format"))
self.object = self.get_object()
if export_type is None or export_type in ['json', 'debug']:
data = DataView.query_data(self.object, start, limit,
str_to_bool(count), sort=sort,
filter_query=query)
if 'error' in data:
raise ParseError(data.get('error'))
serializer = self.get_serializer(data, many=True)
return Response(serializer.data)
else:
return custom_response_handler(request, self.object.xform, query,
export_type,
dataview=self.object)
@action(methods=['GET'], detail=True)
def export_async(self, request, *args, **kwargs):
params = request.query_params
job_uuid = params.get('job_uuid')
export_type = params.get('format')
include_hxl = params.get('include_hxl', False)
include_labels = params.get('include_labels', False)
include_labels_only = params.get('include_labels_only', False)
query = params.get("query")
dataview = self.get_object()
xform = dataview.xform
if include_labels is not None:
include_labels = str_to_bool(include_labels)
if include_labels_only is not None:
include_labels_only = str_to_bool(include_labels_only)
if include_hxl is not None:
include_hxl = str_to_bool(include_hxl)
remove_group_name = params.get('remove_group_name', False)
columns_with_hxl = get_columns_with_hxl(xform.survey.get('children'))
if columns_with_hxl and include_hxl:
include_hxl = include_hxl_row(
dataview.columns, list(columns_with_hxl)
)
options = {
'remove_group_name': remove_group_name,
'dataview_pk': dataview.pk,
'include_hxl': include_hxl,
'include_labels': include_labels,
'include_labels_only': include_labels_only
}
if query:
options.update({'query': query})
if job_uuid:
job = AsyncResult(job_uuid)
if job.state == 'SUCCESS':
export_id = job.result
export = Export.objects.get(id=export_id)
resp = export_async_export_response(request, export)
else:
resp = {
'job_status': job.state
}
else:
resp = process_async_export(request, xform, export_type,
options=options)
return Response(data=resp,
status=status.HTTP_202_ACCEPTED,
content_type="application/json")
@action(methods=['GET'], detail=True)
def form(self, request, format='json', **kwargs):
dataview = self.get_object()
xform = dataview.xform
if format not in ['json', 'xml', 'xls']:
return HttpResponseBadRequest('400 BAD REQUEST',
content_type='application/json',
status=400)
filename = xform.id_string + "." + format
response = response_for_format(xform, format=format)
response['Content-Disposition'] = 'attachment; filename=' + filename
return response
@action(methods=['GET'], detail=True)
def form_details(self, request, *args, **kwargs):
dataview = self.get_object()
xform = dataview.xform
serializer = XFormSerializer(xform, context={'request': request})
return Response(data=serializer.data,
content_type="application/json")
@action(methods=['GET'], detail=True)
def charts(self, request, *args, **kwargs):
dataview = self.get_object()
xform = dataview.xform
serializer = self.get_serializer(dataview)
field_name = request.query_params.get('field_name')
field_xpath = request.query_params.get('field_xpath')
fmt = kwargs.get('format', request.accepted_renderer.format)
group_by = request.query_params.get('group_by')
if field_name:
field = get_field_from_field_name(field_name, xform)
field_xpath = field_name if isinstance(field, basestring) \
else field.get_abbreviated_xpath()
if field_xpath and field_xpath not in dataview.columns and \
field_xpath not in [common_tags.SUBMISSION_TIME,
common_tags.SUBMITTED_BY,
common_tags.DURATION]:
raise Http404(
"Field %s does not not exist on the dataview" % field_name)
if field_name or field_xpath:
data = get_chart_data_for_field(
field_name, xform, fmt, group_by, field_xpath,
data_view=dataview
)
return Response(data, template_name='chart_detail.html')
if fmt != 'json' and field_name is None:
raise ParseError("Not supported")
data = serializer.data
data["fields"] = {}
for field in xform.survey_elements:
field_xpath = field.get_abbreviated_xpath()
if field_xpath in dataview.columns:
url = reverse('dataviews-charts', kwargs={'pk': dataview.pk},
request=request, format=fmt)
field_url = get_form_field_chart_url(url, field.name)
data["fields"][field.name] = field_url
return Response(data)
@action(methods=['GET'], detail=True)
def xls_export(self, request, *args, **kwargs):
dataview = self.get_object()
xform = dataview.xform
token = None
export_type = "xls"
query = request.query_params.get("query", {})
meta = request.GET.get('meta')
return custom_response_handler(request,
xform,
query,
export_type,
token,
meta,
dataview)
def destroy(self, request, *args, **kwargs):
dataview = self.get_object()
user = request.user
dataview.soft_delete(user)
return Response(status=status.HTTP_204_NO_CONTENT)
def dataview_post_save_callback(sender, instance=None, created=False,
**kwargs):
safe_delete('{}{}'.format(PROJECT_LINKED_DATAVIEWS, instance.project.pk))
def dataview_post_delete_callback(sender, instance, **kwargs):
if instance.project:
safe_delete('{}{}'.format(PROJECT_LINKED_DATAVIEWS,
instance.project.pk))
post_save.connect(dataview_post_save_callback,
sender=DataView,
dispatch_uid='dataview_post_save_callback')
post_delete.connect(dataview_post_delete_callback,
sender=DataView,
dispatch_uid='dataview_post_delete_callback')
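# Illustrative sketch, not part of the original module: the viewset is normally
# exposed through a DRF router. The basename below matches the
# reverse('dataviews-charts', ...) call used above; the URL prefix is an
# assumption for the example.
#
#     from rest_framework import routers
#
#     router = routers.DefaultRouter()
#     router.register(r'dataviews', DataViewViewSet, basename='dataviews')
#     urlpatterns = router.urls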
|
the-stack_106_28063 | # Configuration file for the Sphinx_PyAEDT documentation builder.
# -- Project information -----------------------------------------------------
import sys
import os
import pathlib
import warnings
import pyvista
import numpy as np
import json
from sphinx_gallery.sorting import FileNameSortKey
local_path = os.path.dirname(os.path.realpath(__file__))
module_path = pathlib.Path(local_path)
root_path = module_path.parent.parent
sys.path.append(os.path.abspath(os.path.join(local_path)))
sys.path.append(os.path.join(root_path))
project = "PyAEDT"
copyright = "(c) 2021 ANSYS, Inc. All rights reserved"
author = "Ansys Inc."
# Check for the local config file, otherwise use default desktop configuration
local_config_file = os.path.join(local_path, "local_config.json")
if os.path.exists(local_config_file):
with open(local_config_file) as f:
config = json.load(f)
else:
config = {"run_examples": True}
# read in version from file
with open(os.path.join(root_path, "pyaedt", "version.txt"), "r") as f:
release = version = f.readline()
# -- General configuration ---------------------------------------------------
# Add any Sphinx_PyAEDT extension module names here, as strings. They can be
# extensions coming with Sphinx_PyAEDT (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx_copybutton",
"recommonmark",
"sphinx.ext.graphviz",
"sphinx.ext.mathjax",
"sphinx.ext.inheritance_diagram",
"numpydoc",
]
# Intersphinx mapping
intersphinx_mapping = {
"python": ("https://docs.python.org/dev", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"numpy": ("https://numpy.org/devdocs", None),
"matplotlib": ("https://matplotlib.org/stable", None),
"imageio": ("https://imageio.readthedocs.io/en/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable", None),
"pytest": ("https://docs.pytest.org/en/stable", None),
}
# numpydoc configuration
numpydoc_use_plots = True
numpydoc_show_class_members = False
numpydoc_xref_param_type = True
numpydoc_validate = True
numpydoc_validation_checks = {
# general
"GL06", # Found unknown section
"GL07", # Sections are in the wrong order.
"GL08", # The object does not have a docstring
"GL09", # Deprecation warning should precede extended summary
"GL10", # reST directives {directives} must be followed by two colons
# Summary
"SS01", # No summary found
"SS02", # Summary does not start with a capital letter
"SS03", # Summary does not end with a period
"SS04", # Summary contains heading whitespaces
"SS05", # Summary must start with infinitive verb, not third person
# Parameters
"PR10", # Parameter "{param_name}" requires a space before the colon '
# separating the parameter name and type",
}
numpydoc_validation_exclude = { # set of regex
r"\.AEDTMessageManager.add_message$", # bad SS05
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# disable generating the sphinx nested documentation
if "PYAEDT_CI_NO_AUTODOC" in os.environ:
templates_path.clear()
# Copy button customization ---------------------------------------------------
# exclude traditional Python prompts from the copied code
copybutton_prompt_text = r">>> ?|\.\.\. "
copybutton_prompt_is_regexp = True
# The language for content autogenerated by Sphinx_PyAEDT. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "Python"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "sphinx_boogergreen_theme_1", "Thumbs.db", ".DS_Store", "*.txt"]
inheritance_graph_attrs = dict(rankdir="RL", size='"8.0, 10.0"', fontsize=14, ratio="compress")
inheritance_node_attrs = dict(shape="ellipse", fontsize=14, height=0.75, color="dodgerblue1", style="filled")
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
# The master toctree document.
master_doc = "index"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Manage errors
pyvista.set_error_output_file("errors.txt")
# Ensure that offscreen rendering is used for docs generation
pyvista.OFF_SCREEN = True
# Preferred plotting style for documentation
# pyvista.set_plot_theme('document')
# must be less than or equal to the XVFB window size
pyvista.rcParams["window_size"] = np.array([1024, 768])
# Save figures in specified directory
pyvista.FIGURE_PATH = os.path.join(os.path.abspath("./images/"), "auto-generated/")
if not os.path.exists(pyvista.FIGURE_PATH):
os.makedirs(pyvista.FIGURE_PATH)
# gallery build requires AEDT install
if os.name != "posix" and "PYAEDT_CI_NO_EXAMPLES" not in os.environ:
# suppress annoying matplotlib bug
warnings.filterwarnings(
"ignore",
category=UserWarning,
message="Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.",
)
# necessary for pyvista when building the sphinx gallery
pyvista.BUILDING_GALLERY = True
if config["run_examples"]:
extensions.append("sphinx_gallery.gen_gallery")
sphinx_gallery_conf = {
# convert rst to md for ipynb
"pypandoc": True,
# path to your examples scripts
"examples_dirs": ["../../examples/"],
# path where to save gallery generated examples
"gallery_dirs": ["examples"],
# Patter to search for examples files
"filename_pattern": r"\.py",
# Remove the "Download all examples" button from the top level gallery
"download_all_examples": False,
# Sort gallery examples by file name instead of number of lines (default)
"within_subsection_order": FileNameSortKey,
# directory where function granular galleries are stored
"backreferences_dir": None,
# Modules for which function level galleries are created. In
"doc_module": "ansys-mapdl-core",
"image_scrapers": ("pyvista", "matplotlib"),
"ignore_pattern": "flycheck*",
"thumbnail_size": (350, 350),
# 'first_notebook_cell': ("%matplotlib inline\n"
# "from pyvista import set_plot_theme\n"
# "set_plot_theme('document')"),
}
# -- Options for HTML output -------------------------------------------------
html_show_sourcelink = True
html_theme = "pyansys_sphinx_theme"
html_logo = "https://docs.pyansys.com/_static/pyansys-logo-black-cropped.png"
html_theme_options = {
"github_url": "https://github.com/pyansys/PyAEDT",
"show_prev_next": False,
"logo_link": "https://aedtdocs.pyansys.com/" # navigate to the main page
}
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pyaedtdoc"
|
the-stack_106_28067 | import os # import the os module so that environment variables can be set
import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import QUrl
def main():
""" 環境変数に Qt Quick Controls 2 のコンフィグファイル設定 を追加する
環境変数 QT_QUICK_CONTROLS_CONF に対して、本 Code と同じ
ディレクトリにある qtquickcontrols2.conf
( Qt Quick Controls 2 の Configuration File ファイル)
を設定
"""
os.environ["QT_QUICK_CONTROLS_CONF"] = "qtquickcontrols2.conf"
app = QApplication([])
engine = QQmlApplicationEngine()
url = QUrl("../Ui/Main.qml")
engine.load(url)
if not engine.rootObjects():
sys.exit(-1)
""" QMLのrootオブジェクトのtitle プロパティを変更
QQmlApplicationEngine経由で、rootObjects()を参照し
setProperty()でセットする
See : http://doc.qt.io/qt-5/qtqml-cppintegration-interactqmlfromcpp.html
https://doc.qt.io/qtforpython/overviews/properties.html#reading-and-writing-properties-with-the-meta-object-system
"""
root = engine.rootObjects()[0]
root.setProperty("title", "Style-Imagine")
ret = app.exec_()
sys.exit(ret)
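# Illustrative sketch, not part of the original script: the qtquickcontrols2.conf
# referenced above typically selects a Qt Quick Controls 2 style; given the
# "Style-Imagine" window title, it could look like the following (the exact
# contents and asset path are assumptions):
#
#     [Controls]
#     Style=Imagine
#
#     [Imagine]
#     Path=../Ui/imagine-assets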
if __name__ == '__main__':
main() |
the-stack_106_28068 | """This module contains rules for the network genrules."""
load("//lib/bazel:c_rules.bzl", "makani_c_library")
# This is a genrule for files that use network.yaml as a source.
def makani_network_genrule(**kwargs):
kwargs["cmd"] = " ".join([
"$(location %s) --autogen_root=$(GENDIR)" % kwargs["tools"][0],
"--output_dir=$(GENDIR)/avionics/network",
"--network_file=$(location network.yaml)",
])
kwargs["srcs"] = ["network.yaml"]
native.genrule(**kwargs)
def makani_cvt_genrule(
name,
all_nodes = False,
all_q7s = False,
all_tms570s = False,
aio_labels = "",
aio_nodes = "",
**kwargs):
cvt_deps = [
"//avionics/common:cvt",
"//avionics/common:cvt_entries",
"//avionics/common:pack_avionics_messages",
"//avionics/network:aio_node",
"//avionics/network:message_type",
"//common:macros",
]
makani_c_library(
name = name,
srcs = [
"cvt_entries_" + name + ".c",
],
deps = select({
"//lib/bazel:tms570_mode": cvt_deps,
"//conditions:default": cvt_deps + [
"//control:pack_control_telemetry",
"//control:pack_ground_telemetry",
"//sim:pack_sim_messages",
"//sim:pack_sim_telemetry",
],
}),
)
native.genrule(
name = "cvt_entries_" + name + "_genrule",
outs = ["cvt_entries_" + name + ".c"],
tools = ["//avionics/network:generate_cvt"],
srcs = ["network.yaml"],
cmd = " ".join([
"$(location //avionics/network:generate_cvt)",
"--all_nodes=" + str(all_nodes),
"--all_q7s=" + str(all_q7s),
"--all_tms570s=" + str(all_tms570s),
"--aio_labels=" + aio_labels,
"--aio_nodes=" + aio_nodes,
"--autogen_root=$(GENDIR)",
"--output_dir=$(GENDIR)/avionics/network",
"--output_source=cvt_entries_" + name + ".c",
"--network_file=$(location network.yaml)",
]),
**kwargs
)
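# Illustrative sketch, not part of the original file: a BUILD file would load
# and call these macros roughly as follows. The .bzl label, target names and
# tool label below are placeholders, not targets known to exist in the repo.
#
# load("//avionics/network:network_rules.bzl", "makani_network_genrule")
#
# makani_network_genrule(
#     name = "message_type_genrule",
#     outs = ["message_type.c", "message_type.h"],
#     tools = ["//avionics/network:generate_message_type"],
# )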
|
the-stack_106_28069 | def add_node(v):
global node_count
if v in nodes:
print("Node already exists")
else:
node_count+=1
nodes.append(v)
        # grow every existing row by one column, then append a new all-zero row
        for n in graph:
            n.append(0)
        temp = []
        for i in range(node_count):
            temp.append(0)
        graph.append(temp)
def add_edges(v1,v2,cost):
if v1 not in nodes:
print(v1,"not present in the graph")
elif v2 not in nodes:
print(v2,"node not present in the graph")
else:
index1=nodes.index(v1)
index2=nodes.index(v2)
graph[index1][index2]=cost
graph[index2][index1]=cost
def delete_node(v):
global node_count
if v not in nodes:
print(v,"not in graph")
else:
        index1 = nodes.index(v)
        node_count = node_count - 1
        nodes.remove(v)
        # remove the row for v, then remove its column from every remaining row
        graph.pop(index1)
        for i in graph:
            i.pop(index1)
def print_matrix():
for i in range(node_count):
for j in range(node_count):
print(graph[i][j],end=" ")
print()
nodes=[]
graph=[]
node_count=0
add_node("A") #function calling
add_node("B")
add_node("C")
add_node("D")
add_edges("A","C",10)
add_edges("A","D",20)
delete_node("D")
print("After deleting")
print_matrix()
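# Illustrative addition, not part of the original script: add_edges() writes the
# cost in both directions, so the matrix stays symmetric. After deleting "D"
# above, the remaining 3x3 matrix only keeps the A-C edge:
#
#       A  B  C
#   A   0  0 10
#   B   0  0  0
#   C  10  0  0
print("Remaining nodes:", nodes)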
|
the-stack_106_28070 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ServerSecurityAlertPolicy(ProxyResource):
"""A server security alert policy.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param state: Required. Specifies the state of the policy, whether it is
enabled or disabled. Possible values include: 'New', 'Enabled', 'Disabled'
:type state: str or ~azure.mgmt.sql.models.SecurityAlertPolicyState
:param disabled_alerts: Specifies an array of alerts that are disabled.
Allowed values are: Sql_Injection, Sql_Injection_Vulnerability,
Access_Anomaly, Data_Exfiltration, Unsafe_Action
:type disabled_alerts: list[str]
:param email_addresses: Specifies an array of e-mail addresses to which
the alert is sent.
:type email_addresses: list[str]
:param email_account_admins: Specifies that the alert is sent to the
account administrators.
:type email_account_admins: bool
:param storage_endpoint: Specifies the blob storage endpoint (e.g.
https://MyAccount.blob.core.windows.net). This blob storage will hold all
Threat Detection audit logs.
:type storage_endpoint: str
:param storage_account_access_key: Specifies the identifier key of the
Threat Detection audit storage account.
:type storage_account_access_key: str
:param retention_days: Specifies the number of days to keep in the Threat
Detection audit logs.
:type retention_days: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'state': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'SecurityAlertPolicyState'},
'disabled_alerts': {'key': 'properties.disabledAlerts', 'type': '[str]'},
'email_addresses': {'key': 'properties.emailAddresses', 'type': '[str]'},
'email_account_admins': {'key': 'properties.emailAccountAdmins', 'type': 'bool'},
'storage_endpoint': {'key': 'properties.storageEndpoint', 'type': 'str'},
'storage_account_access_key': {'key': 'properties.storageAccountAccessKey', 'type': 'str'},
'retention_days': {'key': 'properties.retentionDays', 'type': 'int'},
}
def __init__(self, **kwargs):
super(ServerSecurityAlertPolicy, self).__init__(**kwargs)
self.state = kwargs.get('state', None)
self.disabled_alerts = kwargs.get('disabled_alerts', None)
self.email_addresses = kwargs.get('email_addresses', None)
self.email_account_admins = kwargs.get('email_account_admins', None)
self.storage_endpoint = kwargs.get('storage_endpoint', None)
self.storage_account_access_key = kwargs.get('storage_account_access_key', None)
self.retention_days = kwargs.get('retention_days', None)
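# Illustrative sketch, not part of the generated file: like other msrest-based
# models, the class is typically constructed with keyword arguments matching
# the parameters documented above, e.g.
#
#     policy = ServerSecurityAlertPolicy(
#         state='Enabled',
#         email_account_admins=True,
#         retention_days=30,
#     )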
|
the-stack_106_28071 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import dataclasses
import logging
import os
from typing import Any, Callable, List, Optional, Sequence, Tuple
from absl.testing import absltest
import jax
from jax import dtypes
from jax import numpy as jnp
from jax import test_util as jtu
from jax import tree_util
from jax.config import config
from jax.experimental import jax2tf
from jax.interpreters import masking
from jax._src import util
import numpy as np
import tensorflow as tf # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
DType = Any
def _make_tf_input_signature(*tf_args) -> List[tf.TensorSpec]:
# tf_args can be PyTrees
def _make_one_array_signature(tf_arg):
return tf.TensorSpec(np.shape(tf_arg), jax2tf.dtype_of_val(tf_arg))
return tf.nest.map_structure(_make_one_array_signature, list(tf_args))
def _run_tf_function(func_tf: Callable, *tf_args, mode: str):
if mode == "eager":
return func_tf(*tf_args) # EAGER
elif mode == "graph":
return tf.function(
func_tf,
autograph=False,
input_signature=_make_tf_input_signature(*tf_args))(*tf_args) # GRAPH
elif mode == "compiled":
# Adding an explicit input_signature prevents TF from constant-folding
# the computation eagerly before compilation
return tf.function(
func_tf,
autograph=False,
jit_compile=True,
input_signature=_make_tf_input_signature(*tf_args))(
*tf_args) # COMPILED
else:
assert False, (
f"Expected 'eager', 'graph', or 'compiled' for mode: got '{mode}'")
## Helper functions for matching OpMetadata in TF graphs
@dataclasses.dataclass(order=True, frozen=True)
class OpMetadataGraph:
tf_type: str # The standard Tf.Operation.type
op_type: str # The rest are OpMetadata fields from _Xla... attributes
op_name: str
source_file: str
source_line: str
def SaveAndLoadModel(model: tf.Module,
save_gradients=True) -> tf.Module:
# Roundtrip through saved model on disk.
model_dir = os.path.join(absltest.get_default_test_tmpdir(), str(id(model)))
tf.saved_model.save(
model, model_dir,
options=tf.saved_model.SaveOptions(experimental_custom_gradients=save_gradients))
restored_model = tf.saved_model.load(model_dir)
return restored_model
def SaveAndLoadFunction(f_tf: Callable,
input_signature: Sequence[tf.TensorSpec],
variables: Sequence[tf.Variable] = (),
save_gradients=True) -> Tuple[Callable, tf.train.Checkpoint]:
# Roundtrip through saved model on disk. Return the Checkpoint also
# for the cases when there are variables.
model = tf.train.Checkpoint()
model.f = tf.function(f_tf,
autograph=False,
input_signature=input_signature)
model.variables = variables
restored = SaveAndLoadModel(model, save_gradients=save_gradients)
return restored.f, restored
class JaxToTfTestCase(jtu.JaxTestCase):
def setUp(self):
super().setUp()
# Ensure that all TF ops are created on the proper device (TPU or GPU or CPU)
tf_preferred_devices = (
tf.config.list_logical_devices("TPU") +
tf.config.list_logical_devices("GPU") +
tf.config.list_logical_devices())
self.tf_default_device = tf_preferred_devices[0]
logging.info(f"Running jax2tf converted code on {self.tf_default_device}.")
# We need --config=cuda build flag for TF to see the GPUs
self.assertEqual(jtu.device_under_test().upper(),
self.tf_default_device.device_type)
with contextlib.ExitStack() as stack:
stack.enter_context(tf.device(self.tf_default_device))
self.addCleanup(stack.pop_all().close)
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
"""Compares dtypes across JAX and TF dtypes. Overrides super method."""
def to_numpy_dtype(dt):
return dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype
if not config.x64_enabled and canonicalize_dtypes:
self.assertEqual(
dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))),
dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
else:
self.assertEqual(
to_numpy_dtype(jtu._dtype(x)), to_numpy_dtype(jtu._dtype(y)))
def ConvertAndCompare(self,
func_jax: Callable,
*args,
enable_xla: bool = True,
limitations: Sequence = ()):
"""Compares jax_func(*args) with convert(jax_func)(*args).
It compares the result of JAX, TF ("eager" mode),
TF with tf.function ("graph" mode), and TF with
tf.function(jit_compile=True) ("compiled" mode). In each mode,
either we expect to encounter a known limitation, or the value should
match the value from the JAX execution.
Args:
func_jax: the function to invoke (``func_jax(*args)``)
args: the arguments.
enable_xla: if True, allows the use of XLA ops in jax2tf.convert
(default: True).
limitations: the set of limitations for this harness (not yet filtered
by mode).
"""
# Run JAX. Should not fail, we assume that the harness has been filtered
# already by JAX unimplemented primitives.
result_jax = func_jax(*args) # JAX
result_tf = None
func_tf = jax2tf.convert(func_jax, enable_xla=enable_xla)
unexpected_successes: List[str] = []
# Run the "compiled" mode first, it is most important
for mode in ("compiled", "eager", "graph"):
def log_message(extra):
return f"[{self._testMethodName}] mode={mode}: {extra}"
jax2tf_limits = tuple(filter(lambda l: l.filter(mode=mode), limitations))
skip_tf_run = [l for l in jax2tf_limits if l.skip_tf_run]
if skip_tf_run:
logging.info(log_message(f"Skip TF run due to limitations {skip_tf_run}"))
continue
try:
result_tf = _run_tf_function(func_tf, *args, mode=mode)
tf_exception = None
except Exception as e:
tf_exception = e
expect_tf_error = [l for l in jax2tf_limits if l.expect_tf_error]
if tf_exception:
if expect_tf_error:
logging.info(log_message(
"Found expected TF error with enabled limitations "
f"{expect_tf_error}; TF error is {tf_exception}"))
continue
else:
raise tf_exception
else:
if expect_tf_error:
# It is more ergonomic to print all successful modes once
logging.warning(log_message(
f"Unexpected success with known limitations {expect_tf_error}"))
unexpected_successes.append(f"{mode}: {expect_tf_error}")
if (jtu.device_under_test() == "gpu" and
"dot_general_preferred" in self._testMethodName):
logging.info(log_message(f"Arguments are {args}, JAX result is {result_jax}\nand TF result is {result_tf}"))
skip_comparison = [l for l in jax2tf_limits if l.skip_comparison]
if skip_comparison:
logging.warning(log_message(f"Skip result comparison due to {skip_comparison}"))
continue
max_tol = None
max_tol_lim = None if not jax2tf_limits else jax2tf_limits[0].get_max_tolerance_limitation(jax2tf_limits)
if max_tol_lim is not None:
max_tol = max_tol_lim.tol
logging.info(log_message(f"Using tol={max_tol} due to {max_tol_lim}"))
# Convert results to np.arrays
result_tf = tf.nest.map_structure(lambda t: t.numpy(), result_tf) # type: ignore
custom_assert_lim = [l for l in jax2tf_limits if l.custom_assert]
assert len(custom_assert_lim) <= 1, f"Expecting at most one applicable limitation with custom_assert, found {custom_assert_lim}"
try:
err_msg = f"TF mode {mode}."
log_hlo_on_error = mode == "compiled" or jtu.device_under_test() == "tpu"
if log_hlo_on_error:
err_msg += " See the logs for JAX and TF HLO comparisons."
if custom_assert_lim:
logging.info(log_message(f"Running custom_assert with tol={max_tol} due to {custom_assert_lim[0]}"))
custom_assert_lim[0].custom_assert(self, result_jax, result_tf,
args=args, tol=max_tol,
err_msg=err_msg)
else:
logging.info(log_message(f"Running default assert with tol={max_tol}"))
self.assertAllClose(result_jax, result_tf, atol=max_tol, rtol=max_tol,
err_msg=err_msg)
except AssertionError as e:
# Print the HLO for comparison
if not log_hlo_on_error:
print(f"[{self._testMethodName}] Not logging HLO because the "
f"mode was {mode}")
raise
logging.info(f"[{self._testMethodName}] Logging HLO for exception in mode {mode}: {e}")
jax_comp = jax.xla_computation(func_jax)(*args)
jax_hlo = jax_comp.as_hlo_text()
logging.info(f"[{self._testMethodName}] "
f"JAX NON_OPT HLO\n{jax_hlo}")
tf_args_signature = _make_tf_input_signature(*args)
# If we give the signature, we cannot pass scalars
tf_args_no_scalars = tuple(
map(lambda a, sig: tf.convert_to_tensor(a, dtype=sig.dtype),
args, tf_args_signature))
tf_func_compiled = tf.function(
func_tf,
autograph=False,
jit_compile=True,
input_signature=tf_args_signature)
tf_hlo = tf_func_compiled.experimental_get_compiler_ir(*tf_args_no_scalars)(
stage="hlo")
logging.info(f"[{self._testMethodName}] TF NON OPT HLO\n{tf_hlo}")
backend = jax.lib.xla_bridge.get_backend()
modules = backend.compile(jax_comp).hlo_modules()
jax_opt_hlo = modules[0].to_string()
logging.info(f"[{self._testMethodName}] "
f"JAX OPT HLO\n{jax_opt_hlo}")
# TODO(b/189265364): Remove this workaround
if (jtu.device_under_test() == "gpu" and
"dot_general" in self._testMethodName):
print(f"[{self._testMethodName}] Not logging TF OPT HLO because of "
f"crash in tf.experimental_get_compiler_ir (b/189265364)")
else:
tf_opt_hlo = tf_func_compiled.experimental_get_compiler_ir(*tf_args_no_scalars)(
stage="optimized_hlo")
logging.info(f"[{self._testMethodName}] TF OPT HLO\n{tf_opt_hlo}")
raise
# end "for mode"
if unexpected_successes:
msg = (f"[{self._testMethodName}] The following are unexpected "
"successful modes:\n" + "\n".join(unexpected_successes))
logging.warning(msg)
# Uncomment the below if you want to see warnings as failures
# self.assertEmpty(msg)
return result_jax, result_tf
def TransformConvertAndCompare(self, func: Callable, arg,
transform: Optional[str]):
"""Like ConvertAndCompare but first applies a transformation.
`func` must be a function from one argument to one result. `arg` is
the argument before the transformation.
`transform` can be None, "jit", "jvp", "grad", "vmap", "jvp_vmap",
"grad_vmap"
"""
if transform is None:
return self.ConvertAndCompare(func, arg)
if transform == "jit":
return self.ConvertAndCompare(jax.jit(func), arg)
if transform == "jvp":
t_func = lambda x, xt: jax.jvp(func, (x,), (xt,))
return self.ConvertAndCompare(t_func, arg, np.full_like(arg, 0.1))
if transform == "grad":
return self.ConvertAndCompare(jax.grad(func), arg)
if transform == "vmap":
t_arg = np.stack([arg] * 4)
return self.ConvertAndCompare(jax.vmap(func), t_arg)
if transform == "jvp_vmap":
jvp_func = lambda x, xt: jax.jvp(jax.vmap(func), (x,), (xt,))
t_arg = np.stack([arg] * 4)
return self.ConvertAndCompare(jvp_func, t_arg, np.full_like(t_arg, 0.1))
if transform == "grad_vmap":
grad_func = jax.grad(lambda x: jnp.sum(jax.vmap(func)(x)))
t_arg = np.stack([arg] * 4)
return self.ConvertAndCompare(grad_func, t_arg)
assert False, transform
def CheckShapePolymorphism(self, f_jax: Callable, *,
input_signature: Sequence[tf.TensorSpec],
polymorphic_shapes: Optional[Sequence[Any]],
expected_output_signature: Optional[tf.TensorSpec] = None,
enable_xla: bool = True):
"""Converts a function using polymorphic shapes.
Args:
f_jax: a JAX function of `n` arguments
input_signature: used as the input signature for the tf.function.
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
expected_output_signature: if given, this function tests whether the
actual output signature is equal to this one.
enable_xla: Whether to enable XLA conversion for jax2tf.convert.
"""
f_tf = jax2tf.convert(f_jax, polymorphic_shapes=polymorphic_shapes,
enable_xla=enable_xla)
f_tf_func = tf.function(f_tf, autograph=False, input_signature=input_signature)
concrete_f_tf = f_tf_func.get_concrete_function(*input_signature)
if expected_output_signature:
# Strangely, output_shapes can be a single shape for a function with a
# single result, or a list/tuple of shapes.
concrete_output_tf_shape = concrete_f_tf.output_shapes
if not isinstance(concrete_output_tf_shape, (tuple, list)): # Single result
assert not isinstance(expected_output_signature, (tuple, list))
expected_output_signature = [expected_output_signature]
concrete_output_tf_shape = [concrete_output_tf_shape]
for expected, found in util.safe_zip(expected_output_signature,
concrete_output_tf_shape):
self.assertEqual(tuple(expected.shape), tuple(found))
return f_tf
def MakeInputSignature(self, *polymorphic_shapes):
"""From a pytree of in_shape string specification, make a pytree of tf.TensorSpec.
Dimension variables are replaced with None.
"""
def polymorphic_shape_to_tensorspec(poly_shape: str) -> tf.TensorSpec:
in_spec = masking.parse_spec(poly_shape)
return tf.TensorSpec(
tuple(
int(dim_spec) if dim_spec.is_constant else None
for dim_spec in in_spec),
dtype=tf.float32)
return tree_util.tree_multimap(polymorphic_shape_to_tensorspec, polymorphic_shapes)
def CheckOpMetadata(self, jax_fun, x,
expected: Sequence[OpMetadataGraph],
include_xla_op_metadata=True):
"""Checks that the tf.Graph obtained by converting `jax_fun` for argument
`x` contains all the given OpMetadata.
If `not include_xla_op_metadata` then disable the generation of the
OpMetadata attributes, and check that we don't find any ops with
metadata.
"""
f_tf = tf.function(
jax2tf.convert(jax_fun,
include_xla_op_metadata=include_xla_op_metadata),
autograph=False,
input_signature=[tf.TensorSpec(x.shape, x.dtype)])
# Trace the TF function to a graph
f_tf_concrete = f_tf.get_concrete_function(tf.convert_to_tensor(x))
found_tf_ops = []
def iter_nested_graph(graph: tf.Graph):
for n in graph._nodes_by_id.values():
try:
op_metadata = n.get_attr("_XlaOpMetadata")
op_metadata_proto = xla_data_pb2.OpMetadata()
op_metadata_proto.ParseFromString(op_metadata)
found_tf_ops.append(
OpMetadataGraph(
tf_type=n.type,
op_name=op_metadata_proto.op_name,
op_type=op_metadata_proto.op_type,
source_file=op_metadata_proto.source_file,
source_line=op_metadata_proto.source_line))
except ValueError:
continue
# Look for nested graphs. There probably is a better way!
if n.type == "StatelessWhile":
iter_nested_graph(n._body_graph)
iter_nested_graph(n._cond_graph)
if n.type == "StatelessCase":
for idx in range(10): # How can I tell how many cases there are?
branch = getattr(n, f"_branch_graph_{idx}", None)
if branch is None:
break
iter_nested_graph(branch)
iter_nested_graph(f_tf_concrete.graph)
try:
if include_xla_op_metadata:
self.assertContainsSubset(expected, found_tf_ops)
else:
self.assertEmpty(found_tf_ops)
except Exception:
print("Found nodes:\n ", "\n ".join([str(md) for md in found_tf_ops]))
raise
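# Illustrative sketch, not part of the original module: a typical test built on
# JaxToTfTestCase just calls ConvertAndCompare with a JAX function and concrete
# arguments; the class then checks the eager, graph and compiled TF paths.
#
#     class SquareTest(JaxToTfTestCase):
#
#       def test_square(self):
#         f_jax = lambda x: jnp.sum(x * x)
#         self.ConvertAndCompare(f_jax, np.ones((3, 4), dtype=np.float32))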
|
the-stack_106_28073 | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module providing convenience functions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import numpy as np
import tensorflow as tf
import keras
import keras.backend as k
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
import torch
import torch.nn as nn
import torch.optim as optim
from art.classifiers import KerasClassifier, PyTorchClassifier, TFClassifier
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------------------------------- RANDOM NUMBER GENERATORS
def master_seed(seed):
"""
Set the seed for all random number generators used in the library. This ensures experiments reproducibility and
stable testing.
:param seed: The value to be seeded in the random number generators.
:type seed: `int`
"""
import numbers
import random
if not isinstance(seed, numbers.Integral):
raise TypeError('The seed for random number generators has to be an integer.')
# Set Python seed
random.seed(seed)
# Set Numpy seed
np.random.seed(seed)
np.random.RandomState(seed)
# Now try to set seed for all specific frameworks
try:
import tensorflow as tf
logger.info('Setting random seed for TensorFlow.')
tf.set_random_seed(seed)
except ImportError:
logger.info('Could not set random seed for TensorFlow.')
try:
import mxnet as mx
logger.info('Setting random seed for MXNet.')
mx.random.seed(seed)
except ImportError:
logger.info('Could not set random seed for MXNet.')
try:
logger.info('Setting random seed for PyTorch.')
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
except ImportError:
logger.info('Could not set random seed for PyTorch')
# ----------------------------------------------------------------------------------------------------- MATHS UTILITIES
def projection(v, eps, p):
"""
Project the values in `v` on the L_p norm ball of size `eps`.
:param v: Array of perturbations to clip.
:type v: `np.ndarray`
:param eps: Maximum norm allowed.
:type eps: `float`
:param p: L_p norm to use for clipping. Only 1, 2 and `np.Inf` supported for now.
:type p: `int`
:return: Values of `v` after projection.
:rtype: `np.ndarray`
"""
# Pick a small scalar to avoid division by 0
tol = 10e-8
v_ = v.reshape((v.shape[0], -1))
if p == 2:
v_ = v_ * np.expand_dims(np.minimum(1., eps / (np.linalg.norm(v_, axis=1) + tol)), axis=1)
elif p == 1:
v_ = v_ * np.expand_dims(np.minimum(1., eps / (np.linalg.norm(v_, axis=1, ord=1) + tol)), axis=1)
elif p == np.inf:
v_ = np.sign(v_) * np.minimum(abs(v_), eps)
else:
raise NotImplementedError('Values of `p` different from 1, 2 and `np.inf` are currently not supported.')
v = v_.reshape(v.shape)
return v
def random_sphere(nb_points, nb_dims, radius, norm):
"""
Generate randomly `m x n`-dimension points with radius `radius` and centered around 0.
:param nb_points: Number of random data points
:type nb_points: `int`
:param nb_dims: Dimensionality
:type nb_dims: `int`
:param radius: Radius
:type radius: `float`
:param norm: Current support: 1, 2, np.inf
:type norm: `int`
:return: The generated random sphere
:rtype: `np.ndarray`
"""
if norm == 1:
a = np.zeros(shape=(nb_points, nb_dims + 1))
a[:, -1] = np.sqrt(np.random.uniform(0, radius ** 2, nb_points))
for i in range(nb_points):
a[i, 1:-1] = np.sort(np.random.uniform(0, a[i, -1], nb_dims - 1))
res = (a[:, 1:] - a[:, :-1]) * np.random.choice([-1, 1], (nb_points, nb_dims))
elif norm == 2:
from scipy.special import gammainc
a = np.random.randn(nb_points, nb_dims)
s2 = np.sum(a ** 2, axis=1)
base = gammainc(nb_dims / 2.0, s2 / 2.0) ** (1 / nb_dims) * radius / np.sqrt(s2)
res = a * (np.tile(base, (nb_dims, 1))).T
elif norm == np.inf:
res = np.random.uniform(float(-radius), float(radius), (nb_points, nb_dims))
else:
raise NotImplementedError("Norm {} not supported".format(norm))
return res
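# Illustrative sketch, not part of the original module: random_sphere() is
# typically used to draw perturbations inside an L_p ball, e.g. 100 points in a
# 784-dimensional L_inf ball of radius 0.3:
#
#     noise = random_sphere(nb_points=100, nb_dims=784, radius=0.3, norm=np.inf)
#     assert noise.shape == (100, 784) and np.abs(noise).max() <= 0.3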
def original_to_tanh(x_original, clip_min, clip_max, tanh_smoother=0.999999):
"""
Transform input from original to tanh space.
:param x_original: An array with the input to be transformed.
:type x_original: `np.ndarray`
:param clip_min: Minimum clipping value.
:type clip_min: `float` or `np.ndarray`
:param clip_max: Maximum clipping value.
:type clip_max: `float` or `np.ndarray`
:param tanh_smoother: Scalar for multiplying arguments of arctanh to avoid division by zero.
:type tanh_smoother: `float`
:return: An array holding the transformed input.
:rtype: `np.ndarray`
"""
x_tanh = np.clip(x_original, clip_min, clip_max)
x_tanh = (x_tanh - clip_min) / (clip_max - clip_min)
x_tanh = np.arctanh(((x_tanh * 2) - 1) * tanh_smoother)
return x_tanh
def tanh_to_original(x_tanh, clip_min, clip_max, tanh_smoother=0.999999):
"""
Transform input from tanh to original space.
:param x_tanh: An array with the input to be transformed.
:type x_tanh: `np.ndarray`
:param clip_min: Minimum clipping value.
:type clip_min: `float` or `np.ndarray`
:param clip_max: Maximum clipping value.
:type clip_max: `float` or `np.ndarray`
:param tanh_smoother: Scalar for dividing arguments of tanh to avoid division by zero.
:type tanh_smoother: `float`
:return: An array holding the transformed input.
:rtype: `np.ndarray`
"""
x_original = (np.tanh(x_tanh) / tanh_smoother + 1) / 2
return x_original * (clip_max - clip_min) + clip_min
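# Illustrative sketch, not part of the original module: the two transforms above
# are inverses of each other (up to the smoothing constant), so a round trip
# through tanh space recovers the input to within floating point error:
#
#     x = np.array([0.1, 0.5, 0.9])
#     x_back = tanh_to_original(original_to_tanh(x, 0.0, 1.0), 0.0, 1.0)
#     # np.allclose(x, x_back, atol=1e-5) -> True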
# --------------------------------------------------------------------------------------- LABELS MANIPULATION FUNCTIONS
def to_categorical(labels, nb_classes=None):
"""
Convert an array of labels to binary class matrix.
:param labels: An array of integer labels of shape `(nb_samples,)`
:type labels: `np.ndarray`
:param nb_classes: The number of classes (possible labels)
:type nb_classes: `int`
:return: A binary matrix representation of `y` in the shape `(nb_samples, nb_classes)`
:rtype: `np.ndarray`
"""
labels = np.array(labels, dtype=np.int32)
if not nb_classes:
nb_classes = np.max(labels) + 1
categorical = np.zeros((labels.shape[0], nb_classes), dtype=np.float32)
categorical[np.arange(labels.shape[0]), np.squeeze(labels)] = 1
return categorical
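# Illustrative sketch, not part of the original module:
#
#     to_categorical([1, 0, 2], nb_classes=3)
#     # -> array([[0., 1., 0.],
#     #           [1., 0., 0.],
#     #           [0., 0., 1.]], dtype=float32)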
def random_targets(labels, nb_classes):
"""
Given a set of correct labels, randomly choose target labels different from the original ones. These can be
one-hot encoded or integers.
:param labels: The correct labels
:type labels: `np.ndarray`
:param nb_classes: The number of classes for this model
:type nb_classes: `int`
:return: An array holding the randomly-selected target classes, one-hot encoded.
:rtype: `np.ndarray`
"""
if len(labels.shape) > 1:
labels = np.argmax(labels, axis=1)
result = np.zeros(labels.shape)
for class_ind in range(nb_classes):
other_classes = list(range(nb_classes))
other_classes.remove(class_ind)
in_cl = labels == class_ind
result[in_cl] = np.random.choice(other_classes)
return to_categorical(result, nb_classes)
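# Illustrative example (comment only; the class count is an assumption):
#
#     y_true = np.array([0, 1, 2, 0])
#     y_target = random_targets(y_true, nb_classes=3)
#     # Every row of y_target is one-hot and never selects the original class:
#     assert np.all(np.argmax(y_target, axis=1) != y_true)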
def least_likely_class(x, classifier):
"""
Compute the least likely class predictions for sample `x`. This strategy for choosing attack targets was used in
(Kurakin et al., 2016). See https://arxiv.org/abs/1607.02533.
:param x: A data sample of shape accepted by `classifier`.
:type x: `np.ndarray`
:param classifier: The classifier used for computing predictions.
:type classifier: `Classifier`
:return: Least-likely class predicted by `classifier` for sample `x` in one-hot encoding.
:rtype: `np.ndarray`
"""
return to_categorical(np.argmin(classifier.predict(x), axis=1), nb_classes=classifier.nb_classes)
def second_most_likely_class(x, classifier):
"""
Compute the second most likely class predictions for sample `x`. This strategy can be used for choosing target
labels for an attack to improve its chances to succeed.
:param x: A data sample of shape accepted by `classifier`.
:type x: `np.ndarray`
:param classifier: The classifier used for computing predictions.
:type classifier: `Classifier`
:return: Second most likely class predicted by `classifier` for sample `x` in one-hot encoding.
:rtype: `np.ndarray`
"""
return to_categorical(np.argpartition(classifier.predict(x), -2, axis=1)[:, -2], nb_classes=classifier.nb_classes)
def get_label_conf(y_vec):
"""
Returns the confidence and the label of the most probable class given a vector of class confidences
:param y_vec: (np.ndarray) vector of class confidences, nb of instances as first dimension
:return: (np.ndarray, np.ndarray) confidences and labels
"""
assert len(y_vec.shape) == 2
confs, labels = np.amax(y_vec, axis=1), np.argmax(y_vec, axis=1)
return confs, labels
def get_labels_np_array(preds):
"""
    Returns the label of the most probable class given an array of class confidences.
:param preds: (np.ndarray) array of class confidences, nb of instances as first dimension
:return: (np.ndarray) labels
"""
preds_max = np.amax(preds, axis=1, keepdims=True)
y = (preds == preds_max).astype(float)
return y
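# Illustrative example (comment only): turning soft predictions into one-hot labels.
#
#     preds = np.array([[0.1, 0.7, 0.2],
#                       [0.5, 0.3, 0.2]])
#     get_labels_np_array(preds)
#     # -> [[0., 1., 0.],
#     #     [1., 0., 0.]]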
def preprocess(x, y, nb_classes=10, max_value=255):
"""Scales `x` to [0, 1] and converts `y` to class categorical confidences.
:param x: Data instances
:type x: `np.ndarray`
:param y: Labels
:type y: `np.ndarray`
:param nb_classes: Number of classes in dataset
:type nb_classes: `int`
:param max_value: Original maximum allowed value for features
:type max_value: `int`
:return: rescaled values of `x`, `y`
:rtype: `tuple`
"""
x = x.astype('float32') / max_value
y = to_categorical(y, nb_classes)
return x, y
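# Illustrative example (comment only; the uint8 image batch is an assumption):
#
#     x_raw = np.random.randint(0, 256, size=(2, 28, 28, 1)).astype(np.uint8)
#     y_raw = np.array([3, 7])
#     x_scaled, y_one_hot = preprocess(x_raw, y_raw, nb_classes=10)
#     # x_scaled is float32 in [0, 1]; y_one_hot has shape (2, 10).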
# -------------------------------------------------------------------------------------------------------- IO FUNCTIONS
def load_cifar10(raw=False):
"""Loads CIFAR10 dataset from config.CIFAR10_PATH or downloads it if necessary.
:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:type raw: `bool`
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
def load_batch(fpath):
"""
Utility function for loading CIFAR batches, as written in Keras.
:param fpath: Full path to the batch file.
:return: `(data, labels)`
"""
import sys
from six.moves import cPickle
with open(fpath, 'rb') as f:
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
data = d['data']
labels = d['labels']
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels
from art import DATA_PATH
path = get_file('cifar-10-batches-py', extract=True, path=DATA_PATH,
url='http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz')
num_train_samples = 50000
x_train = np.zeros((num_train_samples, 3, 32, 32), dtype=np.uint8)
y_train = np.zeros((num_train_samples,), dtype=np.uint8)
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
data, labels = load_batch(fpath)
x_train[(i - 1) * 10000: i * 10000, :, :, :] = data
y_train[(i - 1) * 10000: i * 10000] = labels
fpath = os.path.join(path, 'test_batch')
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
# Set channels last
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
min_, max_ = 0, 255
if not raw:
min_, max_ = 0., 1.
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_mnist(raw=False):
"""Loads MNIST dataset from `DATA_PATH` or downloads it if necessary.
:param raw: `True` if no preprocessing should be applied to the data. Otherwise, data is normalized to 1.
:type raw: `bool`
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
from art import DATA_PATH
path = get_file('mnist.npz', path=DATA_PATH, url='https://s3.amazonaws.com/img-datasets/mnist.npz')
f = np.load(path)
x_train = f['x_train']
y_train = f['y_train']
x_test = f['x_test']
y_test = f['y_test']
f.close()
# Add channel axis
min_, max_ = 0, 255
if not raw:
min_, max_ = 0., 1.
x_train = np.expand_dims(x_train, axis=3)
x_test = np.expand_dims(x_test, axis=3)
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_stl():
"""
Loads the STL-10 dataset from `DATA_PATH` or downloads it if necessary.
:return: `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
"""
from os.path import join
from art import DATA_PATH
min_, max_ = 0., 1.
# Download and extract data if needed
path = get_file('stl10_binary', path=DATA_PATH, extract=True,
url='https://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz')
with open(join(path, str('train_X.bin')), str('rb')) as f:
x_train = np.fromfile(f, dtype=np.uint8)
x_train = np.reshape(x_train, (-1, 3, 96, 96))
with open(join(path, str('test_X.bin')), str('rb')) as f:
x_test = np.fromfile(f, dtype=np.uint8)
x_test = np.reshape(x_test, (-1, 3, 96, 96))
# Set channel last
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
with open(join(path, str('train_y.bin')), str('rb')) as f:
y_train = np.fromfile(f, dtype=np.uint8)
y_train -= 1
with open(join(path, str('test_y.bin')), str('rb')) as f:
y_test = np.fromfile(f, dtype=np.uint8)
y_test -= 1
x_train, y_train = preprocess(x_train, y_train)
x_test, y_test = preprocess(x_test, y_test)
return (x_train, y_train), (x_test, y_test), min_, max_
def load_dataset(name):
"""
Loads or downloads the dataset corresponding to `name`. Options are: `mnist`, `cifar10` and `stl10`.
:param name: Name of the dataset
:type name: `str`
:return: The dataset separated in training and test sets as `(x_train, y_train), (x_test, y_test), min, max`
:rtype: `(np.ndarray, np.ndarray), (np.ndarray, np.ndarray), float, float`
:raises NotImplementedError: If the dataset is unknown.
"""
if "mnist" in name:
return load_mnist()
elif "cifar10" in name:
return load_cifar10()
elif "stl10" in name:
return load_stl()
raise NotImplementedError("There is no loader for dataset '{}'.".format(name))
def _extract(full_path, path):
import tarfile
import zipfile
import shutil
if full_path.endswith('tar'):
if tarfile.is_tarfile(full_path):
archive = tarfile.open(full_path, "r:")
elif full_path.endswith('tar.gz'):
if tarfile.is_tarfile(full_path):
archive = tarfile.open(full_path, "r:gz")
elif full_path.endswith('zip'):
if zipfile.is_zipfile(full_path):
archive = zipfile.ZipFile(full_path)
else:
return False
else:
return False
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
def get_file(filename, url, path=None, extract=False):
"""
    Downloads a file from a URL if it is not already in the cache. The file indicated by `url` is downloaded to the
    path `path` (default is ~/.art/data) and given the name `filename`. Files in tar, tar.gz, tar.bz, and zip formats
can also be extracted. This is a simplified version of the function with the same name in Keras.
:param filename: Name of the file.
:type filename: `str`
:param url: Download URL.
:type url: `str`
:param path: Folder to store the download. If not specified, `~/.art/data` is used instead.
:type: `str`
:param extract: If true, tries to extract the archive.
:type extract: `bool`
:return: Path to the downloaded file.
:rtype: `str`
"""
if path is None:
from art import DATA_PATH
path_ = os.path.expanduser(DATA_PATH)
else:
path_ = os.path.expanduser(path)
if not os.access(path_, os.W_OK):
path_ = os.path.join('/tmp', '.art')
if not os.path.exists(path_):
os.makedirs(path_)
if extract:
extract_path = os.path.join(path_, filename)
full_path = extract_path + '.tar.gz'
else:
full_path = os.path.join(path_, filename)
# Determine if dataset needs downloading
download = not os.path.exists(full_path)
if download:
logger.info('Downloading data from %s', url)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
from six.moves.urllib.error import HTTPError, URLError
from six.moves.urllib.request import urlretrieve
urlretrieve(url, full_path)
except HTTPError as e:
raise Exception(error_msg.format(url, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(url, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(full_path):
os.remove(full_path)
raise
if extract:
if not os.path.exists(extract_path):
_extract(full_path, path_)
return extract_path
return full_path
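# Illustrative usage (comment only; the URL and filename are placeholders, not real
# project resources):
#
#     path = get_file('example.npz', url='https://example.com/example.npz')
#     data = np.load(path)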
def make_directory(dir_path):
"""
Creates the specified tree of directories if needed.
:param dir_path: (str) directory or file path
:return: None
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def clip_and_round(x, clip_values, round_samples):
"""
Rounds the input to the correct level of granularity.
    Useful to ensure data passed to the classifier can be represented
    in the correct domain, e.g., [0, 255] integers versus [0, 1]
    or [0, 255] floating points.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param clip_values: Tuple of the form `(min, max)` representing the minimum and maximum values allowed
for features.
:type clip_values: `tuple`
:param round_samples: The resolution of the input domain to round the data to, e.g., 1.0, or 1/255. Set to 0 to disable.
:type round_samples: `float`
"""
if round_samples == 0:
return x
x = np.clip(x, *clip_values)
x = np.around(x / round_samples) * round_samples
return x
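# Illustrative example (comment only): quantising inputs to an 8-bit grid inside [0, 1].
#
#     clip_and_round(np.array([0.1234, 1.2]), clip_values=(0.0, 1.0), round_samples=1.0 / 255)
#     # -> values clipped to [0, 1] and snapped to the nearest multiple of 1/255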
# -------------------------------------------------------------------------------------------------- PRE-TRAINED MODELS
def _tf_initializer_w_conv2d(_, dtype, partition_info):
"""
Initializer of weights in convolution layer for Tensorflow.
:return: Tensorflow constant
:rtype: tf.constant
"""
_ = partition_info
w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy'))
return tf.constant(w_conv2d, dtype)
def _kr_initializer_w_conv2d(_, dtype=None):
"""
Initializer of weights in convolution layer for Keras.
:return: Keras variable
:rtype: k.variable
"""
w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy'))
return k.variable(value=w_conv2d, dtype=dtype)
def _tf_initializer_b_conv2d(_, dtype, partition_info):
"""
Initializer of biases in convolution layer for Tensorflow.
:return: Tensorflow constant
:rtype: tf.constant
"""
_ = partition_info
b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy'))
return tf.constant(b_conv2d, dtype)
def _kr_initializer_b_conv2d(_, dtype=None):
"""
Initializer of weights in convolution layer for Keras.
:return: Keras variable
:rtype: k.variable
"""
b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy'))
return k.variable(value=b_conv2d, dtype=dtype)
def _tf_initializer_w_dense(_1, dtype, partition_info):
"""
Initializer of weights in dense layer for Tensorflow.
:return: Tensorflow constant
:rtype: tf.constant
"""
_ = partition_info
w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy'))
return tf.constant(w_dense, dtype)
def _kr_initializer_w_dense(_, dtype=None):
"""
Initializer of weights in dense layer for Keras.
    :return: Keras variable
:rtype: k.variable
"""
w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy'))
return k.variable(value=w_dense, dtype=dtype)
def _tf_initializer_b_dense(_1, dtype, partition_info):
"""
Initializer of biases in dense layer for Tensorflow.
:return: Tensorflow constant
:rtype: tf.constant
"""
_ = partition_info
b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy'))
return tf.constant(b_dense, dtype)
def _kr_initializer_b_dense(_, dtype=None):
"""
Initializer of biases in dense layer for Keras.
:return: Keras variable
:rtype: k.variable
"""
b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy'))
return k.variable(value=b_dense, dtype=dtype)
def get_classifier_tf():
"""
Standard Tensorflow classifier for unit testing.
The following hyper-parameters were used to obtain the weights and biases:
learning_rate: 0.01
batch size: 10
number of epochs: 2
optimizer: tf.train.AdamOptimizer
:return: TFClassifier, tf.Session()
"""
# Define input and output placeholders
input_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
output_ph = tf.placeholder(tf.int32, shape=[None, 10])
# Define the tensorflow graph
conv = tf.layers.conv2d(input_ph, 1, 7, activation=tf.nn.relu, kernel_initializer=_tf_initializer_w_conv2d,
bias_initializer=_tf_initializer_b_conv2d)
conv = tf.layers.max_pooling2d(conv, 4, 4)
flattened = tf.contrib.layers.flatten(conv)
# Logits layer
logits = tf.layers.dense(flattened, 10, kernel_initializer=_tf_initializer_w_dense,
bias_initializer=_tf_initializer_b_dense)
    # Loss operation
loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=output_ph))
# Tensorflow session and initialization
sess = tf.Session()
sess.run(tf.global_variables_initializer())
    # Create the classifier (no training is performed here; weights come from the fixed initializers above)
tfc = TFClassifier(clip_values=(0, 1), input_ph=input_ph, logits=logits, output_ph=output_ph, train=None,
loss=loss, learning=None, sess=sess)
return tfc, sess
def get_classifier_kr():
"""
Standard Keras classifier for unit testing
The weights and biases are identical to the Tensorflow model in get_classifier_tf().
:return: KerasClassifier, tf.Session()
"""
# Initialize a tf session
sess = tf.Session()
k.set_session(sess)
# Create simple CNN
model = Sequential()
model.add(Conv2D(1, kernel_size=(7, 7), activation='relu', input_shape=(28, 28, 1),
kernel_initializer=_kr_initializer_w_conv2d, bias_initializer=_kr_initializer_b_conv2d))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Flatten())
model.add(Dense(10, activation='softmax', kernel_initializer=_kr_initializer_w_dense,
bias_initializer=_kr_initializer_b_dense))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(lr=0.01),
metrics=['accuracy'])
# Get classifier
krc = KerasClassifier((0, 1), model, use_logits=False)
return krc, sess
def get_classifier_pt():
"""
Standard PyTorch classifier for unit testing
:return: PyTorchClassifier
"""
class Model(nn.Module):
"""
Create model for pytorch.
The weights and biases are identical to the Tensorflow model in get_classifier_tf().
"""
def __init__(self):
super(Model, self).__init__()
w_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_CONV2D.npy'))
b_conv2d = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_CONV2D.npy'))
w_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'W_DENSE.npy'))
b_dense = np.load(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models', 'B_DENSE.npy'))
self.conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=7)
w_conv2d_pt = np.swapaxes(w_conv2d, 0, 2)
w_conv2d_pt = np.swapaxes(w_conv2d_pt, 1, 3)
self.conv.weight = nn.Parameter(torch.Tensor(w_conv2d_pt))
self.conv.bias = nn.Parameter(torch.Tensor(b_conv2d))
self.pool = nn.MaxPool2d(4, 4)
self.fullyconnected = nn.Linear(25, 10)
self.fullyconnected.weight = nn.Parameter(torch.Tensor(np.transpose(w_dense)))
self.fullyconnected.bias = nn.Parameter(torch.Tensor(b_dense))
def forward(self, x):
import torch.nn.functional as f
x = self.pool(f.relu(self.conv(x)))
x = x.view(-1, 25)
logit_output = self.fullyconnected(x)
return logit_output
# Define the network
model = Model()
# Define a loss function and optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Get classifier
ptc = PyTorchClassifier((0, 1), model, loss_fn, optimizer, (1, 28, 28), 10)
return ptc
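# Rough sketch of how these fixed-weight test classifiers are typically exercised
# (comment only; it assumes the classifier wrappers imported at the top of this module
# and standard MNIST shapes):
#
#     (x_train, y_train), (x_test, y_test), _, _ = load_mnist()
#     tfc, sess = get_classifier_tf()
#     preds = tfc.predict(x_test[:16])        # expected shape (16, 10)
#     labels = get_labels_np_array(preds)     # one-hot predicted labels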
|
the-stack_106_28075 | from aiohttp.test_utils import unittest_run_loop
from OpenCast.app.command import playlist as PlaylistCmd
from OpenCast.app.notification import WSResponse
from OpenCast.domain.event import playlist as PlaylistEvt
from OpenCast.domain.service.identity import IdentityService
from .util import MonitorControllerTestCase
class RootMonitorControllerTest(MonitorControllerTestCase):
def setUp(self):
super(RootMonitorControllerTest, self).setUp()
@unittest_run_loop
async def test_event_listening(self):
async with self.client.ws_connect("/api/events") as ws:
playlist_id = IdentityService.id_playlist()
cmd_id = IdentityService.id_command(PlaylistCmd.CreatePlaylist, playlist_id)
created_evt = PlaylistEvt.PlaylistCreated(
cmd_id,
playlist_id,
"name",
[],
False,
)
self.evt_dispatcher.dispatch(created_evt)
await self.expect_ws_events(ws, [created_evt])
update_cmd_id = IdentityService.id_command(
PlaylistCmd.UpdatePlaylistContent, playlist_id
)
updated_evt = PlaylistEvt.PlaylistContentUpdated(
update_cmd_id, playlist_id, []
)
self.evt_dispatcher.dispatch(created_evt)
self.evt_dispatcher.dispatch(updated_evt)
await self.expect_ws_events(ws, [created_evt, updated_evt])
@unittest_run_loop
async def test_ws_message(self):
async with self.client.ws_connect("/api/events") as ws:
self.infra_facade.player.play_time.return_value = 1000
await ws.send_str("play_time")
await self.expect_ws_events(
ws, [WSResponse(None, "play_time", {"play_time": 1000})]
)
|
the-stack_106_28076 | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfserving.configuration import Configuration
class V1beta1PredictorExtensionSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'protocol_version': 'str',
'readiness_probe': 'V1Probe',
'resources': 'V1ResourceRequirements',
'runtime_version': 'str',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'storage_uri': 'str',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'protocol_version': 'protocolVersion',
'readiness_probe': 'readinessProbe',
'resources': 'resources',
'runtime_version': 'runtimeVersion',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'storage_uri': 'storageUri',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, protocol_version=None, readiness_probe=None, resources=None, runtime_version=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, storage_uri=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1beta1PredictorExtensionSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._protocol_version = None
self._readiness_probe = None
self._resources = None
self._runtime_version = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._storage_uri = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
if name is not None:
self.name = name
if ports is not None:
self.ports = ports
if protocol_version is not None:
self.protocol_version = protocol_version
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resources is not None:
self.resources = resources
if runtime_version is not None:
self.runtime_version = runtime_version
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if storage_uri is not None:
self.storage_uri = storage_uri
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1beta1PredictorExtensionSpec. # noqa: E501
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1beta1PredictorExtensionSpec.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1beta1PredictorExtensionSpec. # noqa: E501
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1beta1PredictorExtensionSpec.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1beta1PredictorExtensionSpec. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1beta1PredictorExtensionSpec.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1beta1PredictorExtensionSpec. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1beta1PredictorExtensionSpec.
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1beta1PredictorExtensionSpec. # noqa: E501
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:return: The image of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1beta1PredictorExtensionSpec.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:param image: The image of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:return: The image_pull_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1beta1PredictorExtensionSpec.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The lifecycle of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1beta1PredictorExtensionSpec.
:param lifecycle: The lifecycle of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The liveness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1beta1PredictorExtensionSpec.
:param liveness_probe: The liveness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def name(self):
"""Gets the name of this V1beta1PredictorExtensionSpec. # noqa: E501
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:return: The name of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1PredictorExtensionSpec.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:param name: The name of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def ports(self):
"""Gets the ports of this V1beta1PredictorExtensionSpec. # noqa: E501
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:return: The ports of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1beta1PredictorExtensionSpec.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:param ports: The ports of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def protocol_version(self):
"""Gets the protocol_version of this V1beta1PredictorExtensionSpec. # noqa: E501
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:return: The protocol_version of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""Sets the protocol_version of this V1beta1PredictorExtensionSpec.
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:param protocol_version: The protocol_version of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._protocol_version = protocol_version
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The readiness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1beta1PredictorExtensionSpec.
:param readiness_probe: The readiness_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resources(self):
"""Gets the resources of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The resources of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1PredictorExtensionSpec.
:param resources: The resources of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def runtime_version(self):
"""Gets the runtime_version of this V1beta1PredictorExtensionSpec. # noqa: E501
Runtime version of the predictor docker image # noqa: E501
:return: The runtime_version of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._runtime_version
@runtime_version.setter
def runtime_version(self, runtime_version):
"""Sets the runtime_version of this V1beta1PredictorExtensionSpec.
Runtime version of the predictor docker image # noqa: E501
:param runtime_version: The runtime_version of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._runtime_version = runtime_version
@property
def security_context(self):
"""Gets the security_context of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The security_context of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1beta1PredictorExtensionSpec.
:param security_context: The security_context of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:return: The startup_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1beta1PredictorExtensionSpec.
:param startup_probe: The startup_probe of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1beta1PredictorExtensionSpec. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1beta1PredictorExtensionSpec.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1beta1PredictorExtensionSpec. # noqa: E501
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:return: The stdin_once of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1beta1PredictorExtensionSpec.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false # noqa: E501
:param stdin_once: The stdin_once of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def storage_uri(self):
"""Gets the storage_uri of this V1beta1PredictorExtensionSpec. # noqa: E501
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:return: The storage_uri of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1beta1PredictorExtensionSpec.
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:param storage_uri: The storage_uri of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._storage_uri = storage_uri
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1beta1PredictorExtensionSpec. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1beta1PredictorExtensionSpec.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:return: The termination_message_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1beta1PredictorExtensionSpec.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1beta1PredictorExtensionSpec. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1beta1PredictorExtensionSpec.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1beta1PredictorExtensionSpec. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1beta1PredictorExtensionSpec.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1beta1PredictorExtensionSpec. # noqa: E501
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1beta1PredictorExtensionSpec.
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1beta1PredictorExtensionSpec. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1beta1PredictorExtensionSpec. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1beta1PredictorExtensionSpec.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1beta1PredictorExtensionSpec. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1PredictorExtensionSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1PredictorExtensionSpec):
return True
return self.to_dict() != other.to_dict()
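# Minimal construction sketch (comment only; the field values are illustrative placeholders,
# not recommendations for any particular serving runtime):
#
#     spec = V1beta1PredictorExtensionSpec(
#         storage_uri='gs://my-bucket/my-model',   # hypothetical model location
#         runtime_version='latest',
#         protocol_version='v1',
#     )
#     spec.to_dict()   # plain dict suitable for embedding in an InferenceService body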
|
the-stack_106_28077 | import unittest
import rocksdbpy
import shutil
import tempfile
from rocksdbpy import WriteBatch
class TestIterator(unittest.TestCase):
def setUp(self):
self.temp = tempfile.mkdtemp()
wb = WriteBatch()
# add couple of keys and values
wb.add(b'test_add_1', b'test_value')
wb.add(b'test_add_2', b'test_value')
wb.add(b'test_add_3', b'test_value')
self.db = rocksdbpy.open_default(self.temp)
self.db.write(wb)
def tearDown(self):
self.db.close()
shutil.rmtree(self.temp)
def test_simple(self):
# get iterator in default mode which is forward
itr = self.db.iterator()
i = 1
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i += 1
def test_end(self):
# get iterator in end mode which is reverse
itr = self.db.iterator(mode='end')
i = 3
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i -= 1
def test_from(self):
# get iterator in from mode which is skips some keys
itr = self.db.iterator(mode='from', key=b'test_add_2')
i = 2
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i += 1
def test_from_reverse(self):
# get iterator in from mode which is skips some keys and reverse
itr = self.db.iterator(mode='from', key=b'test_add_2', direction=-1)
i = 2
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i -= 1
def test_count(self):
# get random iterator
itr = self.db.iterator(mode='from', direction=-1)
self.assertEqual(3, itr.len())
def test_valid(self):
# get random iterator
itr = self.db.iterator(mode='from', direction=-1)
self.assertTrue(itr.valid())
|
the-stack_106_28079 | import torch
from torch import nn
from transformers import AutoModel
class JointXLMR(nn.Module):
def __init__(self, model_config, device, slot_dim, intent_dim, intent_weight=None):
super(JointXLMR, self).__init__()
self.slot_num_labels = slot_dim
self.intent_num_labels = intent_dim
self.device = device
self.intent_weight = intent_weight if intent_weight is not None else torch.tensor([1.]*intent_dim)
print(model_config['pretrained_weights'])
self.bert = AutoModel.from_pretrained(model_config['pretrained_weights'])
self.dropout = nn.Dropout(model_config['dropout'])
self.context = model_config['context']
self.finetune = model_config['finetune']
self.context_grad = model_config['context_grad']
self.hidden_units = model_config['hidden_units']
if self.hidden_units > 0:
if self.context:
self.intent_classifier = nn.Linear(self.hidden_units, self.intent_num_labels)
self.slot_classifier = nn.Linear(self.hidden_units, self.slot_num_labels)
self.intent_hidden = nn.Linear(2 * self.bert.config.hidden_size, self.hidden_units)
self.slot_hidden = nn.Linear(2 * self.bert.config.hidden_size, self.hidden_units)
else:
self.intent_classifier = nn.Linear(self.hidden_units, self.intent_num_labels)
self.slot_classifier = nn.Linear(self.hidden_units, self.slot_num_labels)
self.intent_hidden = nn.Linear(self.bert.config.hidden_size, self.hidden_units)
self.slot_hidden = nn.Linear(self.bert.config.hidden_size, self.hidden_units)
nn.init.xavier_uniform_(self.intent_hidden.weight)
nn.init.xavier_uniform_(self.slot_hidden.weight)
else:
if self.context:
self.intent_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.intent_num_labels)
self.slot_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.slot_num_labels)
else:
self.intent_classifier = nn.Linear(self.bert.config.hidden_size, self.intent_num_labels)
self.slot_classifier = nn.Linear(self.bert.config.hidden_size, self.slot_num_labels)
nn.init.xavier_uniform_(self.intent_classifier.weight)
nn.init.xavier_uniform_(self.slot_classifier.weight)
self.intent_loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=self.intent_weight)
self.slot_loss_fct = torch.nn.CrossEntropyLoss()
def forward(self, word_seq_tensor, word_mask_tensor, tag_seq_tensor=None, tag_mask_tensor=None,
intent_tensor=None, context_seq_tensor=None, context_mask_tensor=None):
if not self.finetune:
self.bert.eval()
with torch.no_grad():
outputs = self.bert(input_ids=word_seq_tensor,
attention_mask=word_mask_tensor)
else:
outputs = self.bert(input_ids=word_seq_tensor,
attention_mask=word_mask_tensor)
sequence_output = outputs[0]
pooled_output = outputs[1]
if self.context and (context_seq_tensor is not None):
if not self.finetune or not self.context_grad:
with torch.no_grad():
context_output = self.bert(input_ids=context_seq_tensor, attention_mask=context_mask_tensor)[1]
else:
context_output = self.bert(input_ids=context_seq_tensor, attention_mask=context_mask_tensor)[1]
sequence_output = torch.cat(
[context_output.unsqueeze(1).repeat(1, sequence_output.size(1), 1),
sequence_output], dim=-1)
pooled_output = torch.cat([context_output, pooled_output], dim=-1)
if self.hidden_units > 0:
sequence_output = nn.functional.relu(self.slot_hidden(self.dropout(sequence_output)))
pooled_output = nn.functional.relu(self.intent_hidden(self.dropout(pooled_output)))
sequence_output = self.dropout(sequence_output)
slot_logits = self.slot_classifier(sequence_output)
outputs = (slot_logits,)
pooled_output = self.dropout(pooled_output)
intent_logits = self.intent_classifier(pooled_output)
outputs = outputs + (intent_logits,)
if tag_seq_tensor is not None:
active_tag_loss = tag_mask_tensor.view(-1) == 1
active_tag_logits = slot_logits.view(-1, self.slot_num_labels)[active_tag_loss]
active_tag_labels = tag_seq_tensor.view(-1)[active_tag_loss]
slot_loss = self.slot_loss_fct(active_tag_logits, active_tag_labels)
outputs = outputs + (slot_loss,)
if intent_tensor is not None:
intent_loss = self.intent_loss_fct(intent_logits, intent_tensor)
outputs = outputs + (intent_loss,)
return outputs # slot_logits, intent_logits, (slot_loss), (intent_loss),
|
the-stack_106_28080 | # https://realpython.com/beautiful-soup-web-scraper-python/
# Why didn't I stumble upon this website a lot earlier! Need to review this subject for sure.
from splinter import Browser
from selenium import webdriver
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
import time
from flask import Flask, render_template
from flask_pymongo import PyMongo
# Most of the code here was transferred from the notebook; it didn't work as expected at first.
def scrape_urls():
executable_path = {"executable_path":"/usr/local/bin/chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
### Nasa Mars news - website 1 scrapped
# Url for Nasa Mars News
nasa_url = "https://mars.nasa.gov/news/"
browser.visit(nasa_url)
#Scrape page into Soup Object
html = browser.html
# parse HTML with BS
soup = bs(html, 'html.parser')
browser.is_element_present_by_css("li.slide", wait_time=2)
# search article for title and paragraph
article = soup.select_one("li.slide div.list_text")
## why this is not working when it works in ipynb??
title = article.find("div", class_="content_title").get_text()
paragraph = article.find("div", class_="article_teaser_body").get_text()
### JPL Mars Space Image - Featured
#URL for JPL Mars Space Images
jpl_mars_images = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(jpl_mars_images)
#pause to make sure the webpage to load first.
time.sleep(3)
#retrieve background-image url -- use .click method to go from one page to next page
#https://splinter.readthedocs.io/en/latest/finding.html <- use to click from this one page to next documentation
featured_image = browser.links.find_by_partial_text('FULL IMAGE')
featured_image.click()
time.sleep(3)
featured_image = browser.links.find_by_partial_text('more info')
featured_image.click()
time.sleep(3)
# after clicking and reach the right page, use Soup to scrape page into Soup object
html = browser.html
soup = bs(html, 'html.parser')
image_url = soup.select_one('figure.lede a img').get('src')
website_url = "http://www.jpl.nasa.gov"
featured_image_url = f"{website_url}{image_url}"
###Mars Weather
#Mars Facts
#Visit Space facts website - using pd.read_html that i found online and see from people's example
mars_fact_df = pd.read_html("https://space-facts.com/mars/")
mars_fact_df = mars_fact_df[0]
mars_fact_df = mars_fact_df.rename(columns = { 0 : "Description", 1 : "Values"})
mars_fact_df
html_table = mars_fact_df ##.to_html()
###Mars Hemispheres
# Visit hemispheres website through splinter module
hemispheres_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemispheres_url)
links = browser.find_by_css("a.product-item h3")
hemisphere_image_urls = []
for i in range(len(links)):
hemisphere = {}
browser.find_by_css("a.product-item h3")[i].click()
time.sleep(3)
sample_elem = browser.find_link_by_text('Sample').first
hemisphere['title'] = browser.find_by_css("h2.title").text
hemisphere['img_url'] = sample_elem['href']
hemisphere_image_urls.append(hemisphere)
browser.back()
browser.quit()
## create a function to call all of our return variable
#def callfuncs():
##title, paragraph, featured_image_url, html_table, hemisphere_image_urls = scrape_urls()
## create a dictionary to store all of our scraped variables
mars_data = {
"news_title": title,
"news_paragraph": paragraph,
"featured_image": featured_image_url,
        ## uncomment when you figure out the code "mars_weather": mars_weather,
"description": html_table,
"hemispheres": hemisphere_image_urls
}
print(mars_data)
return mars_data
if __name__ == "__main__":
scrape_urls() |
the-stack_106_28081 | #
# MIT License
#
# (C) Copyright 2020-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
'''
The purpose of this package is to bootstrap a trust relationship to the
service half of the cfs-trust domain. In order for trust to be established, a
node must wait for the cluster metadata values to be populated. Then, this
service injects those appropriate values into the local environment (via a file),
injects references to those values into the local SSHD configuration, and then
conditionally reloads the running instance of SSHD to pick up the new configuration.
Created on Nov 2, 2020
@author: jsl
'''
import logging
import sys
import os
import time
from requests.exceptions import RequestException, ConnectionError
from json.decoder import JSONDecodeError
from cfsssh.cloudinit.bss import get_global_metadata_key, BSSException
from cfsssh.setup.client.values import CERTIFICATE_PATH
from cfsssh.sshd import SSHD_CONFIG_PATH, reload
from cfsssh.setup.service.values import VAULT_GLOBAL_KEY
MAX_SLEEP_TIME = 16
LOG_LEVEL = logging.DEBUG
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(LOG_LEVEL)
lh = logging.StreamHandler(sys.stdout)
lh.setLevel(LOG_LEVEL)
LOGGER.addHandler(lh)
def write_certificate():
"""
We want to be able to write the key to our known location of CERTIFICATE_PATH,
but it may not exist yet. When it doesn't exist, we get a key error. Keep trying.
"""
raw_certificate = None
sleep_time = 0
    while not raw_certificate:
        try:
            raw_certificate = get_global_metadata_key(VAULT_GLOBAL_KEY)
        except (BSSException, KeyError, RequestException, ConnectionError, JSONDecodeError):
            LOGGER.info("Waiting for metadata service certificate.")
            # Linear backoff between attempts, capped at MAX_SLEEP_TIME seconds.
            if sleep_time < MAX_SLEEP_TIME:
                sleep_time += 1
            time.sleep(sleep_time)
LOGGER.info("Obtained certificate from metadata service.")
with open(CERTIFICATE_PATH, 'w') as certificate_file:
certificate_file.write(raw_certificate)
LOGGER.info("Wrote vault certificate to '%s'.",CERTIFICATE_PATH)
def conditionally_write_certificate():
"""
Checks for the certificate on the local root filesystem; if its
there, it does nothing. Otherwise, the key is written to disk.
"""
if os.path.exists(CERTIFICATE_PATH):
LOGGER.info("Local certificate exists; skipping.")
return
write_certificate()
def configure_sshd():
"""
The SSHD service may be running, or it may not be. In either case,
if we end up configuring the service, we need to reload it. This function
injects necessary values into sshd and then conditionally reloads it.
"""
expected_entry = 'TrustedUserCAKeys %s' % (CERTIFICATE_PATH)
with open(SSHD_CONFIG_PATH, 'r') as sshd_config_file:
sshd_configuration = sshd_config_file.read()
if expected_entry not in sshd_configuration:
LOGGER.info("'%s' requires bootstrapping; injecting values.", CERTIFICATE_PATH)
ssh_configuration = '%s\n\n%s' %(sshd_configuration, expected_entry)
with open(SSHD_CONFIG_PATH, 'w') as sshd_config_file:
sshd_config_file.write(ssh_configuration)
else:
LOGGER.info("No change in sshd configuration indicated; correct values already exist.")
reload()
# There could be a race condition here where our PID snapshot does
# not capture sshd. The window is small but theoretically possible.
# We simply reload again to cover it.
reload()
LOGGER.info("SSHD now trusts cfstrust certificates.")
def main():
LOGGER.info("CFS Trust Bootstrapping Setup started.")
conditionally_write_certificate()
configure_sshd()
LOGGER.info("CFS Trust Bootstrapping Setup complete.")
|
the-stack_106_28084 | import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
requires = ['click', 'google-api-python-client', 'oauth2client']
tests_requires = ['pytest', 'pytest-cache', 'pytest-cov']
lint_requires = ['flake8', 'black']
dev_requires = requires + tests_requires + lint_requires
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name="google-utility-cli",
version='0.0.0',
description="Simple utility to access google api",
long_description="\n\n".join([open("README.rst").read()]),
license='MIT',
author="Hong Nguyen",
author_email="[email protected]",
url="https://google-utility-cli.readthedocs.org",
packages=find_packages(),
install_requires=requires,
entry_points={'console_scripts': [
'li-google-utility = google_utility.cli:main']},
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython'],
extras_require={
'test': tests_requires,
'dev': dev_requires,
'lint': lint_requires,
},
cmdclass={'test': PyTest})
|
the-stack_106_28085 | """
Tiered shipping models
"""
from __future__ import unicode_literals
import datetime
import logging
import operator
from six.moves import reduce
from six import python_2_unicode_compatible
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.utils.translation import get_language, ugettext_lazy as _
from shipping.modules.base import BaseShipper
from livesettings.functions import config_value
log = logging.getLogger('shipping.Tiered')
class TieredPriceException(Exception):
def __init__(self, reason):
self.reason = reason
class Shipper(BaseShipper):
def __init__(self, carrier):
self.id = carrier.key
self.carrier = carrier
super(BaseShipper, self).__init__()
def __str__(self):
"""
This is mainly helpful for debugging purposes
"""
return "Tiered_Shipper: %s" % self.id
def description(self):
"""
A basic description that will be displayed to the user when selecting their shipping options
"""
return self.carrier.description
def cost(self):
"""
Complex calculations can be done here as long as the return value is a dollar figure
"""
assert(self._calculated)
if config_value('SHIPPING_TIERED', 'MIN_PRICE_FOR') == 'NOT_DISCOUNTABLE':
total = self.cart.undiscounted_total
else:
total = Decimal("0.00")
for cartitem in self.cart.cartitem_set.all():
if cartitem.product.is_shippable:
total += cartitem.line_total
return self.carrier.price(total)
def method(self):
"""
Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
"""
return self.carrier.method
def expectedDelivery(self):
"""
        Can be a plain string or complex calculation returning an actual date
"""
return self.carrier.delivery
def valid(self, order=None):
"""
Can do complex validation about whether or not this option is valid.
For example, may check to see if the recipient is in an allowed country
or location.
"""
if order:
if config_value('SHIPPING_TIERED', 'MIN_PRICE_FOR') == 'NOT_DISCOUNTABLE':
sub_total = order.sub_total
else:
itemprices = [item.sub_total for item in order.orderitem_set.all() if item.product.is_shippable]
if itemprices:
sub_total = reduce(operator.add, itemprices)
else:
sub_total = Decimal('0.00')
try:
price = self.carrier.price(sub_total)
except TieredPriceException:
return False
elif self.cart:
try:
price = self.cost()
except TieredPriceException:
return False
return True
@python_2_unicode_compatible
class Carrier(models.Model):
key = models.SlugField(_('Key'))
ordering = models.IntegerField(_('Ordering'), default=0)
active = models.BooleanField(_('Active'), default=True)
def _find_translation(self, language_code=None):
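        """Pick the best CarrierTranslation for a language code: exact match first,
        then the root language (e.g. 'en' for 'en-us'), then the site default
        language, then any available translation."""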
if not language_code:
language_code = get_language()
c = self.translations.filter(languagecode__exact=language_code)
ct = c.count()
if not c or ct == 0:
pos = language_code.find('-')
if pos > -1:
short_code = language_code[:pos]
log.debug("Carrier: Trying to find root language content for: [%s]", short_code)
c = self.translations.filter(languagecode__exact=short_code)
ct = c.count()
if ct > 0:
log.debug("Carrier: Found root language content for: [%s]", short_code)
if not c or ct == 0:
# log.debug("Trying to find default language content for: %s", self)
c = self.translations.filter(languagecode__istartswith=settings.LANGUAGE_CODE)
ct = c.count()
if not c or ct == 0:
# log.debug("Trying to find *any* language content for: %s", self)
c = self.translations.all()
ct = c.count()
if ct > 0:
trans = c[0]
else:
trans = None
return trans
def delivery(self):
"""Get the delivery, looking up by language code, falling back intelligently.
"""
trans = self._find_translation()
if trans:
return trans.delivery
else:
return ""
delivery = property(delivery)
def description(self):
"""Get the description, looking up by language code, falling back intelligently.
"""
trans = self._find_translation()
if trans:
return trans.description
else:
return ""
description = property(description)
def method(self):
"""Get the description, looking up by language code, falling back intelligently.
"""
trans = self._find_translation()
if trans:
return trans.method
else:
return ""
method = property(method)
def name(self):
"""Get the name, looking up by language code, falling back intelligently.
"""
trans = self._find_translation()
if trans:
return trans.name
else:
return ""
name = property(name)
def price(self, total):
"""Get a price for this total."""
if total == 0:
total = Decimal('0.00') # total was "0E-8", which breaks mysql
# first check for special discounts
prices = self.tiers.filter(expires__isnull=False, min_total__lte=total).exclude(expires__lt=datetime.date.today())
if not prices.count() > 0:
prices = self.tiers.filter(expires__isnull=True, min_total__lte=total)
if prices.count() > 0:
# Get the price with the quantity closest to the one specified without going over
return Decimal(prices.order_by('-min_total')[0].price)
else:
log.debug("No tiered price found for %s: total=%s", self, total)
raise TieredPriceException('No price available')
def __str__(self):
return "Carrier: %s" % self.name
class Meta:
ordering = ('ordering',)
class CarrierTranslation(models.Model):
carrier = models.ForeignKey('Carrier', related_name='translations', on_delete=models.CASCADE)
languagecode = models.CharField(_('language'), max_length=10, choices=settings.LANGUAGES, )
name = models.CharField(_('Carrier'), max_length=50, )
description = models.CharField(_('Description'), max_length=200)
method = models.CharField(_('Method'), help_text=_("i.e. US Mail"), max_length=200)
delivery = models.CharField(_('Delivery Days'), max_length=200)
class Meta:
ordering = ('languagecode', 'name')
@python_2_unicode_compatible
class ShippingTier(models.Model):
carrier = models.ForeignKey('Carrier', related_name='tiers', on_delete=models.CASCADE)
min_total = models.DecimalField(_("Min Price"), max_digits=10, decimal_places=2,
help_text=_('The minimum price for this tier to apply'))
price = models.DecimalField(_("Shipping Price"), max_digits=10, decimal_places=2, )
expires = models.DateField(_("Expires"), null=True, blank=True)
def __str__(self):
return "ShippingTier: %s @ %s" % (self.price, self.min_total)
class Meta:
ordering = ('carrier', 'price')
from . import config
|
the-stack_106_28086 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cirq
def test_gate_calls_validate():
class ValiGate(cirq.Gate):
def validate_args(self, qubits):
if len(qubits) == 3:
raise ValueError()
g = ValiGate()
q00 = cirq.NamedQubit('q00')
q01 = cirq.NamedQubit('q01')
q10 = cirq.NamedQubit('q10')
_ = g.on(q00)
_ = g.on(q01)
_ = g.on(q00, q10)
with pytest.raises(ValueError):
_ = g.on(q00, q10, q01)
_ = g(q00)
_ = g(q00, q10)
with pytest.raises(ValueError):
_ = g(q10, q01, q00)
|
the-stack_106_28087 | #!/usr/bin/env python3
import json
from gimme.api import GimmeRequest, GimmeCall
from gimme.auth import GimmeAuth
class Gimme(object):
def __init__(self, auth_token):
self.auth_token = auth_token
    def request(self, check_success=True, **kwargs):
req = GimmeRequest(self.auth_token, **kwargs)
return req
def follow_user(self, username):
kwargs = {'cmd': 'set_favorite_celebrity_insert',
'celebrityCode': username}
return self.request(**kwargs)
def unfollow_user(self, username):
kwargs = {'cmd': 'set_favorite_celebrity_delete',
'celebrityCode': username}
return self.request(**kwargs)
def timeline(self, username=False, type=0, max_timestamp=False):
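        """Fetch one page of messages: a single celebrity's feed when `username`
        is given, otherwise the follower timeline; pass `max_timestamp` to page
        back past a previous batch."""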
kwargs = {}
if username:
kwargs['cmd'] = 'get_celebrity_message_list_for_fan'
kwargs['code'] = username
else:
kwargs['cmd'] = 'get_celebrity_timeline_list'
kwargs['type'] = str(type)
if max_timestamp:
kwargs['createdAt'] = max_timestamp
response = self.request(**kwargs)
feed = response['list']
return feed
def itimeline(self, username=False, type=0, max_timestamp=False):
"""Like timeline(), except that it returns an generator and yields
messages until it hits an empty feed."""
max_timestamp = max_timestamp
while True:
feed = self.timeline(type=type,
username=username,
max_timestamp=max_timestamp)
if len(feed) == 0:
break
yield feed
max_timestamp = feed[-1]['createdAt']
@property
def user_info(self):
kwargs = {'cmd': 'get_my_profile'}
response = self.request(**kwargs)
return response
@property
def follows(self):
kwargs = {'cmd': 'get_favorite_celebrity_list'}
return self.request(**kwargs)
@property
def celebrities(self):
kwargs = {'cmd': 'get_celebrity_list'}
return self.request(**kwargs)
if __name__ == "__main__":
pass
|
the-stack_106_28089 | import argparse
import json
import logging
import sys
import random as rand
import numpy as np
import experiments
from experiments import plotting
from datetime import datetime
from data import loader
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def run_experiment(experiment_details, experiment, timing_key, dim, skiprerun, verbose, timings):
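    """Run one experiment type over every dataset's details, optionally skipping the
    main run (--skiprerun) and only clustering at the given dimension; the elapsed
    time in seconds is stored under timing_key in the timings dict."""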
t = datetime.now()
for details in experiment_details:
exp = experiment(details, verbose=verbose)
if not skiprerun:
logger.info("Running {} experiment: {} ({})".format(timing_key, details.ds_readable_name, dim))
logger.info(" Details: {}".format(details))
exp.perform()
if dim is not None:
logger.info("Running with dimension {}".format(dim))
if skiprerun:
logger.info(" Details: {}".format(details))
exp.perform_cluster(dim)
t_d = datetime.now() - t
timings[timing_key] = t_d.seconds
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Perform some UL and DR')
parser.add_argument('--threads', type=int, default=1, help='Number of threads (defaults to 1, -1 for auto)')
parser.add_argument('--seed', type=int, help='A random seed to set, if desired')
parser.add_argument('--dim', type=int, help='The dim parameter to use for clustering with a specific experiment '
'(This MUST be used with a specific experiment)')
parser.add_argument('--skiprerun', action='store_true',
help='If true, do not re-run the main experiment before clustering '
'(This MUST be used with --dim and a specific experiment)')
parser.add_argument('--dataset1', action='store_true', help='Run only data set 1')
parser.add_argument('--dataset2', action='store_true', help='Run only data set 2')
parser.add_argument('--benchmark', action='store_true', help='Run the benchmark experiments')
parser.add_argument('--ica', action='store_true', help='Run the ICA experiments')
parser.add_argument('--pca', action='store_true', help='Run the PCA experiments')
parser.add_argument('--lda', action='store_true', help='Run the LDA experiments')
parser.add_argument('--svd', action='store_true', help='Run the SVD experiments')
parser.add_argument('--rf', action='store_true', help='Run the RF experiments')
parser.add_argument('--rp', action='store_true', help='Run the RP experiments')
parser.add_argument('--all', action='store_true', help='Run all experiments')
parser.add_argument('--plot', action='store_true', help='Plot data results')
parser.add_argument('--verbose', action='store_true', help='If true, provide verbose output')
args = parser.parse_args()
verbose = args.verbose
threads = args.threads
if args.dim or args.skiprerun:
if not args.ica and not args.pca and not args.rf and not args.rp and not args.lda and not args.svd:
logger.error("Cannot specify dimension/skiprerun without specifying a specific experiment")
parser.print_help()
sys.exit(1)
if args.skiprerun and not args.dim:
logger.error("Cannot specify skiprerun without specifying a specific experiment")
parser.print_help()
sys.exit(1)
if args.dataset1 and args.dataset2:
logger.error("Can only specify one of '--dataset1' or '--dataset2', not both")
parser.print_help()
sys.exit(1)
seed = args.seed
if seed is None:
seed = np.random.randint(0, (2 ** 32) - 1)
logger.info("Using seed {}".format(seed))
np.random.seed(seed)
rand.seed(seed)
logger.info("Loading data")
logger.info("----------")
datasets = []
dataset1_details = {
'data': loader.SeizureRecognitionData(verbose=verbose, seed=seed),
'name': 'seizure_recognition',
'readable_name': 'Seizure Recognition',
'best_nn_params': {'NN__activation': ['logistic'], 'NN__alpha': [0.01],
'NN__hidden_layer_sizes': [(89, 89, 89)], 'NN__learning_rate_init': [0.008]}
}
dataset2_details = {
'data': loader.CreditDefaultData(verbose=verbose, seed=seed),
'name': 'credit_default',
'readable_name': 'Credit Default',
'best_nn_params': {'NN__activation': ['relu'], 'NN__alpha': [0.01],
'NN__hidden_layer_sizes': [(11, 11, 11)], 'NN__learning_rate_init': [0.128]}
}
if args.dataset1:
datasets.append(dataset1_details)
elif args.dataset2:
datasets.append(dataset2_details)
elif not args.dataset1 and not args.dataset2:
datasets.append(dataset1_details)
datasets.append(dataset2_details)
experiment_details = []
for ds in datasets:
data = ds['data']
data.load_and_process()
data.build_train_test_split()
data.scale_standard()
experiment_details.append(experiments.ExperimentDetails(
data, ds['name'], ds['readable_name'], ds['best_nn_params'],
threads=threads,
seed=seed
))
if args.all or args.benchmark or args.ica or args.pca or args.lda or args.svd or args.rf or args.rp:
if verbose:
logger.info("----------")
logger.info("Running experiments")
timings = {}
if args.benchmark or args.all:
run_experiment(experiment_details, experiments.BenchmarkExperiment, 'Benchmark', args.dim, args.skiprerun,
verbose, timings)
if args.ica or args.all:
run_experiment(experiment_details, experiments.ICAExperiment, 'ICA', args.dim, args.skiprerun,
verbose, timings)
if args.pca or args.all:
run_experiment(experiment_details, experiments.PCAExperiment, 'PCA', args.dim, args.skiprerun,
verbose, timings)
# NOTE: These were experimented with but ultimately were not used for this assignment.
# if args.lda or args.all:
# run_experiment(experiment_details, experiments.LDAExperiment, 'LDA', args.dim, args.skiprerun,
# verbose, timings)
# if args.svd or args.all:
# run_experiment(experiment_details, experiments.SVDExperiment, 'SVD', args.dim, args.skiprerun,
# verbose, timings)
if args.rf or args.all:
run_experiment(experiment_details, experiments.RFExperiment, 'RF', args.dim, args.skiprerun,
verbose, timings)
if args.rp or args.all:
run_experiment(experiment_details, experiments.RPExperiment, 'RP', args.dim, args.skiprerun,
verbose, timings)
logger.info("Timings: {}".format(timings))
if args.plot:
if verbose:
logger.info("----------")
logger.info("Plotting results")
plotting.plot_results()
|
the-stack_106_28091 | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import argparse
import pathlib
class Args(argparse.ArgumentParser):
"""
Defines global default arguments.
"""
def __init__(self, **overrides):
"""
Args:
**overrides (dict, optional): Keyword arguments used to override default argument values
"""
super().__init__(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.add_argument('--seed', default=42, type=int, help='Seed for random number generators')
self.add_argument('--resolution', default=256, type=int, help='Resolution of images')
# Data parameters
self.add_argument('--challenge', choices=['singlecoil', 'multicoil'], required=True,
help='Which challenge')
self.add_argument('--data-path', type=pathlib.Path, required=True,
help='Path to the dataset')
self.add_argument('--sample-rate', type=float, default=1.,
help='Fraction of total volumes to include')
# Mask parameters
self.add_argument('--accelerations', nargs='+', default=[8], type=int,
help='Ratio of k-space columns to be sampled. If multiple values are '
'provided, then one of those is chosen uniformly at random for '
'each volume.')
self.add_argument('--center-fractions', nargs='+', default=[0.04], type=float,
help='Fraction of low-frequency k-space columns to be sampled. Should '
'have the same length as accelerations')
# Override defaults with passed overrides
self.set_defaults(**overrides)
|
the-stack_106_28093 | import json
import uuid
import numpy as np
import pandas as pd
from vgn.grasp import Grasp
from vgn.perception import *
from vgn.utils.transform import Rotation, Transform
def write_setup(root, size, intrinsic, max_opening_width, finger_depth):
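    """Record the workspace size, camera intrinsics and gripper geometry as
    setup.json under the dataset root."""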
data = {
"size": size,
"intrinsic": intrinsic.to_dict(),
"max_opening_width": max_opening_width,
"finger_depth": finger_depth,
}
write_json(data, root / "setup.json")
def read_setup(root):
data = read_json(root / "setup.json")
size = data["size"]
intrinsic = CameraIntrinsic.from_dict(data["intrinsic"])
max_opening_width = data["max_opening_width"]
finger_depth = data["finger_depth"]
return size, intrinsic, max_opening_width, finger_depth
def write_sensor_data(root, depth_imgs, extrinsics):
scene_id = uuid.uuid4().hex
path = root / "scenes" / (scene_id + ".npz")
np.savez_compressed(path, depth_imgs=depth_imgs, extrinsics=extrinsics)
return scene_id
def read_sensor_data(root, scene_id):
data = np.load(root / "scenes" / (scene_id + ".npz"))
return data["depth_imgs"], data["extrinsics"]
def write_grasp(root, scene_id, grasp, label):
# TODO concurrent writes could be an issue
csv_path = root / "grasps.csv"
if not csv_path.exists():
create_csv(
csv_path,
["scene_id", "qx", "qy", "qz", "qw", "x", "y", "z", "width", "label"],
)
qx, qy, qz, qw = grasp.pose.rotation.as_quat()
x, y, z = grasp.pose.translation
width = grasp.width
append_csv(csv_path, scene_id, qx, qy, qz, qw, x, y, z, width, label)
def read_grasp(df, i):
scene_id = df.loc[i, "scene_id"]
orientation = Rotation.from_quat(df.loc[i, "qx":"qw"].to_numpy(np.double))
position = df.loc[i, "x":"z"].to_numpy(np.double)
width = df.loc[i, "width"]
label = df.loc[i, "label"]
grasp = Grasp(Transform(orientation, position), width)
return scene_id, grasp, label
def read_df(root):
return pd.read_csv(root / "grasps.csv")
def write_df(df, root):
df.to_csv(root / "grasps.csv", index=False)
def write_voxel_grid(root, scene_id, voxel_grid):
path = root / "scenes" / (scene_id + ".npz")
np.savez_compressed(path, grid=voxel_grid)
def read_voxel_grid(root, scene_id):
path = root / "scenes" / (scene_id + ".npz")
return np.load(path)["grid"]
def read_json(path):
with path.open("r") as f:
data = json.load(f)
return data
def write_json(data, path):
with path.open("w") as f:
json.dump(data, f, indent=4)
def create_csv(path, columns):
with path.open("w") as f:
f.write(",".join(columns))
f.write("\n")
def append_csv(path, *args):
row = ",".join([str(arg) for arg in args])
with path.open("a") as f:
f.write(row)
f.write("\n")
|
the-stack_106_28094 | import logging
import random
import signal
import habitat
from habitat.profiling.operation import OperationProfiler
from database import Recorder
logger = logging.getLogger(__name__)
class Measurer:
def __init__(
self,
op_name,
recorder_config,
index_to_config,
config_to_profiler_args,
index_filter=None,
):
self._op_name = op_name
self._recorder_config = recorder_config
self._index_to_config = index_to_config
self._config_to_profiler_args = config_to_profiler_args
self._index_filter = index_filter
self._shutdown_early = False
self._initialize()
def _initialize(self):
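        # Install SIGINT/SIGTERM handlers so a shutdown request lets the current
        # measurement finish and commit before exiting.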
def signal_handler(signal, frame):
logger.info('Received shutdown command. Will shutdown after '
'completing current measurement.')
self._shutdown_early = True
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def add_args(self, parser):
parser.add_argument('device', type=str)
parser.add_argument('--seed', type=int, default=1337)
parser.add_argument('--num-points', type=int, default=200000)
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--world-size', type=int, default=1)
parser.add_argument('--no-kernels', action='store_true')
parser.add_argument('--skip', type=int)
def measure_configurations(self, args, num_configs):
# Store the arguments for future use
self._args = args
if args.rank >= args.world_size:
raise ValueError('Rank must be less than world size.')
if args.num_points % args.world_size != 0:
raise ValueError(
'Number of points must be divisible by the world size.')
# Want to ensure we measure the same configurations across each device
random.seed(args.seed)
logger.info('Total configurations: %d', num_configs)
to_record = random.sample(range(num_configs), args.num_points)
if self._index_filter is not None:
to_record = list(filter(
lambda idx: self._index_filter(args, idx),
to_record,
))
slice_size = len(to_record) // args.world_size
else:
slice_size = args.num_points // args.world_size
logger.info("Total configurations after filtering: %d", len(to_record))
logger.info("Slice size: %d", slice_size)
if args.world_size != 1:
# If we split the sample set across multiple workers, we
# want to increase the number of overlapping samples between
# a machine with just one worker if this recording script is
# stopped early. This is because the workers process the
# configurations sequentially.
random.shuffle(to_record)
offset = slice_size * args.rank
to_record = to_record[offset:offset + slice_size]
file_name = '{}-{}-{}.sqlite'.format(
self._op_name,
args.device,
args.rank,
)
self._recorder = Recorder(file_name, self._recorder_config)
num_recordings = self._recorder.get_num_recordings()
# We make 2 recordings per configuration
num_configs_measured = num_recordings // 2
logger.info(
"--- Found %d recordings in %s, so skipping the first %d configurations ---",
num_recordings,
file_name,
num_configs_measured,
)
# A device doesn't need to be passed in here
self._profiler = OperationProfiler(device=None, measure_for=3)
logger.info('Warming up...')
self._measure(self._index_to_config(args, to_record[0]))
self._measure(self._index_to_config(args, to_record[1]))
self._measure(self._index_to_config(args, to_record[2]))
logger.info(
'Starting to record. This process records slice %d of %d.',
args.rank + 1,
args.world_size,
)
try:
for idx, config_id in enumerate(to_record):
if idx < num_configs_measured:
continue
if args.skip is not None and idx < args.skip:
continue
config = self._index_to_config(args, config_id)
self._record(config, *self._measure(config))
if (idx + 1) % 100 == 0:
logger.info('[{}/{}] Processed'.format(idx + 1, slice_size))
if idx % 100 == 0:
self._recorder.commit()
if self._shutdown_early:
break
finally:
self._recorder.commit()
def _measure(self, config):
try:
kwargs = self._config_to_profiler_args(config)
if kwargs is None:
return None, None
return self._profiler.measure_operation(
record_kernels=not self._args.no_kernels,
**kwargs,
)
except RuntimeError as e:
msg = str(e)
if ("out of memory" not in msg and
"cuDNN error" not in msg and
"Calculated padded" not in msg):
logger.exception('Unexpected error during measurement.')
return None, None
def _record(self, config, forward_result, backward_result):
if forward_result is not None:
self._recorder.record(
config=config,
is_forward=True,
run_time_ms=forward_result.run_time_ms,
recorded_kernels=forward_result.kernels,
)
if backward_result is not None:
self._recorder.record(
config=config,
is_forward=False,
run_time_ms=backward_result.run_time_ms,
recorded_kernels=backward_result.kernels,
)
|
the-stack_106_28095 | from scipy.stats import beta
def beta_pdf(a, b, values):
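    """Discretize a Beta(a, b) density over `values` (assumed sorted, in [0, 1]):
    each value gets pdf(value) times the width of the bin bounded by the midpoints
    to its neighbours, with the last bin extending to 1.0."""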
probs = []
start = 0
for idx in range(len(values) - 1):
end = values[idx] + (values[idx + 1] - values[idx]) / 2
w = end - start
pdf = beta.pdf(values[idx], a, b).item()
probs.append(pdf * w)
start = end
w = 1.0 - start
pdf = beta.pdf(values[-1], a, b).item()
probs.append(pdf * w)
return probs
|
the-stack_106_28098 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""Lambda function to reserve available CIDR blocks"""
import os
import logging
import traceback
from utils import cidr_lookups, cidr_lock
from utils.cidr_lookups import InputValidationError, NoValidSubnetError, InvalidCloudProviderError, MissingRegionError
# Initialize Logger
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
# CIDR DDB Table
ALLOCATED_CIDR_DDB_TABLE_NAME = os.environ['ALLOCATED_CIDR_DDB_TABLE_NAME']
def handler(event, context):
"""Lambda handler"""
try:
LOGGER.info('Received CIDR reserve request event: %s', event)
try:
# Extract and validate request params
request_params = cidr_lookups.extract_post_request_params(event)
except InputValidationError as err:
LOGGER.error(err)
return {
'statusCode': 400,
'body': str(err.message)
}
# Unpack params
account_alias = request_params.get('account_alias')
cidr_size = request_params.get('size')
region = request_params.get('region')
cloud_provider = request_params.get('cloud_provider')
LOGGER.info("Request info: subnet size {}, region {}, account_alias {}, cloud {}"
.format(cidr_size, region, region, account_alias, cloud_provider))
# Get CIDR lock
try:
cidr_lock.sync_obtain_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
except cidr_lock.FailedToGetLockException:
LOGGER.exception("Returning after failed to get lock:{}".format(cidr_lock.FailedToGetLockException))
return {
'statusCode': 500,
'body': "Failed to get CIDR table lock."
}
# Retrieve regions CIDR list
try:
region_cidr_list = cidr_lookups.retrieve_region_cidr(region, cloud_provider)
except InvalidCloudProviderError:
# Clear CIDR lock
cidr_lock.clear_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
return {
'statusCode': 400,
'body': "Invalid cloud provider."
}
except MissingRegionError:
# Clear CIDR lock
cidr_lock.clear_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
return {
'statusCode': 404,
'body': "No root CIDR list found for the specified region."
}
LOGGER.info("Retrieved region CIDR list: %s", region_cidr_list)
# Retrieve allocated VPC CIDRs in region
locked_cidr_list = cidr_lookups.retrieve_used_cidrs(region, False, False, cloud_provider.lower(),
ALLOCATED_CIDR_DDB_TABLE_NAME)
LOGGER.info('Retrieve locked CIDR blocks in %s: %s', region, locked_cidr_list)
# Find the next available CIDR, if one exists
try:
available_cidr = cidr_lookups.find_available_cidr(region_cidr_list,
locked_cidr_list,
cidr_size)
except NoValidSubnetError as e:
traceback.print_exc()
LOGGER.info("No valid subnet found: %s", str(e))
# Clear CIDR lock
cidr_lock.clear_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
return {
'statusCode': 404,
'body': "No CIDR blocks of appropriate size found."
}
# Reserve CIDR
LOGGER.info('Allocating CIDR block %s in %s', available_cidr, region)
response = cidr_lookups.reserve_cidr(available_cidr, region,
account_alias, cloud_provider,
ALLOCATED_CIDR_DDB_TABLE_NAME)
LOGGER.info('CIDR allocation status: %s', response)
# Clear CIDR lock
cidr_lock.clear_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
return response
except Exception as error:
traceback.print_exc()
LOGGER.error("Error: %s", str(error))
# Clear CIDR lock
cidr_lock.clear_table_lock(ALLOCATED_CIDR_DDB_TABLE_NAME)
return {
'statusCode': 500,
'body': str(error)
}
|
the-stack_106_28099 | import os
import random
import argparse
import numpy as np
from sklearn.preprocessing import StandardScaler
import joblib
import tensorflow as tf
from tensorflow import keras as K
import gym
from fn_framework import FNAgent, Trainer, Observer, Experience
tf.compat.v1.disable_eager_execution()
################################################################################
class PolicyGradientAgent(FNAgent):
#
def __init__(self, actions):
# PolicyGradientAgent使用自身的策略(而非epsilon)
super().__init__(epsilon = 0.0, actions = actions)
self.estimate_probs = True
self.scaler = StandardScaler()
self._updater = None
#
def save(self, model_path):
super().save(model_path)
joblib.dump(self.scaler, self.scaler_path(model_path))
#
@classmethod
def load(cls, env, model_path):
actions = list(range(env.action_space.n))
agent = cls(actions)
agent.model = K.models.load_model(model_path)
agent.initialized = True
agent.scaler = joblib.load(agent.scaler_path(model_path))
return agent
#
def scaler_path(self, model_path):
fname, _ = os.path.splitext(model_path)
fname += "_scaler.pkl"
return fname
#
def initialize(self, experiences, optimizer):
states = np.vstack([e.s for e in experiences])
feature_size = states.shape[1]
self.model = K.models.Sequential([
K.layers.Dense(10, activation = "relu", input_shape = (feature_size,)),
K.layers.Dense(10, activation = "relu"),
K.layers.Dense(len(self.actions), activation = "softmax")
])
self.set_updater(optimizer)
self.scaler.fit(states)
self.initialized = True
print("Done initialization. From now, begin training!")
#
def set_updater(self, optimizer):
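        # Build a Keras backend function implementing the policy-gradient update:
        # loss = -log(pi(a|s)) * reward, averaged over the batch.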
actions = tf.compat.v1.placeholder(shape = (None), dtype = "int32")
rewards = tf.compat.v1.placeholder(shape = (None), dtype = "float32")
one_hot_actions = tf.one_hot(actions, len(self.actions), axis = 1)
action_probs = self.model.output
selected_action_probs = tf.reduce_sum(one_hot_actions * action_probs,
axis = 1)
clipped = tf.clip_by_value(selected_action_probs, 1e-10, 1.0)
loss = - tf.math.log(clipped) * rewards
loss = tf.reduce_mean(loss)
updates = optimizer.get_updates(loss = loss,
params = self.model.trainable_weights)
self._updater = K.backend.function(
inputs = [self.model.input,
actions, rewards],
outputs = [loss],
updates = updates)
#
def estimate(self, s):
normalized = self.scaler.transform(s)
action_probs = self.model.predict(normalized)[0]
return action_probs
#
def update(self, states, actions, rewards):
normalizeds = self.scaler.transform(states)
actions = np.array(actions)
rewards = np.array(rewards)
self._updater([normalizeds, actions, rewards])
################################################################################
class CartPoleObserver(Observer):
#
def transform(self, state):
return np.array(state).reshape((1, -1))
################################################################################
class PolicyGradientTrainer(Trainer):
#
def __init__(self, buffer_size = 256, batch_size = 32, gamma = 0.9,
report_interval = 10, log_dir = ""):
super().__init__(buffer_size, batch_size, gamma,
report_interval, log_dir)
#
def train(self, env, episode_count = 220, initial_count = -1, render = False):
actions = list(range(env.action_space.n))
agent = PolicyGradientAgent(actions)
self.train_loop(env, agent, episode_count, initial_count, render)
return agent
#
def episode_begin(self, episode, agent):
if agent.initialized:
self.experiences = []
#
def make_batch(self, policy_experiences):
length = min(self.batch_size, len(policy_experiences))
batch = random.sample(policy_experiences, length)
states = np.vstack([e.s for e in batch])
actions = [e.a for e in batch]
rewards = [e.r for e in batch]
scaler = StandardScaler()
rewards = np.array(rewards).reshape((-1, 1))
rewards = scaler.fit_transform(rewards).flatten()
return states, actions, rewards
#
def episode_end(self, episode, step_count, agent):
rewards = [e.r for e in self.get_recent(step_count)]
self.reward_log.append(sum(rewards))
if not agent.initialized:
if len(self.experiences) == self.buffer_size:
optimizer = K.optimizers.Adam(learning_rate = 0.01)
agent.initialize(self.experiences, optimizer)
self.training = True
else:
policy_experiences = []
for t, e in enumerate(self.experiences):
s, a, r, n_s, d = e
d_r = [_r * (self.gamma ** i) for i, _r in
enumerate(rewards[t:])]
d_r = sum(d_r)
d_e = Experience(s, a, d_r, n_s, d)
policy_experiences.append(d_e)
agent.update(*self.make_batch(policy_experiences))
if self.is_event(episode, self.report_interval):
recent_rewards = self.reward_log[-self.report_interval:]
self.logger.describe("reward", recent_rewards, episode = episode)
################################################################################
def main(play):
env = CartPoleObserver(gym.make("CartPole-v0"))
trainer = PolicyGradientTrainer()
path = trainer.logger.path_of("policy_gradient_agent.h5")
if play:
agent = PolicyGradientAgent.load(env, path)
agent.play(env)
else:
trained = trainer.train(env)
trainer.logger.plot("Rewards", trainer.reward_log,
trainer.report_interval)
trained.save(path)
################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "PG Agent")
parser.add_argument("--play", action = "store_true",
help = "play with trained model")
args = parser.parse_args()
main(args.play)
|
the-stack_106_28101 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import matplotlib.pyplot as plt
import torch
import tqdm
from torchsde import BrownianTree
def run_torch(ks=(0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12)):
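    # For each cache depth k, time BrownianTree construction and the queries at the
    # pre-sampled time points in `ts`, then plot construction, query and total times.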
w0 = torch.zeros(b, d)
t_cons = []
t_queries = []
t_alls = []
for k in tqdm.tqdm(ks):
now = time.time()
bm_vanilla = BrownianTree(t0=t0, t1=t1, w0=w0, cache_depth=k)
t_con = time.time() - now
t_cons.append(t_con)
now = time.time()
for t in ts:
bm_vanilla(t).to(device)
t_query = time.time() - now
t_queries.append(t_query)
t_all = t_con + t_query
t_alls.append(t_all)
logging.warning(f'k={k}, t_con={t_con:.4f}, t_query={t_query:.4f}, t_all={t_all:.4f}')
img_path = os.path.join('.', 'diagnostics', 'plots', 'profile_btree.png')
plt.figure()
plt.plot(ks, t_cons, label='cons')
plt.plot(ks, t_queries, label='queries')
plt.plot(ks, t_alls, label='all')
plt.title(f'b={b}, d={d}, repetitions={reps}, device={w0.device}')
plt.xlabel('Cache level')
plt.ylabel('Time (secs)')
plt.legend()
plt.savefig(img_path)
plt.close()
def main():
run_torch()
if __name__ == "__main__":
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.manual_seed(0)
reps = 500
b, d = 512, 10
t0, t1 = 0., 1.
ts = torch.rand(size=(reps,)).numpy()
main()
|
the-stack_106_28103 | import os
import discord
import aiohttp
import random
import time
import rethinkdb as r
from discord.ext import commands
from collections import Counter
from datetime import datetime
from pyfiglet import Figlet
from config import database, prefixes, token, webhooks
def _prefixes(bot, msg):
return commands.when_mentioned_or(*prefixes)(bot, msg)
class UniversalBot(commands.AutoShardedBot):
def __init__(self):
super().__init__(
command_prefix=_prefixes,
description="bad bot",
status=discord.Status.dnd,
activity=discord.Game(name="Starting up..."),
pm_help=False,
help_attrs={
"hidden": True
}
)
self._last_exception = None
self.counter = Counter()
self.command_usage = Counter()
async def _init_rethink():
r.set_loop_type("asyncio")
self.r_conn = await r.connect(
host=database["host"],
port=database["port"],
db=database["db"],
user=database["user"],
password=database["password"]
)
self.loop.create_task(_init_rethink())
for file in os.listdir("modules"):
if file.endswith(".py"):
name = file[:-3]
try:
self.load_extension(f"modules.{name}")
except Exception as e:
print(f"Failed to load {name}: {e}")
async def on_command_error(self, context, exception):
if isinstance(exception, commands.CommandNotFound):
return
async def on_command(self, ctx):
try:
if ctx.author.id not in [227110473466773504, 302523498226647041]:
self.command_usage[str(ctx.command)] += 1
except:
pass
try:
if ctx.author.id not in [227110473466773504, 302523498226647041]:
async with aiohttp.ClientSession() as cs:
webhook = discord.Webhook.from_url(
url=webhooks["command"],
adapter=discord.AsyncWebhookAdapter(cs)
)
await webhook.send(f"[`{datetime.utcnow().strftime('%m-%d-%Y %H:%M:%S')}`] [`{ctx.guild.name} "
f"({ctx.guild.id})`] User **{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})** "
f"ran the command **{ctx.command.name}**.")
except Exception as e:
async with aiohttp.ClientSession() as cs:
webhook = discord.Webhook.from_url(
url=webhooks["command"],
adapter=discord.AsyncWebhookAdapter(cs)
)
await webhook.send(f"Command Logger Failed:\n`{type(e).__name__}`\n```py\n{e}\n```")
async def send_cmd_help(self, ctx):
if ctx.invoked_subcommand:
pages = await self.formatter.format_help_for(ctx, ctx.invoked_subcommand)
for page in pages:
await ctx.send(page)
else:
pages = await self.formatter.format_help_for(ctx, ctx.command)
for page in pages:
await ctx.send(page)
async def __level_handler(self, message):
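        # Award 10-40 XP both per guild and globally, at most once every 120 seconds
        # per user, triggered with a 1-in-10 chance per qualifying message.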
if not isinstance(message.channel, discord.TextChannel):
return
if message.content == "" or not len(message.content) > 5:
return
if random.randint(1, 10) == 1:
author = message.author
level_system = await r.table("levelSystem").get(str(author.id)).run(self.r_conn)
            guildXP = await r.table("guildXP").get(str(message.guild.id)).run(self.r_conn)
if not guildXP or not guildXP.get(str(message.author.id)):
data = {
str(message.author.id): {
"lastxp": str(int(time.time())),
"xp": 0
}
}
                if not guildXP:
                    data["id"] = str(message.guild.id)
                    # The guild document does not exist yet, so create it instead of updating.
                    return await r.table("guildXP").insert(data).run(self.r_conn)
                return await r.table("guildXP").get(str(message.guild.id)).update(data).run(self.r_conn)
if (int(time.time()) - int(guildXP.get(str(message.author.id))["lastxp"])) >= 120:
xp = guildXP.get(str(message.author.id))["xp"] + random.randint(10, 40)
data = {
str(message.author.id): {
"xp": xp,
"lastxp": str(int(time.time()))
}
}
await r.table("guildXP").get(str(message.guild.id)).update(data).run(self.r_conn)
if not level_system:
data = {
"id": str(author.id),
"xp": 0,
"lastxp": "0",
"blacklisted": False,
"lastxptimes": []
}
return await r.table("levelSystem").insert(data).run(self.r_conn)
if level_system.get("blacklisted", False):
return
if (int(time.time()) - int(level_system["lastxp"])) >= 120:
lastxptimes = level_system["lastxptimes"]
lastxptimes.append(str(int(time.time())))
xp = level_system["xp"] + random.randint(10, 40)
data = {
"xp": xp,
"lastxp": str(int(time.time())),
"lastxptimes": lastxptimes
}
await r.table("levelSystem").get(str(author.id)).update(data).run(self.r_conn)
async def on_message(self, message):
self.counter["messages_read"] += 1
if message.author.bot:
return
await self.process_commands(message)
await self.__level_handler(message)
async def close(self):
self.r_conn.close()
# self.redis.close()
await super().close()
async def on_shard_ready(self, shard_id):
print(f"Shard {shard_id} Connected.")
async def on_ready(self):
if not hasattr(self, "uptime"):
self.uptime = datetime.utcnow()
print(Figlet().renderText("UniversalBot"))
print(f"Shards: {self.shard_count}")
print(f"Servers: {len(self.guilds)}")
print(f"Users: {len(set(self.get_all_members()))}")
await self.change_presence(
status=discord.Status.online,
activity=discord.Game(f"{prefixes[0]}help | {self.shard_count} Shards")
)
def bot_uptime(self):
now = datetime.utcnow()
delta = now - self.uptime
hours, remainder = divmod(int(delta.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
fmt = "{h} hours, {m} minutes, and {s} seconds"
if days:
fmt = "{d} days, " + fmt
return fmt.format(d=days, h=hours, m=minutes, s=seconds)
def run(self):
super().run(token)
if __name__ == "__main__":
UniversalBot().run()
|
the-stack_106_28104 | from .util import *
from .query_result import QueryResult
class Graph(object):
"""
Graph, collection of nodes and edges.
"""
def __init__(self, name, redis_con):
"""
Create a new graph.
"""
self.name = name
self.redis_con = redis_con
self.nodes = {}
self.edges = []
self._labels = [] # List of node labels.
self._properties = [] # List of properties.
self._relationshipTypes = [] # List of relation types.
def get_label(self, idx):
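        # Labels are cached locally; on a cache miss, refresh the full label list
        # from the server and retry the lookup.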
try:
label = self._labels[idx]
except IndexError:
# Refresh graph labels.
lbls = self.labels()
# Unpack data.
self._labels = [None] * len(lbls)
for i, l in enumerate(lbls):
self._labels[i] = l[0]
label = self._labels[idx]
return label
def get_relation(self, idx):
try:
relationshipType = self._relationshipTypes[idx]
except IndexError:
# Refresh graph relations.
rels = self.relationshipTypes()
# Unpack data.
self._relationshipTypes = [None] * len(rels)
for i, r in enumerate(rels):
self._relationshipTypes[i] = r[0]
relationshipType = self._relationshipTypes[idx]
return relationshipType
def get_property(self, idx):
try:
            prop = self._properties[idx]
        except IndexError:
            # Refresh properties.
            props = self.propertyKeys()
            # Unpack data.
            self._properties = [None] * len(props)
            for i, p in enumerate(props):
                self._properties[i] = p[0]
            prop = self._properties[idx]
        return prop
def add_node(self, node):
"""
Adds a node to the graph.
"""
if node.alias is None:
node.alias = random_string()
self.nodes[node.alias] = node
def add_edge(self, edge):
"""
        Adds an edge to the graph.
"""
# Make sure edge both ends are in the graph
assert self.nodes[edge.src_node.alias] is not None and self.nodes[edge.dest_node.alias] is not None
self.edges.append(edge)
def commit(self):
"""
Create entire graph.
"""
if len(self.nodes) == 0 and len(self.edges) == 0:
return None
query = 'CREATE '
for _, node in self.nodes.items():
query += str(node) + ','
query += ','.join([str(edge) for edge in self.edges])
# Discard leading comma.
        if query[-1] == ',':
query = query[:-1]
return self.query(query)
def flush(self):
"""
Commit the graph and reset the edges and nodes to zero length
"""
self.commit()
self.nodes = {}
self.edges = []
def build_params_header(self, params):
assert type(params) == dict
# Header starts with "CYPHER"
params_header = "CYPHER "
for key, value in params.items():
# If value is string add quotation marks.
if type(value) == str:
value = quote_string(value)
# Value is None, replace with "null" string.
elif value is None:
value = "null"
params_header += str(key) + "=" + str(value) + " "
return params_header
def query(self, q, params=None, timeout=None):
"""
Executes a query against the graph.
"""
if params is not None:
q = self.build_params_header(params) + q
command = ["GRAPH.QUERY", self.name, q, "--compact"]
if timeout:
if not isinstance(timeout, int):
raise Exception("Timeout argument must be a positive integer")
command += ["timeout", timeout]
response = self.redis_con.execute_command(*command)
return QueryResult(self, response)
def _execution_plan_to_string(self, plan):
return "\n".join(plan)
def execution_plan(self, query, params=None):
"""
Get the execution plan for given query,
GRAPH.EXPLAIN returns an array of operations.
"""
if params is not None:
query = self.build_params_header(params) + query
        plan = self.redis_con.execute_command("GRAPH.EXPLAIN", self.name, query)
return self._execution_plan_to_string(plan)
def delete(self):
"""
Deletes graph.
"""
return self.redis_con.execute_command("GRAPH.DELETE", self.name)
def merge(self, pattern):
"""
Merge pattern.
"""
query = 'MERGE '
query += str(pattern)
return self.query(query)
# Procedures.
def call_procedure(self, procedure, *args, **kwagrs):
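        # Quote string arguments and issue "CALL procedure(args)", adding a YIELD
        # clause when the `y` keyword argument lists columns to return.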
args = [quote_string(arg) for arg in args]
q = 'CALL %s(%s)' % (procedure, ','.join(args))
y = kwagrs.get('y', None)
if y:
q += ' YIELD %s' % ','.join(y)
return self.query(q)
def labels(self):
return self.call_procedure("db.labels").result_set
def relationshipTypes(self):
return self.call_procedure("db.relationshipTypes").result_set
def propertyKeys(self):
return self.call_procedure("db.propertyKeys").result_set
|
the-stack_106_28109 | import re
import os
import shutil
rxDiacritics = re.compile('[ëç]')
rxDiaPartsStem = re.compile('( stem:)( *[^\r\n]+)')
rxDiaPartsFlex = re.compile('(-flex:)( *[^\r\n]+)')
rxStemVariants = re.compile('[^ |/]+')
rxFlexVariants = re.compile('[^ /]+')
dictDiacritics = {'ë': 'e', 'ç': 'c'}
def collect_lemmata():
lemmata = ''
lexrules = ''
derivations = ''
for fname in os.listdir('.'):
if fname.endswith('.txt') and fname.startswith('sqi_lexemes'):
f = open(fname, 'r', encoding='utf-8-sig')
lemmata += f.read() + '\n'
f.close()
elif fname.endswith('.txt') and fname.startswith('sqi_lexrules'):
f = open(fname, 'r', encoding='utf-8-sig')
lexrules += f.read() + '\n'
f.close()
elif fname.endswith('.txt') and fname.startswith('sqi_derivations'):
f = open(fname, 'r', encoding='utf-8-sig')
derivations += f.read() + '\n'
f.close()
lemmataSet = set(re.findall('-lexeme\n(?: [^\r\n]*\n)+', lemmata, flags=re.DOTALL))
# lemmata = '\n'.join(sorted(list(lemmataSet),
# key=lambda l: (re.search('gramm: *([^\r\n]*)', l).group(1), l)))
lemmata = '\n'.join(sorted(list(lemmataSet)))
return lemmata, lexrules, derivations
def collect_paradigms():
fIn = open('paradigms.txt', 'r', encoding='utf-8-sig')
text = fIn.read()
fIn.close()
return text
def add_diacriticless(morph):
"""
Add a diacriticless variant to a stem or an inflection
"""
morph = morph.group(0)
if rxDiacritics.search(morph) is None:
return morph
return morph + '//' + rxDiacritics.sub(lambda m: dictDiacritics[m.group(0)], morph)
def process_diacritics_stem(line):
"""
Remove diacritics from one line that contains stems.
"""
morphCorrected = rxStemVariants.sub(add_diacriticless, line.group(2))
return line.group(1) + morphCorrected
def process_diacritics_flex(line):
"""
Remove diacritics from one line that contains inflections.
"""
morphCorrected = rxFlexVariants.sub(add_diacriticless, line.group(2))
return line.group(1) + morphCorrected
def simplify(text):
"""
Add diacriticless variants for stems and inflections.
"""
text = rxDiaPartsStem.sub(process_diacritics_stem, text)
text = rxDiaPartsFlex.sub(process_diacritics_flex, text)
return text
def prepare_files():
"""
Put all the lemmata to lexemes.txt. Put all the lexical
rules to lexical_rules.txt. Put all the derivations to
derivations.txt. Create separate versions of
relevant files for diacriticless texts.
Put all grammar files to uniparser_albanian/data_strict/
(original version) or uniparser_albanian/data_nodiacritics/
(diacriticless version).
"""
lemmata, lexrules, derivations = collect_lemmata()
paradigms = collect_paradigms()
fOutLemmata = open('uniparser_albanian/data_strict/lexemes.txt', 'w', encoding='utf-8')
fOutLemmata.write(lemmata)
fOutLemmata.close()
fOutLemmataNodiacritics = open('uniparser_albanian/data_nodiacritics/lexemes.txt', 'w', encoding='utf-8')
fOutLemmataNodiacritics.write(simplify(lemmata))
fOutLemmataNodiacritics.close()
if len(lexrules) > 0:
fOutLexrules = open('uniparser_albanian/data_strict/lex_rules.txt', 'w', encoding='utf-8')
fOutLexrules.write(lexrules)
fOutLexrules.close()
fOutLexrules = open('uniparser_albanian/data_nodiacritics/lex_rules.txt', 'w', encoding='utf-8')
fOutLexrules.write(lexrules)
fOutLexrules.close()
fOutParadigms = open('uniparser_albanian/data_strict/paradigms.txt', 'w', encoding='utf-8')
fOutParadigms.write(paradigms)
fOutParadigms.close()
fOutParadigmsNodiacritics = open('uniparser_albanian/data_nodiacritics/paradigms.txt', 'w', encoding='utf-8')
fOutParadigmsNodiacritics.write(simplify(paradigms))
fOutParadigmsNodiacritics.close()
fOutDerivations = open('uniparser_albanian/data_strict/derivations.txt', 'w', encoding='utf-8')
fOutDerivations.write(derivations)
fOutDerivations.close()
fOutDerivations = open('uniparser_albanian/data_nodiacritics/derivations.txt', 'w', encoding='utf-8')
fOutDerivations.write(derivations)
fOutDerivations.close()
if os.path.exists('bad_analyses.txt'):
shutil.copy2('bad_analyses.txt', 'uniparser_albanian/data_strict/')
shutil.copy2('bad_analyses.txt', 'uniparser_albanian/data_nodiacritics/')
    if os.path.exists('albanian_disambiguation.cg3'):
shutil.copy2('albanian_disambiguation.cg3', 'uniparser_albanian/data_strict/')
shutil.copy2('albanian_disambiguation.cg3', 'uniparser_albanian/data_nodiacritics/')
def parse_wordlists():
"""
Analyze wordlists/wordlist.csv.
"""
from uniparser_albanian import AlbanianAnalyzer
a = AlbanianAnalyzer(mode='strict')
a.analyze_wordlist(freqListFile='wordlists/wordlist.csv',
parsedFile='wordlists/wordlist_analyzed.txt',
unparsedFile='wordlists/wordlist_unanalyzed.txt',
verbose=True)
if __name__ == '__main__':
prepare_files()
parse_wordlists()
|
the-stack_106_28112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivo Tzvetkov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
__version__ = '1.0.0'
setup(
name='weblogger',
version=__version__,
description='A service for logging messages sent over HTTP to various backends',
long_description='A service for logging messages sent over HTTP to various backends',
author='Ivo Tzvetkov',
author_email='[email protected]',
license='MIT',
url='http://github.com/ivotkv/weblogger',
download_url='https://github.com/ivotkv/weblogger/tarball/v' + __version__,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6"
],
keywords=[
"web", "logger", "logging", "log"
],
packages=[
'weblogger'
],
entry_points = {
'console_scripts': [
'weblogger=weblogger.server:main'
]
},
install_requires=[
'PyYAML',
'tornado>=5,<6',
'httpagentparser'
]
)
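# Illustrative note (not part of the original file): installing this package,
# e.g. with `pip install .`, registers a `weblogger` console command through
# the console_scripts entry point above, which dispatches to
# weblogger.server:main().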
|
the-stack_106_28113 | #
# The ndarray object from _testbuffer.c is a complete implementation of
# a PEP-3118 buffer provider. It is independent from NumPy's ndarray
# and the tests don't require NumPy.
#
# If NumPy is present, some tests check both ndarray implementations
# against each other.
#
# Most ndarray tests also check that memoryview(ndarray) behaves in
# the same way as the original. Thus, a substantial part of the
# memoryview tests is now in this module.
#
import unittest
from test import support
from itertools import permutations, product
from random import randrange, sample, choice
from sysconfig import get_config_var
import warnings
import sys, array, io
from decimal import Decimal
from fractions import Fraction
try:
from _testbuffer import *
except ImportError:
ndarray = None
try:
import struct
except ImportError:
struct = None
try:
import ctypes
except ImportError:
ctypes = None
try:
with warnings.catch_warnings():
from numpy import ndarray as numpy_array
except ImportError:
numpy_array = None
SHORT_TEST = True
# ======================================================================
# Random lists by format specifier
# ======================================================================
# Native format chars and their ranges.
NATIVE = {
'?':0, 'c':0, 'b':0, 'B':0,
'h':0, 'H':0, 'i':0, 'I':0,
'l':0, 'L':0, 'n':0, 'N':0,
'f':0, 'd':0, 'P':0
}
# NumPy does not have 'n' or 'N':
if numpy_array:
del NATIVE['n']
del NATIVE['N']
if struct:
try:
# Add "qQ" if present in native mode.
struct.pack('Q', 2**64-1)
NATIVE['q'] = 0
NATIVE['Q'] = 0
except struct.error:
pass
# Standard format chars and their ranges.
STANDARD = {
'?':(0, 2), 'c':(0, 1<<8),
'b':(-(1<<7), 1<<7), 'B':(0, 1<<8),
'h':(-(1<<15), 1<<15), 'H':(0, 1<<16),
'i':(-(1<<31), 1<<31), 'I':(0, 1<<32),
'l':(-(1<<31), 1<<31), 'L':(0, 1<<32),
'q':(-(1<<63), 1<<63), 'Q':(0, 1<<64),
'f':(-(1<<63), 1<<63), 'd':(-(1<<1023), 1<<1023)
}
def native_type_range(fmt):
"""Return range of a native type."""
if fmt == 'c':
lh = (0, 256)
elif fmt == '?':
lh = (0, 2)
elif fmt == 'f':
lh = (-(1<<63), 1<<63)
elif fmt == 'd':
lh = (-(1<<1023), 1<<1023)
else:
for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):
try:
struct.pack(fmt, (1<<exp)-1)
break
except struct.error:
pass
lh = (-(1<<exp), 1<<exp) if exp & 1 else (0, 1<<exp)
return lh
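# Illustrative examples (not part of the original module); exact values depend
# on the struct module of the running interpreter, but typically:
#     native_type_range('b')  ->  (-128, 128)
#     native_type_range('H')  ->  (0, 65536)
# The upper bound is exclusive, matching the randrange() calls below.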
fmtdict = {
'':NATIVE,
'@':NATIVE,
'<':STANDARD,
'>':STANDARD,
'=':STANDARD,
'!':STANDARD
}
if struct:
for fmt in fmtdict['@']:
fmtdict['@'][fmt] = native_type_range(fmt)
MEMORYVIEW = NATIVE.copy()
ARRAY = NATIVE.copy()
for k in NATIVE:
if not k in "bBhHiIlLfd":
del ARRAY[k]
BYTEFMT = NATIVE.copy()
for k in NATIVE:
if not k in "Bbc":
del BYTEFMT[k]
fmtdict['m'] = MEMORYVIEW
fmtdict['@m'] = MEMORYVIEW
fmtdict['a'] = ARRAY
fmtdict['b'] = BYTEFMT
fmtdict['@b'] = BYTEFMT
# Capabilities of the test objects:
MODE = 0
MULT = 1
cap = { # format chars # multiplier
'ndarray': (['', '@', '<', '>', '=', '!'], ['', '1', '2', '3']),
'array': (['a'], ['']),
'numpy': ([''], ['']),
'memoryview': (['@m', 'm'], ['']),
'bytefmt': (['@b', 'b'], ['']),
}
def randrange_fmt(mode, char, obj):
"""Return random item for a type specified by a mode and a single
format character."""
x = randrange(*fmtdict[mode][char])
if char == 'c':
x = bytes(chr(x), 'latin1')
if char == '?':
x = bool(x)
if char == 'f' or char == 'd':
x = struct.pack(char, x)
x = struct.unpack(char, x)[0]
if obj == 'numpy' and x == b'\x00':
# http://projects.scipy.org/numpy/ticket/1925
x = b'\x01'
return x
def gen_item(fmt, obj):
"""Return single random item."""
mode, chars = fmt.split('#')
x = []
for c in chars:
x.append(randrange_fmt(mode, c, obj))
return x[0] if len(x) == 1 else tuple(x)
def gen_items(n, fmt, obj):
"""Return a list of random items (or a scalar)."""
if n == 0:
return gen_item(fmt, obj)
lst = [0] * n
for i in range(n):
lst[i] = gen_item(fmt, obj)
return lst
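# Illustrative examples (not part of the original module); the internal format
# convention is 'mode#chars', e.g.:
#     gen_item('@#B', 'ndarray')      -> one random int in range(0, 256)
#     gen_item('<#BB', 'ndarray')     -> a 2-tuple of such ints
#     gen_items(3, '@#B', 'ndarray')  -> a list of three such ints
#     gen_items(0, '@#B', 'ndarray')  -> a single scalar item (n == 0)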
def struct_items(n, obj):
mode = choice(cap[obj][MODE])
xfmt = mode + '#'
fmt = mode.strip('amb')
nmemb = randrange(2, 10) # number of struct members
for _ in range(nmemb):
char = choice(tuple(fmtdict[mode]))
multiplier = choice(cap[obj][MULT])
xfmt += (char * int(multiplier if multiplier else 1))
fmt += (multiplier + char)
items = gen_items(n, xfmt, obj)
item = gen_item(xfmt, obj)
return fmt, items, item
def randitems(n, obj='ndarray', mode=None, char=None):
"""Return random format, items, item."""
if mode is None:
mode = choice(cap[obj][MODE])
if char is None:
char = choice(tuple(fmtdict[mode]))
multiplier = choice(cap[obj][MULT])
fmt = mode + '#' + char * int(multiplier if multiplier else 1)
items = gen_items(n, fmt, obj)
item = gen_item(fmt, obj)
fmt = mode.strip('amb') + multiplier + char
return fmt, items, item
def iter_mode(n, obj='ndarray'):
"""Iterate through supported mode/char combinations."""
for mode in cap[obj][MODE]:
for char in fmtdict[mode]:
yield randitems(n, obj, mode, char)
def iter_format(nitems, testobj='ndarray'):
"""Yield (format, items, item) for all possible modes and format
characters plus one random compound format string."""
for t in iter_mode(nitems, testobj):
yield t
    if testobj != 'ndarray':
        return
yield struct_items(nitems, testobj)
def is_byte_format(fmt):
return 'c' in fmt or 'b' in fmt or 'B' in fmt
def is_memoryview_format(fmt):
"""format suitable for memoryview"""
x = len(fmt)
return ((x == 1 or (x == 2 and fmt[0] == '@')) and
fmt[x-1] in MEMORYVIEW)
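# Illustrative examples (not part of the original module):
#     is_memoryview_format('B')   ->  True
#     is_memoryview_format('@L')  ->  True
#     is_memoryview_format('<L')  ->  False  (only native single-char formats)
#     is_memoryview_format('BB')  ->  False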
NON_BYTE_FORMAT = [c for c in fmtdict['@'] if not is_byte_format(c)]
# ======================================================================
# Multi-dimensional tolist(), slicing and slice assignments
# ======================================================================
def atomp(lst):
"""Tuple items (representing structs) are regarded as atoms."""
return not isinstance(lst, list)
def listp(lst):
return isinstance(lst, list)
def prod(lst):
"""Product of list elements."""
if len(lst) == 0:
return 0
x = lst[0]
for v in lst[1:]:
x *= v
return x
def strides_from_shape(ndim, shape, itemsize, layout):
"""Calculate strides of a contiguous array. Layout is 'C' or
'F' (Fortran)."""
if ndim == 0:
return ()
if layout == 'C':
strides = list(shape[1:]) + [itemsize]
for i in range(ndim-2, -1, -1):
strides[i] *= strides[i+1]
else:
strides = [itemsize] + list(shape[:-1])
for i in range(1, ndim):
strides[i] *= strides[i-1]
return strides
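# Illustrative example (not part of the original module): for a 3x4 array of
# 8-byte items,
#     strides_from_shape(2, [3, 4], 8, 'C')  ->  [32, 8]
#     strides_from_shape(2, [3, 4], 8, 'F')  ->  [8, 24]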
def _ca(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional C array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = len(items) // s[0] if s[0] else 0
for i in range(s[0]):
start = i*stride
lst[i] = _ca(items[start:start+stride], s[1:])
return lst
def _fa(items, s):
"""Convert flat item list to the nested list representation of a
multidimensional Fortran array with shape 's'."""
if atomp(items):
return items
if len(s) == 0:
return items[0]
lst = [0] * s[0]
stride = s[0]
for i in range(s[0]):
lst[i] = _fa(items[i::stride], s[1:])
return lst
def carray(items, shape):
if listp(items) and not 0 in shape and prod(shape) != len(items):
raise ValueError("prod(shape) != len(items)")
return _ca(items, shape)
def farray(items, shape):
if listp(items) and not 0 in shape and prod(shape) != len(items):
raise ValueError("prod(shape) != len(items)")
return _fa(items, shape)
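# Illustrative examples (not part of the original module): the same flat item
# list nested in C order vs. Fortran order:
#     carray([1, 2, 3, 4, 5, 6], [2, 3])  ->  [[1, 2, 3], [4, 5, 6]]
#     farray([1, 2, 3, 4, 5, 6], [2, 3])  ->  [[1, 3, 5], [2, 4, 6]]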
def indices(shape):
"""Generate all possible tuples of indices."""
iterables = [range(v) for v in shape]
return product(*iterables)
def getindex(ndim, ind, strides):
"""Convert multi-dimensional index to the position in the flat list."""
ret = 0
for i in range(ndim):
ret += strides[i] * ind[i]
return ret
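# Illustrative examples (not part of the original module):
#     list(indices((2, 2)))        ->  [(0, 0), (0, 1), (1, 0), (1, 1)]
#     getindex(2, (1, 2), (4, 1))  ->  6    (row stride 4, column stride 1)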
def transpose(src, shape):
"""Transpose flat item list that is regarded as a multi-dimensional
matrix defined by shape: dest...[k][j][i] = src[i][j][k]... """
if not shape:
return src
ndim = len(shape)
sstrides = strides_from_shape(ndim, shape, 1, 'C')
dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
dest = [0] * len(src)
for ind in indices(shape):
fr = getindex(ndim, ind, sstrides)
to = getindex(ndim, ind[::-1], dstrides)
dest[to] = src[fr]
return dest
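# Illustrative example (not part of the original module): the flat C-order
# items of the 2x3 matrix [[1, 2, 3], [4, 5, 6]] become the flat C-order items
# of its 3x2 transpose [[1, 4], [2, 5], [3, 6]]:
#     transpose([1, 2, 3, 4, 5, 6], (2, 3))  ->  [1, 4, 2, 5, 3, 6]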
def _flatten(lst):
"""flatten list"""
if lst == []:
return lst
if atomp(lst):
return [lst]
return _flatten(lst[0]) + _flatten(lst[1:])
def flatten(lst):
"""flatten list or return scalar"""
if atomp(lst): # scalar
return lst
return _flatten(lst)
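# Illustrative examples (not part of the original module):
#     flatten([[1, 2], [3, 4]])  ->  [1, 2, 3, 4]
#     flatten((1, 2))            ->  (1, 2)  (a tuple is a struct item, i.e. an atom)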
def slice_shape(lst, slices):
"""Get the shape of lst after slicing: slices is a list of slice
objects."""
if atomp(lst):
return []
return [len(lst[slices[0]])] + slice_shape(lst[0], slices[1:])
def multislice(lst, slices):
"""Multi-dimensional slicing: slices is a list of slice objects."""
if atomp(lst):
return lst
return [multislice(sublst, slices[1:]) for sublst in lst[slices[0]]]
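# Illustrative example (not part of the original module): slicing both
# dimensions of a 3x3 nested list with step 2:
#     multislice([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
#                [slice(0, 3, 2), slice(0, 3, 2)])  ->  [[1, 3], [7, 9]]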
def m_assign(llst, rlst, lslices, rslices):
"""Multi-dimensional slice assignment: llst and rlst are the operands,
lslices and rslices are lists of slice objects. llst and rlst must
have the same structure.
For a two-dimensional example, this is not implemented in Python:
llst[0:3:2, 0:3:2] = rlst[1:3:1, 1:3:1]
Instead we write:
lslices = [slice(0,3,2), slice(0,3,2)]
rslices = [slice(1,3,1), slice(1,3,1)]
multislice_assign(llst, rlst, lslices, rslices)
"""
if atomp(rlst):
return rlst
rlst = [m_assign(l, r, lslices[1:], rslices[1:])
for l, r in zip(llst[lslices[0]], rlst[rslices[0]])]
llst[lslices[0]] = rlst
return llst
def cmp_structure(llst, rlst, lslices, rslices):
"""Compare the structure of llst[lslices] and rlst[rslices]."""
lshape = slice_shape(llst, lslices)
rshape = slice_shape(rlst, rslices)
if (len(lshape) != len(rshape)):
return -1
for i in range(len(lshape)):
if lshape[i] != rshape[i]:
return -1
if lshape[i] == 0:
return 0
return 0
def multislice_assign(llst, rlst, lslices, rslices):
"""Return llst after assigning: llst[lslices] = rlst[rslices]"""
if cmp_structure(llst, rlst, lslices, rslices) < 0:
raise ValueError("lvalue and rvalue have different structures")
return m_assign(llst, rlst, lslices, rslices)
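# Illustrative example (not part of the original module), one-dimensional case:
#     llst = [0, 1, 2, 3]
#     rlst = [10, 11, 12, 13]
#     multislice_assign(llst, rlst, [slice(0, 4, 2)], [slice(1, 4, 2)])
#     # llst is now [11, 1, 13, 3]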
# ======================================================================
# Random structures
# ======================================================================
#
# PEP-3118 is very permissive with respect to the contents of a
# Py_buffer. In particular:
#
# - shape can be zero
# - strides can be any integer, including zero
# - offset can point to any location in the underlying
# memory block, provided that it is a multiple of
# itemsize.
#
# The functions in this section test and verify random structures
# in full generality. A structure is valid iff it fits in the
# underlying memory block.
#
# The structure 't' (short for 'tuple') is fully defined by:
#
# t = (memlen, itemsize, ndim, shape, strides, offset)
#
def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
"""Verify that the parameters represent a valid array within
the bounds of the allocated memory:
char *mem: start of the physical memory block
memlen: length of the physical memory block
offset: (char *)buf - mem
"""
if offset % itemsize:
return False
if offset < 0 or offset+itemsize > memlen:
return False
if any(v % itemsize for v in strides):
return False
if ndim <= 0:
return ndim == 0 and not shape and not strides
if 0 in shape:
return True
imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] <= 0)
imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] > 0)
return 0 <= offset+imin and offset+imax+itemsize <= memlen
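# Illustrative examples (not part of the original module), with itemsize=8:
#     verify_structure(24, 8, 1, [3], [8], 0)   ->  True   (3 contiguous items)
#     verify_structure(24, 8, 1, [3], [16], 0)  ->  False  (last item out of bounds)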
def get_item(lst, indices):
for i in indices:
lst = lst[i]
return lst
def memory_index(indices, t):
"""Location of an item in the underlying memory."""
memlen, itemsize, ndim, shape, strides, offset = t
p = offset
for i in range(ndim):
p += strides[i]*indices[i]
return p
def is_overlapping(t):
"""The structure 't' is overlapping if at least one memory location
is visited twice while iterating through all possible tuples of
indices."""
memlen, itemsize, ndim, shape, strides, offset = t
visited = 1<<memlen
for ind in indices(shape):
i = memory_index(ind, t)
bit = 1<<i
if visited & bit:
return True
visited |= bit
return False
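# Illustrative example (not part of the original module): with a zero stride
# every index maps to the same byte offset, so the structure overlaps:
#     t = (8, 8, 1, (2,), (0,), 0)
#     memory_index((0,), t) == memory_index((1,), t) == 0
#     is_overlapping(t)  ->  True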
def rand_structure(itemsize, valid, maxdim=5, maxshape=16, shape=()):
"""Return random structure:
(memlen, itemsize, ndim, shape, strides, offset)
If 'valid' is true, the returned structure is valid, otherwise invalid.
If 'shape' is given, use that instead of creating a random shape.
"""
if not shape:
ndim = randrange(maxdim+1)
if (ndim == 0):
if valid:
return itemsize, itemsize, ndim, (), (), 0
else:
nitems = randrange(1, 16+1)
memlen = nitems * itemsize
offset = -itemsize if randrange(2) == 0 else memlen
return memlen, itemsize, ndim, (), (), offset
minshape = 2
n = randrange(100)
if n >= 95 and valid:
minshape = 0
elif n >= 90:
minshape = 1
shape = [0] * ndim
for i in range(ndim):
shape[i] = randrange(minshape, maxshape+1)
else:
ndim = len(shape)
maxstride = 5
n = randrange(100)
zero_stride = True if n >= 95 and n & 1 else False
strides = [0] * ndim
strides[ndim-1] = itemsize * randrange(-maxstride, maxstride+1)
if not zero_stride and strides[ndim-1] == 0:
strides[ndim-1] = itemsize
for i in range(ndim-2, -1, -1):
maxstride *= shape[i+1] if shape[i+1] else 1
if zero_stride:
strides[i] = itemsize * randrange(-maxstride, maxstride+1)
else:
strides[i] = ((1,-1)[randrange(2)] *
itemsize * randrange(1, maxstride+1))
imin = imax = 0
if not 0 in shape:
imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] <= 0)
imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
if strides[j] > 0)
nitems = imax - imin
if valid:
offset = -imin * itemsize
memlen = offset + (imax+1) * itemsize
else:
memlen = (-imin + imax) * itemsize
offset = -imin-itemsize if randrange(2) == 0 else memlen
return memlen, itemsize, ndim, shape, strides, offset
def randslice_from_slicelen(slicelen, listlen):
"""Create a random slice of len slicelen that fits into listlen."""
maxstart = listlen - slicelen
start = randrange(maxstart+1)
maxstep = (listlen - start) // slicelen if slicelen else 1
step = randrange(1, maxstep+1)
stop = start + slicelen * step
s = slice(start, stop, step)
_, _, _, control = slice_indices(s, listlen)
if control != slicelen:
raise RuntimeError
return s
def randslice_from_shape(ndim, shape):
"""Create two sets of slices for an array x with shape 'shape'
such that shapeof(x[lslices]) == shapeof(x[rslices])."""
lslices = [0] * ndim
rslices = [0] * ndim
for n in range(ndim):
l = shape[n]
slicelen = randrange(1, l+1) if l > 0 else 0
lslices[n] = randslice_from_slicelen(slicelen, l)
rslices[n] = randslice_from_slicelen(slicelen, l)
return tuple(lslices), tuple(rslices)
def rand_aligned_slices(maxdim=5, maxshape=16):
"""Create (lshape, rshape, tuple(lslices), tuple(rslices)) such that
shapeof(x[lslices]) == shapeof(y[rslices]), where x is an array
with shape 'lshape' and y is an array with shape 'rshape'."""
ndim = randrange(1, maxdim+1)
minshape = 2
n = randrange(100)
if n >= 95:
minshape = 0
elif n >= 90:
minshape = 1
all_random = True if randrange(100) >= 80 else False
lshape = [0]*ndim; rshape = [0]*ndim
lslices = [0]*ndim; rslices = [0]*ndim
for n in range(ndim):
small = randrange(minshape, maxshape+1)
big = randrange(minshape, maxshape+1)
if big < small:
big, small = small, big
# Create a slice that fits the smaller value.
if all_random:
start = randrange(-small, small+1)
stop = randrange(-small, small+1)
step = (1,-1)[randrange(2)] * randrange(1, small+2)
s_small = slice(start, stop, step)
_, _, _, slicelen = slice_indices(s_small, small)
else:
slicelen = randrange(1, small+1) if small > 0 else 0
s_small = randslice_from_slicelen(slicelen, small)
# Create a slice of the same length for the bigger value.
s_big = randslice_from_slicelen(slicelen, big)
if randrange(2) == 0:
rshape[n], lshape[n] = big, small
rslices[n], lslices[n] = s_big, s_small
else:
rshape[n], lshape[n] = small, big
rslices[n], lslices[n] = s_small, s_big
return lshape, rshape, tuple(lslices), tuple(rslices)
def randitems_from_structure(fmt, t):
"""Return a list of random items for structure 't' with format
'fmtchar'."""
memlen, itemsize, _, _, _, _ = t
return gen_items(memlen//itemsize, '#'+fmt, 'numpy')
def ndarray_from_structure(items, fmt, t, flags=0):
"""Return ndarray from the tuple returned by rand_structure()"""
memlen, itemsize, ndim, shape, strides, offset = t
return ndarray(items, shape=shape, strides=strides, format=fmt,
offset=offset, flags=ND_WRITABLE|flags)
def numpy_array_from_structure(items, fmt, t):
"""Return numpy_array from the tuple returned by rand_structure()"""
memlen, itemsize, ndim, shape, strides, offset = t
buf = bytearray(memlen)
for j, v in enumerate(items):
struct.pack_into(fmt, buf, j*itemsize, v)
return numpy_array(buffer=buf, shape=shape, strides=strides,
dtype=fmt, offset=offset)
# ======================================================================
# memoryview casts
# ======================================================================
def cast_items(exporter, fmt, itemsize, shape=None):
"""Interpret the raw memory of 'exporter' as a list of items with
size 'itemsize'. If shape=None, the new structure is assumed to
be 1-D with n * itemsize = bytelen. If shape is given, the usual
constraint for contiguous arrays prod(shape) * itemsize = bytelen
applies. On success, return (items, shape). If the constraints
cannot be met, return (None, None). If a chunk of bytes is interpreted
as NaN as a result of float conversion, return ('nan', None)."""
bytelen = exporter.nbytes
if shape:
if prod(shape) * itemsize != bytelen:
return None, shape
elif shape == []:
if exporter.ndim == 0 or itemsize != bytelen:
return None, shape
else:
n, r = divmod(bytelen, itemsize)
shape = [n]
if r != 0:
return None, shape
mem = exporter.tobytes()
byteitems = [mem[i:i+itemsize] for i in range(0, len(mem), itemsize)]
items = []
for v in byteitems:
item = struct.unpack(fmt, v)[0]
if item != item:
return 'nan', shape
items.append(item)
return (items, shape) if shape != [] else (items[0], shape)
def gencastshapes():
"""Generate shapes to test casting."""
for n in range(32):
yield [n]
ndim = randrange(4, 6)
minshape = 1 if randrange(100) > 80 else 2
yield [randrange(minshape, 5) for _ in range(ndim)]
ndim = randrange(2, 4)
minshape = 1 if randrange(100) > 80 else 2
yield [randrange(minshape, 5) for _ in range(ndim)]
# ======================================================================
# Actual tests
# ======================================================================
def genslices(n):
"""Generate all possible slices for a single dimension."""
return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))
def genslices_ndim(ndim, shape):
"""Generate all possible slice tuples for 'shape'."""
iterables = [genslices(shape[n]) for n in range(ndim)]
return product(*iterables)
def rslice(n, allow_empty=False):
"""Generate random slice for a single dimension of length n.
    If allow_empty=True, the slices may be empty, otherwise they will
    be non-empty."""
minlen = 0 if allow_empty or n == 0 else 1
slicelen = randrange(minlen, n+1)
return randslice_from_slicelen(slicelen, n)
def rslices(n, allow_empty=False):
"""Generate random slices for a single dimension."""
for _ in range(5):
yield rslice(n, allow_empty)
def rslices_ndim(ndim, shape, iterations=5):
"""Generate random slice tuples for 'shape'."""
# non-empty slices
for _ in range(iterations):
yield tuple(rslice(shape[n]) for n in range(ndim))
# possibly empty slices
for _ in range(iterations):
yield tuple(rslice(shape[n], allow_empty=True) for n in range(ndim))
# invalid slices
yield tuple(slice(0,1,0) for _ in range(ndim))
def rpermutation(iterable, r=None):
pool = tuple(iterable)
r = len(pool) if r is None else r
yield tuple(sample(pool, r))
def ndarray_print(nd):
"""Print ndarray for debugging."""
try:
x = nd.tolist()
except (TypeError, NotImplementedError):
x = nd.tobytes()
if isinstance(nd, ndarray):
offset = nd.offset
flags = nd.flags
else:
offset = 'unknown'
flags = 'unknown'
print("ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, "
"format='%s', itemsize=%s, flags=%s)" %
(x, nd.shape, nd.strides, nd.suboffsets, offset,
nd.format, nd.itemsize, flags))
sys.stdout.flush()
ITERATIONS = 100
MAXDIM = 5
MAXSHAPE = 10
if SHORT_TEST:
ITERATIONS = 10
MAXDIM = 3
MAXSHAPE = 4
genslices = rslices
genslices_ndim = rslices_ndim
permutations = rpermutation
@unittest.skipUnless(struct, 'struct module required for this test.')
@unittest.skipUnless(ndarray, 'ndarray object required for this test')
class TestBufferProtocol(unittest.TestCase):
def setUp(self):
# The suboffsets tests need sizeof(void *).
self.sizeof_void_p = get_sizeof_void_p()
def verify(self, result, obj=-1,
itemsize={1}, fmt=-1, readonly={1},
ndim={1}, shape=-1, strides=-1,
lst=-1, sliced=False, cast=False):
# Verify buffer contents against expected values. Default values
# are deliberately initialized to invalid types.
if shape:
expected_len = prod(shape)*itemsize
else:
if not fmt: # array has been implicitly cast to unsigned bytes
expected_len = len(lst)
else: # ndim = 0
expected_len = itemsize
# Reconstruct suboffsets from strides. Support for slicing
# could be added, but is currently only needed for test_getbuf().
suboffsets = ()
if result.suboffsets:
self.assertGreater(ndim, 0)
suboffset0 = 0
for n in range(1, ndim):
if shape[n] == 0:
break
if strides[n] <= 0:
suboffset0 += -strides[n] * (shape[n]-1)
suboffsets = [suboffset0] + [-1 for v in range(ndim-1)]
# Not correct if slicing has occurred in the first dimension.
stride0 = self.sizeof_void_p
if strides[0] < 0:
stride0 = -stride0
strides = [stride0] + list(strides[1:])
self.assertIs(result.obj, obj)
self.assertEqual(result.nbytes, expected_len)
self.assertEqual(result.itemsize, itemsize)
self.assertEqual(result.format, fmt)
self.assertEqual(result.readonly, readonly)
self.assertEqual(result.ndim, ndim)
self.assertEqual(result.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(result.strides, tuple(strides))
self.assertEqual(result.suboffsets, tuple(suboffsets))
if isinstance(result, ndarray) or is_memoryview_format(fmt):
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
if not fmt: # array has been cast to unsigned bytes,
return # the remaining tests won't work.
# PyBuffer_GetPointer() is the definition how to access an item.
# If PyBuffer_GetPointer(indices) is correct for all possible
# combinations of indices, the buffer is correct.
#
# Also test tobytes() against the flattened 'lst', with all items
# packed to bytes.
if not cast: # casts chop up 'lst' in different ways
b = bytearray()
buf_err = None
for ind in indices(shape):
try:
item1 = get_pointer(result, ind)
item2 = get_item(lst, ind)
if isinstance(item2, tuple):
x = struct.pack(fmt, *item2)
else:
x = struct.pack(fmt, item2)
b.extend(x)
except BufferError:
buf_err = True # re-exporter does not provide full buffer
break
self.assertEqual(item1, item2)
if not buf_err:
# test tobytes()
self.assertEqual(result.tobytes(), b)
# lst := expected multi-dimensional logical representation
# flatten(lst) := elements in C-order
ff = fmt if fmt else 'B'
flattened = flatten(lst)
# Rules for 'A': if the array is already contiguous, return
# the array unaltered. Otherwise, return a contiguous 'C'
# representation.
for order in ['C', 'F', 'A']:
expected = result
if order == 'F':
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'C'):
# For constructing the ndarray, convert the
# flattened logical representation to Fortran order.
trans = transpose(flattened, shape)
expected = ndarray(trans, shape=shape, format=ff,
flags=ND_FORTRAN)
else: # 'C', 'A'
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'F') and order == 'C':
# The flattened list is already in C-order.
expected = ndarray(flattened, shape=shape, format=ff)
contig = get_contiguous(result, PyBUF_READ, order)
self.assertEqual(contig.tobytes(), b)
self.assertTrue(cmp_contig(contig, expected))
if ndim == 0:
continue
nmemb = len(flattened)
ro = 0 if readonly else ND_WRITABLE
### See comment in test_py_buffer_to_contiguous for an
### explanation why these tests are valid.
# To 'C'
contig = py_buffer_to_contiguous(result, 'C', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
# To 'F'
contig = py_buffer_to_contiguous(result, 'F', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro|ND_FORTRAN,
format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
# To 'A'
contig = py_buffer_to_contiguous(result, 'A', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
f = ND_FORTRAN if is_contiguous(result, 'F') else 0
y = ndarray(initlst, shape=shape, flags=f|ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
if is_memoryview_format(fmt):
try:
m = memoryview(result)
except BufferError: # re-exporter does not provide full information
return
ex = result.obj if isinstance(result, memoryview) else result
self.assertIs(m.obj, ex)
self.assertEqual(m.nbytes, expected_len)
self.assertEqual(m.itemsize, itemsize)
self.assertEqual(m.format, fmt)
self.assertEqual(m.readonly, readonly)
self.assertEqual(m.ndim, ndim)
self.assertEqual(m.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(m.strides, tuple(strides))
self.assertEqual(m.suboffsets, tuple(suboffsets))
n = 1 if ndim == 0 else len(lst)
self.assertEqual(len(m), n)
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
self.assertEqual(m, result)
def verify_getbuf(self, orig_ex, ex, req, sliced=False):
def simple_fmt(ex):
return ex.format == '' or ex.format == 'B'
def match(req, flag):
return ((req&flag) == flag)
if (# writable request to read-only exporter
(ex.readonly and match(req, PyBUF_WRITABLE)) or
# cannot match explicit contiguity request
(match(req, PyBUF_C_CONTIGUOUS) and not ex.c_contiguous) or
(match(req, PyBUF_F_CONTIGUOUS) and not ex.f_contiguous) or
(match(req, PyBUF_ANY_CONTIGUOUS) and not ex.contiguous) or
# buffer needs suboffsets
(not match(req, PyBUF_INDIRECT) and ex.suboffsets) or
# buffer without strides must be C-contiguous
(not match(req, PyBUF_STRIDES) and not ex.c_contiguous) or
# PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT
(not match(req, PyBUF_ND) and match(req, PyBUF_FORMAT))):
self.assertRaises(BufferError, ndarray, ex, getbuf=req)
return
if isinstance(ex, ndarray) or is_memoryview_format(ex.format):
lst = ex.tolist()
else:
nd = ndarray(ex, getbuf=PyBUF_FULL_RO)
lst = nd.tolist()
# The consumer may have requested default values or a NULL format.
ro = 0 if match(req, PyBUF_WRITABLE) else ex.readonly
fmt = ex.format
itemsize = ex.itemsize
ndim = ex.ndim
if not match(req, PyBUF_FORMAT):
# itemsize refers to the original itemsize before the cast.
# The equality product(shape) * itemsize = len still holds.
# The equality calcsize(format) = itemsize does _not_ hold.
fmt = ''
lst = orig_ex.tobytes() # Issue 12834
if not match(req, PyBUF_ND):
ndim = 1
shape = orig_ex.shape if match(req, PyBUF_ND) else ()
strides = orig_ex.strides if match(req, PyBUF_STRIDES) else ()
nd = ndarray(ex, getbuf=req)
self.verify(nd, obj=ex,
itemsize=itemsize, fmt=fmt, readonly=ro,
ndim=ndim, shape=shape, strides=strides,
lst=lst, sliced=sliced)
def test_ndarray_getbuf(self):
requests = (
# distinct flags
PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
PyBUF_C_CONTIGUOUS, PyBUF_F_CONTIGUOUS, PyBUF_ANY_CONTIGUOUS,
# compound requests
PyBUF_FULL, PyBUF_FULL_RO,
PyBUF_RECORDS, PyBUF_RECORDS_RO,
PyBUF_STRIDED, PyBUF_STRIDED_RO,
PyBUF_CONTIG, PyBUF_CONTIG_RO,
)
# items and format
items_fmt = (
([True if x % 2 else False for x in range(12)], '?'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'b'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'B'),
([(2**31-x) if x % 2 else (-2**31+x) for x in range(12)], 'l')
)
# shape, strides, offset
structure = (
([], [], 0),
([12], [], 0),
([12], [-1], 11),
([6], [2], 0),
([6], [-2], 11),
([3, 4], [], 0),
([3, 4], [-4, -1], 11),
([2, 2], [4, 1], 4),
([2, 2], [-4, -1], 8)
)
# ndarray creation flags
ndflags = (
0, ND_WRITABLE, ND_FORTRAN, ND_FORTRAN|ND_WRITABLE,
ND_PIL, ND_PIL|ND_WRITABLE
)
# flags that can actually be used as flags
real_flags = (0, PyBUF_WRITABLE, PyBUF_FORMAT,
PyBUF_WRITABLE|PyBUF_FORMAT)
for items, fmt in items_fmt:
itemsize = struct.calcsize(fmt)
for shape, strides, offset in structure:
strides = [v * itemsize for v in strides]
offset *= itemsize
for flags in ndflags:
if strides and (flags&ND_FORTRAN):
continue
if not shape and (flags&ND_PIL):
continue
_items = items if shape else items[0]
ex1 = ndarray(_items, format=fmt, flags=flags,
shape=shape, strides=strides, offset=offset)
ex2 = ex1[::-2] if shape else None
m1 = memoryview(ex1)
if ex2:
m2 = memoryview(ex2)
if ex1.ndim == 0 or (ex1.ndim == 1 and shape and strides):
self.assertEqual(m1, ex1)
if ex2 and ex2.ndim == 1 and shape and strides:
self.assertEqual(m2, ex2)
for req in requests:
for bits in real_flags:
self.verify_getbuf(ex1, ex1, req|bits)
self.verify_getbuf(ex1, m1, req|bits)
if ex2:
self.verify_getbuf(ex2, ex2, req|bits,
sliced=True)
self.verify_getbuf(ex2, m2, req|bits,
sliced=True)
items = [1,2,3,4,5,6,7,8,9,10,11,12]
# ND_GETBUF_FAIL
ex = ndarray(items, shape=[12], flags=ND_GETBUF_FAIL)
self.assertRaises(BufferError, ndarray, ex)
# Request complex structure from a simple exporter. In this
# particular case the test object is not PEP-3118 compliant.
base = ndarray([9], [1])
ex = ndarray(base, getbuf=PyBUF_SIMPLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_WRITABLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_STRIDES)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ANY_CONTIGUOUS)
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
def test_ndarray_exceptions(self):
nd = ndarray([9], [1])
ndm = ndarray([9], [1], flags=ND_VAREXPORT)
# Initialization of a new ndarray or mutation of an existing array.
for c in (ndarray, nd.push, ndm.push):
# Invalid types.
self.assertRaises(TypeError, c, {1,2,3})
self.assertRaises(TypeError, c, [1,2,'3'])
self.assertRaises(TypeError, c, [1,2,(3,4)])
self.assertRaises(TypeError, c, [1,2,3], shape={3})
self.assertRaises(TypeError, c, [1,2,3], shape=[3], strides={1})
self.assertRaises(TypeError, c, [1,2,3], shape=[3], offset=[])
self.assertRaises(TypeError, c, [1], shape=[1], format={})
self.assertRaises(TypeError, c, [1], shape=[1], flags={})
self.assertRaises(TypeError, c, [1], shape=[1], getbuf={})
# ND_FORTRAN flag is only valid without strides.
self.assertRaises(TypeError, c, [1], shape=[1], strides=[1],
flags=ND_FORTRAN)
# ND_PIL flag is only valid with ndim > 0.
self.assertRaises(TypeError, c, [1], shape=[], flags=ND_PIL)
# Invalid items.
self.assertRaises(ValueError, c, [], shape=[1])
self.assertRaises(ValueError, c, ['XXX'], shape=[1], format="L")
# Invalid combination of items and format.
self.assertRaises(struct.error, c, [1000], shape=[1], format="B")
self.assertRaises(ValueError, c, [1,(2,3)], shape=[2], format="B")
self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="QL")
# Invalid ndim.
n = ND_MAX_NDIM+1
self.assertRaises(ValueError, c, [1]*n, shape=[1]*n)
# Invalid shape.
self.assertRaises(ValueError, c, [1], shape=[-1])
self.assertRaises(ValueError, c, [1,2,3], shape=['3'])
self.assertRaises(OverflowError, c, [1], shape=[2**128])
# prod(shape) * itemsize != len(items)
self.assertRaises(ValueError, c, [1,2,3,4,5], shape=[2,2], offset=3)
# Invalid strides.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], strides=['1'])
self.assertRaises(OverflowError, c, [1], shape=[1],
strides=[2**128])
# Invalid combination of strides and shape.
self.assertRaises(ValueError, c, [1,2], shape=[2,1], strides=[1])
# Invalid combination of strides and format.
self.assertRaises(ValueError, c, [1,2,3,4], shape=[2], strides=[3],
format="L")
# Invalid offset.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], offset=4)
self.assertRaises(ValueError, c, [1,2,3], shape=[1], offset=3,
format="L")
# Invalid format.
self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="")
self.assertRaises(struct.error, c, [(1,2,3)], shape=[1],
format="@#$")
# Striding out of the memory bounds.
items = [1,2,3,4,5,6,7,8,9,10]
self.assertRaises(ValueError, c, items, shape=[2,3],
strides=[-3, -2], offset=5)
# Constructing consumer: format argument invalid.
self.assertRaises(TypeError, c, bytearray(), format="Q")
# Constructing original base object: getbuf argument invalid.
self.assertRaises(TypeError, c, [1], shape=[1], getbuf=PyBUF_FULL)
# Shape argument is mandatory for original base objects.
self.assertRaises(TypeError, c, [1])
# PyBUF_WRITABLE request to read-only provider.
self.assertRaises(BufferError, ndarray, b'123', getbuf=PyBUF_WRITABLE)
# ND_VAREXPORT can only be specified during construction.
nd = ndarray([9], [1], flags=ND_VAREXPORT)
self.assertRaises(ValueError, nd.push, [1], [1], flags=ND_VAREXPORT)
# Invalid operation for consumers: push/pop
nd = ndarray(b'123')
self.assertRaises(BufferError, nd.push, [1], [1])
self.assertRaises(BufferError, nd.pop)
# ND_VAREXPORT not set: push/pop fail with exported buffers
nd = ndarray([9], [1])
nd.push([1], [1])
m = memoryview(nd)
self.assertRaises(BufferError, nd.push, [1], [1])
self.assertRaises(BufferError, nd.pop)
m.release()
nd.pop()
# Single remaining buffer: pop fails
self.assertRaises(BufferError, nd.pop)
del nd
# get_pointer()
self.assertRaises(TypeError, get_pointer, {}, [1,2,3])
self.assertRaises(TypeError, get_pointer, b'123', {})
nd = ndarray(list(range(100)), shape=[1]*100)
self.assertRaises(ValueError, get_pointer, nd, [5])
nd = ndarray(list(range(12)), shape=[3,4])
self.assertRaises(ValueError, get_pointer, nd, [2,3,4])
self.assertRaises(ValueError, get_pointer, nd, [3,3])
self.assertRaises(ValueError, get_pointer, nd, [-3,3])
self.assertRaises(OverflowError, get_pointer, nd, [1<<64,3])
# tolist() needs format
ex = ndarray([1,2,3], shape=[3], format='L')
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(ValueError, nd.tolist)
# memoryview_from_buffer()
ex1 = ndarray([1,2,3], shape=[3], format='L')
ex2 = ndarray(ex1)
nd = ndarray(ex2)
self.assertRaises(TypeError, nd.memoryview_from_buffer)
nd = ndarray([(1,)*200], shape=[1], format='L'*200)
self.assertRaises(TypeError, nd.memoryview_from_buffer)
n = ND_MAX_NDIM
nd = ndarray(list(range(n)), shape=[1]*n)
self.assertRaises(ValueError, nd.memoryview_from_buffer)
# get_contiguous()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, get_contiguous, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, get_contiguous, nd, "xyz", 'C')
self.assertRaises(OverflowError, get_contiguous, nd, 2**64, 'C')
self.assertRaises(TypeError, get_contiguous, nd, PyBUF_READ, 961)
self.assertRaises(UnicodeEncodeError, get_contiguous, nd, PyBUF_READ,
'\u2007')
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'Z')
self.assertRaises(ValueError, get_contiguous, nd, 255, 'A')
# cmp_contig()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, cmp_contig, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, cmp_contig, {}, nd)
self.assertRaises(TypeError, cmp_contig, nd, {})
# is_contiguous()
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, is_contiguous, 1, 2, 3, 4, 5)
self.assertRaises(TypeError, is_contiguous, {}, 'A')
self.assertRaises(TypeError, is_contiguous, nd, 201)
def test_ndarray_linked_list(self):
for perm in permutations(range(5)):
m = [0]*5
nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
m[0] = memoryview(nd)
for i in range(1, 5):
nd.push([1,2,3], shape=[3])
m[i] = memoryview(nd)
for i in range(5):
m[perm[i]].release()
self.assertRaises(BufferError, nd.pop)
del nd
def test_ndarray_format_scalar(self):
# ndim = 0: scalar
for fmt, scalar, _ in iter_format(0):
itemsize = struct.calcsize(fmt)
nd = ndarray(scalar, shape=(), format=fmt)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=0, shape=(), strides=(),
lst=scalar)
def test_ndarray_format_shape(self):
# ndim = 1, shape = [n]
nitems = randrange(1, 10)
for fmt, items, _ in iter_format(nitems):
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[nitems], format=fmt, flags=flags)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=1, shape=(nitems,), strides=(itemsize,),
lst=items)
def test_ndarray_format_strides(self):
# ndim = 1, strides
nitems = randrange(1, 30)
for fmt, items, _ in iter_format(nitems):
itemsize = struct.calcsize(fmt)
for step in range(-5, 5):
if step == 0:
continue
shape = [len(items[::step])]
strides = [step*itemsize]
offset = itemsize*(nitems-1) if step < 0 else 0
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, strides=strides,
format=fmt, offset=offset, flags=flags)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=1, shape=shape, strides=strides,
lst=items[::step])
def test_ndarray_fortran(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
ex = ndarray(items, shape=(3, 4), strides=(1, 3))
nd = ndarray(ex, getbuf=PyBUF_F_CONTIGUOUS|PyBUF_FORMAT)
self.assertEqual(nd.tolist(), farray(items, (3, 4)))
def test_ndarray_multidim(self):
for ndim in range(5):
shape_t = [randrange(2, 10) for _ in range(ndim)]
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
if ndim == 0 and flags == ND_PIL:
continue
# C array
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
strides = strides_from_shape(ndim, shape, itemsize, 'C')
lst = carray(items, shape)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
if is_memoryview_format(fmt):
# memoryview: reconstruct strides
ex = ndarray(items, shape=shape, format=fmt)
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
self.assertTrue(nd.strides == ())
mv = nd.memoryview_from_buffer()
self.verify(mv, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# Fortran array
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_FORTRAN)
strides = strides_from_shape(ndim, shape, itemsize, 'F')
lst = farray(items, shape)
self.verify(nd, obj=None,
itemsize=itemsize, fmt=fmt, readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
def test_ndarray_index_invalid(self):
# not writable
nd = ndarray([1], shape=[1])
self.assertRaises(TypeError, nd.__setitem__, 1, 8)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, mv.__setitem__, 1, 8)
# cannot be deleted
nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
self.assertRaises(TypeError, nd.__delitem__, 1)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, mv.__delitem__, 1)
# overflow
nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
self.assertRaises(OverflowError, nd.__getitem__, 1<<64)
self.assertRaises(OverflowError, nd.__setitem__, 1<<64, 8)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(IndexError, mv.__getitem__, 1<<64)
self.assertRaises(IndexError, mv.__setitem__, 1<<64, 8)
# format
items = [1,2,3,4,5,6,7,8]
nd = ndarray(items, shape=[len(items)], format="B", flags=ND_WRITABLE)
self.assertRaises(struct.error, nd.__setitem__, 2, 300)
self.assertRaises(ValueError, nd.__setitem__, 1, (100, 200))
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(ValueError, mv.__setitem__, 2, 300)
self.assertRaises(TypeError, mv.__setitem__, 1, (100, 200))
items = [(1,2), (3,4), (5,6)]
nd = ndarray(items, shape=[len(items)], format="LQ", flags=ND_WRITABLE)
self.assertRaises(ValueError, nd.__setitem__, 2, 300)
self.assertRaises(struct.error, nd.__setitem__, 1, (b'\x001', 200))
def test_ndarray_index_scalar(self):
# scalar
nd = ndarray(1, shape=(), flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
x = nd[()]; self.assertEqual(x, 1)
x = nd[...]; self.assertEqual(x.tolist(), nd.tolist())
x = mv[()]; self.assertEqual(x, 1)
x = mv[...]; self.assertEqual(x.tolist(), nd.tolist())
self.assertRaises(TypeError, nd.__getitem__, 0)
self.assertRaises(TypeError, mv.__getitem__, 0)
self.assertRaises(TypeError, nd.__setitem__, 0, 8)
self.assertRaises(TypeError, mv.__setitem__, 0, 8)
self.assertEqual(nd.tolist(), 1)
self.assertEqual(mv.tolist(), 1)
nd[()] = 9; self.assertEqual(nd.tolist(), 9)
mv[()] = 9; self.assertEqual(mv.tolist(), 9)
nd[...] = 5; self.assertEqual(nd.tolist(), 5)
mv[...] = 5; self.assertEqual(mv.tolist(), 5)
def test_ndarray_index_null_strides(self):
ex = ndarray(list(range(2*4)), shape=[2, 4], flags=ND_WRITABLE)
nd = ndarray(ex, getbuf=PyBUF_CONTIG)
# Sub-views are only possible for full exporters.
self.assertRaises(BufferError, nd.__getitem__, 1)
# Same for slices.
self.assertRaises(BufferError, nd.__getitem__, slice(3,5,1))
def test_ndarray_index_getitem_single(self):
# getitem
for fmt, items, _ in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
self.assertRaises(IndexError, nd.__getitem__, -6)
self.assertRaises(IndexError, nd.__getitem__, 5)
if is_memoryview_format(fmt):
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(-5, 5):
self.assertEqual(mv[i], items[i])
self.assertRaises(IndexError, mv.__getitem__, -6)
self.assertRaises(IndexError, mv.__getitem__, 5)
# getitem with null strides
for fmt, items, _ in iter_format(5):
ex = ndarray(items, shape=[5], flags=ND_WRITABLE, format=fmt)
nd = ndarray(ex, getbuf=PyBUF_CONTIG|PyBUF_FORMAT)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
if is_memoryview_format(fmt):
mv = nd.memoryview_from_buffer()
self.assertIs(mv.__eq__(nd), NotImplemented)
for i in range(-5, 5):
self.assertEqual(mv[i], items[i])
# getitem with null format
items = [1,2,3,4,5]
ex = ndarray(items, shape=[5])
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
# getitem with null shape/strides/format
items = [1,2,3,4,5]
ex = ndarray(items, shape=[5])
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
for i in range(-5, 5):
self.assertEqual(nd[i], items[i])
def test_ndarray_index_setitem_single(self):
# assign single value
for fmt, items, single_item in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
for i in range(5):
items[i] = single_item
nd[i] = single_item
self.assertEqual(nd.tolist(), items)
self.assertRaises(IndexError, nd.__setitem__, -6, single_item)
self.assertRaises(IndexError, nd.__setitem__, 5, single_item)
if not is_memoryview_format(fmt):
continue
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(5):
items[i] = single_item
mv[i] = single_item
self.assertEqual(mv.tolist(), items)
self.assertRaises(IndexError, mv.__setitem__, -6, single_item)
self.assertRaises(IndexError, mv.__setitem__, 5, single_item)
# assign single value: lobject = robject
for fmt, items, single_item in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
for i in range(-5, 4):
items[i] = items[i+1]
nd[i] = nd[i+1]
self.assertEqual(nd.tolist(), items)
if not is_memoryview_format(fmt):
continue
nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertEqual(mv, nd)
for i in range(-5, 4):
items[i] = items[i+1]
mv[i] = mv[i+1]
self.assertEqual(mv.tolist(), items)
def test_ndarray_index_getitem_multidim(self):
shape_t = (2, 3, 5)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
for flags in (0, ND_PIL):
# C array
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
for k in range(-shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
# Fortran array
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_FORTRAN)
lst = farray(items, shape)
for i in range(-shape[0], shape[0]):
self.assertEqual(lst[i], nd[i].tolist())
for j in range(-shape[1], shape[1]):
self.assertEqual(lst[i][j], nd[i][j].tolist())
                        for k in range(-shape[2], shape[2]):
self.assertEqual(lst[i][j][k], nd[i][j][k])
def test_ndarray_sequence(self):
nd = ndarray(1, shape=())
self.assertRaises(TypeError, eval, "1 in nd", locals())
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertRaises(TypeError, eval, "1 in mv", locals())
for fmt, items, _ in iter_format(5):
nd = ndarray(items, shape=[5], format=fmt)
for i, v in enumerate(nd):
self.assertEqual(v, items[i])
self.assertTrue(v in nd)
if is_memoryview_format(fmt):
mv = memoryview(nd)
for i, v in enumerate(mv):
self.assertEqual(v, items[i])
self.assertTrue(v in mv)
def test_ndarray_slice_invalid(self):
items = [1,2,3,4,5,6,7,8]
# rvalue is not an exporter
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ml = memoryview(xl)
self.assertRaises(TypeError, xl.__setitem__, slice(0,8,1), items)
self.assertRaises(TypeError, ml.__setitem__, slice(0,8,1), items)
# rvalue is not a full exporter
xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
ex = ndarray(items, shape=[8], flags=ND_WRITABLE)
xr = ndarray(ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, xl.__setitem__, slice(0,8,1), xr)
# zero step
nd = ndarray(items, shape=[8], format="L", flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__, slice(0,1,0))
self.assertRaises(ValueError, mv.__getitem__, slice(0,1,0))
nd = ndarray(items, shape=[2,4], format="L", flags=ND_WRITABLE)
mv = memoryview(nd)
self.assertRaises(ValueError, nd.__getitem__,
(slice(0,1,1), slice(0,1,0)))
self.assertRaises(ValueError, nd.__getitem__,
(slice(0,1,0), slice(0,1,1)))
self.assertRaises(TypeError, nd.__getitem__, "@%$")
self.assertRaises(TypeError, nd.__getitem__, ("@%$", slice(0,1,1)))
self.assertRaises(TypeError, nd.__getitem__, (slice(0,1,1), {}))
# memoryview: not implemented
self.assertRaises(NotImplementedError, mv.__getitem__,
(slice(0,1,1), slice(0,1,0)))
self.assertRaises(TypeError, mv.__getitem__, "@%$")
# differing format
xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# differing itemsize
xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
yr = ndarray(items, shape=[8], format="L")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# differing ndim
xl = ndarray(items, shape=[2, 4], format="b", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
self.assertEqual(xl.tolist(), [[1,2,3,4], [5,6,7,8]])
self.assertRaises(NotImplementedError, ml.__setitem__, slice(0,1,1),
mr[7:8])
# differing shape
xl = ndarray(items, shape=[8], format="b", flags=ND_WRITABLE)
xr = ndarray(items, shape=[8], format="b")
ml = memoryview(xl)
mr = memoryview(xr)
self.assertRaises(ValueError, xl.__setitem__, slice(0,2,1), xr[7:8])
self.assertEqual(xl.tolist(), items)
self.assertRaises(ValueError, ml.__setitem__, slice(0,2,1), mr[7:8])
self.assertEqual(ml.tolist(), items)
# _testbuffer.c module functions
self.assertRaises(TypeError, slice_indices, slice(0,1,2), {})
self.assertRaises(TypeError, slice_indices, "###########", 1)
self.assertRaises(ValueError, slice_indices, slice(0,1,0), 4)
x = ndarray(items, shape=[8], format="b", flags=ND_PIL)
self.assertRaises(TypeError, x.add_suboffsets)
ex = ndarray(items, shape=[8], format="B")
x = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(TypeError, x.add_suboffsets)
def test_ndarray_slice_zero_shape(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
x = ndarray(items, shape=[12], format="L", flags=ND_WRITABLE)
y = ndarray(items, shape=[12], format="L")
x[4:4] = y[9:9]
self.assertEqual(x.tolist(), items)
ml = memoryview(x)
mr = memoryview(y)
self.assertEqual(ml, x)
self.assertEqual(ml, y)
ml[4:4] = mr[9:9]
self.assertEqual(ml.tolist(), items)
x = ndarray(items, shape=[3, 4], format="L", flags=ND_WRITABLE)
y = ndarray(items, shape=[4, 3], format="L")
x[1:2, 2:2] = y[1:2, 3:3]
self.assertEqual(x.tolist(), carray(items, [3, 4]))
def test_ndarray_slice_multidim(self):
shape_t = (2, 3, 5)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=shape, format=fmt, flags=flags)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_redundant_suboffsets(self):
shape_t = (2, 3, 5, 2)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
itemsize = struct.calcsize(fmt)
nd = ndarray(items, shape=shape, format=fmt)
nd.add_suboffsets()
ex = ndarray(items, shape=shape, format=fmt)
ex.add_suboffsets()
mv = memoryview(ex)
lst = carray(items, shape)
for slices in rslices_ndim(ndim, shape):
listerr = None
try:
sliced = multislice(lst, slices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
ndsliced = nd[slices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_assign_single(self):
for fmt, items, _ in iter_format(5):
for lslice in genslices(5):
for rslice in genslices(5):
for flags in (0, ND_PIL):
f = flags|ND_WRITABLE
nd = ndarray(items, shape=[5], format=fmt, flags=f)
ex = ndarray(items, shape=[5], format=fmt, flags=f)
mv = memoryview(ex)
lsterr = None
diff_structure = None
lst = items[:]
try:
lval = lst[lslice]
rval = lst[rslice]
lst[lslice] = lst[rslice]
diff_structure = len(lval) != len(rval)
except Exception as e:
lsterr = e.__class__
nderr = None
try:
nd[lslice] = nd[rslice]
except Exception as e:
nderr = e.__class__
if diff_structure: # ndarray cannot change shape
self.assertIs(nderr, ValueError)
else:
self.assertEqual(nd.tolist(), lst)
self.assertIs(nderr, lsterr)
if not is_memoryview_format(fmt):
continue
mverr = None
try:
mv[lslice] = mv[rslice]
except Exception as e:
mverr = e.__class__
if diff_structure: # memoryview cannot change shape
self.assertIs(mverr, ValueError)
else:
self.assertEqual(mv.tolist(), lst)
self.assertEqual(mv, nd)
self.assertIs(mverr, lsterr)
self.verify(mv, obj=ex,
itemsize=nd.itemsize, fmt=fmt, readonly=0,
ndim=nd.ndim, shape=nd.shape, strides=nd.strides,
lst=nd.tolist())
def test_ndarray_slice_assign_multidim(self):
shape_t = (2, 3, 5)
ndim = len(shape_t)
nitems = prod(shape_t)
for shape in permutations(shape_t):
fmt, items, _ = randitems(nitems)
for flags in (0, ND_PIL):
for _ in range(ITERATIONS):
lslices, rslices = randslice_from_shape(ndim, shape)
nd = ndarray(items, shape=shape, format=fmt,
flags=flags|ND_WRITABLE)
lst = carray(items, shape)
listerr = None
try:
result = multislice_assign(lst, lst, lslices, rslices)
except Exception as e:
listerr = e.__class__
nderr = None
try:
nd[lslices] = nd[rslices]
except Exception as e:
nderr = e.__class__
if nderr or listerr:
self.assertIs(nderr, listerr)
else:
self.assertEqual(nd.tolist(), result)
def test_ndarray_random(self):
# construction of valid arrays
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
t = rand_structure(itemsize, True, maxdim=MAXDIM,
maxshape=MAXSHAPE)
self.assertTrue(verify_structure(*t))
items = randitems_from_structure(fmt, t)
x = ndarray_from_structure(items, fmt, t)
xlist = x.tolist()
mv = memoryview(x)
if is_memoryview_format(fmt):
mvlist = mv.tolist()
self.assertEqual(mvlist, xlist)
if t[2] > 0:
# ndim > 0: test against suboffsets representation.
y = ndarray_from_structure(items, fmt, t, flags=ND_PIL)
ylist = y.tolist()
self.assertEqual(xlist, ylist)
mv = memoryview(y)
if is_memoryview_format(fmt):
self.assertEqual(mv, y)
mvlist = mv.tolist()
self.assertEqual(mvlist, ylist)
if numpy_array:
shape = t[3]
if 0 in shape:
continue # http://projects.scipy.org/numpy/ticket/1910
z = numpy_array_from_structure(items, fmt, t)
self.verify(x, obj=None,
itemsize=z.itemsize, fmt=fmt, readonly=0,
ndim=z.ndim, shape=z.shape, strides=z.strides,
lst=z.tolist())
def test_ndarray_random_invalid(self):
# exceptions during construction of invalid arrays
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
t = rand_structure(itemsize, False, maxdim=MAXDIM,
maxshape=MAXSHAPE)
self.assertFalse(verify_structure(*t))
items = randitems_from_structure(fmt, t)
nderr = False
try:
x = ndarray_from_structure(items, fmt, t)
except Exception as e:
nderr = e.__class__
self.assertTrue(nderr)
if numpy_array:
numpy_err = False
try:
y = numpy_array_from_structure(items, fmt, t)
except Exception as e:
numpy_err = e.__class__
if 0: # http://projects.scipy.org/numpy/ticket/1910
self.assertTrue(numpy_err)
def test_ndarray_random_slice_assign(self):
# valid slice assignments
for _ in range(ITERATIONS):
for fmt in fmtdict['@']:
itemsize = struct.calcsize(fmt)
lshape, rshape, lslices, rslices = \
rand_aligned_slices(maxdim=MAXDIM, maxshape=MAXSHAPE)
tl = rand_structure(itemsize, True, shape=lshape)
tr = rand_structure(itemsize, True, shape=rshape)
self.assertTrue(verify_structure(*tl))
self.assertTrue(verify_structure(*tr))
litems = randitems_from_structure(fmt, tl)
ritems = randitems_from_structure(fmt, tr)
xl = ndarray_from_structure(litems, fmt, tl)
xr = ndarray_from_structure(ritems, fmt, tr)
xl[lslices] = xr[rslices]
xllist = xl.tolist()
xrlist = xr.tolist()
ml = memoryview(xl)
mr = memoryview(xr)
self.assertEqual(ml.tolist(), xllist)
self.assertEqual(mr.tolist(), xrlist)
if tl[2] > 0 and tr[2] > 0:
# ndim > 0: test against suboffsets representation.
yl = ndarray_from_structure(litems, fmt, tl, flags=ND_PIL)
yr = ndarray_from_structure(ritems, fmt, tr, flags=ND_PIL)
yl[lslices] = yr[rslices]
yllist = yl.tolist()
yrlist = yr.tolist()
self.assertEqual(xllist, yllist)
self.assertEqual(xrlist, yrlist)
ml = memoryview(yl)
mr = memoryview(yr)
self.assertEqual(ml.tolist(), yllist)
self.assertEqual(mr.tolist(), yrlist)
if numpy_array:
if 0 in lshape or 0 in rshape:
continue # http://projects.scipy.org/numpy/ticket/1910
zl = numpy_array_from_structure(litems, fmt, tl)
zr = numpy_array_from_structure(ritems, fmt, tr)
zl[lslices] = zr[rslices]
if not is_overlapping(tl) and not is_overlapping(tr):
# Slice assignment of overlapping structures
# is undefined in NumPy.
self.verify(xl, obj=None,
itemsize=zl.itemsize, fmt=fmt, readonly=0,
ndim=zl.ndim, shape=zl.shape,
strides=zl.strides, lst=zl.tolist())
self.verify(xr, obj=None,
itemsize=zr.itemsize, fmt=fmt, readonly=0,
ndim=zr.ndim, shape=zr.shape,
strides=zr.strides, lst=zr.tolist())
def test_ndarray_re_export(self):
items = [1,2,3,4,5,6,7,8,9,10,11,12]
nd = ndarray(items, shape=[3,4], flags=ND_PIL)
ex = ndarray(nd)
self.assertTrue(ex.flags & ND_PIL)
self.assertIs(ex.obj, nd)
self.assertEqual(ex.suboffsets, (0, -1))
self.assertFalse(ex.c_contiguous)
self.assertFalse(ex.f_contiguous)
self.assertFalse(ex.contiguous)
def test_ndarray_zero_shape(self):
# zeros in shape
for flags in (0, ND_PIL):
nd = ndarray([1,2,3], shape=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [])
self.assertEqual(mv.tolist(), [])
nd = ndarray([1,2,3], shape=[0,3,3], flags=flags)
self.assertEqual(nd.tolist(), [])
nd = ndarray([1,2,3], shape=[3,0,3], flags=flags)
self.assertEqual(nd.tolist(), [[], [], []])
nd = ndarray([1,2,3], shape=[3,3,0], flags=flags)
self.assertEqual(nd.tolist(),
[[[], [], []], [[], [], []], [[], [], []]])
def test_ndarray_zero_strides(self):
# zero strides
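        # (a single stored item with stride 0 is broadcast across the whole shape)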
for flags in (0, ND_PIL):
nd = ndarray([1], shape=[5], strides=[0], flags=flags)
mv = memoryview(nd)
self.assertEqual(mv, nd)
self.assertEqual(nd.tolist(), [1, 1, 1, 1, 1])
self.assertEqual(mv.tolist(), [1, 1, 1, 1, 1])
def test_ndarray_offset(self):
nd = ndarray(list(range(20)), shape=[3], offset=7)
self.assertEqual(nd.offset, 7)
self.assertEqual(nd.tolist(), [7,8,9])
def test_ndarray_memoryview_from_buffer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
m = nd.memoryview_from_buffer()
self.assertEqual(m, nd)
def test_ndarray_get_pointer(self):
for flags in (0, ND_PIL):
nd = ndarray(list(range(3)), shape=[3], flags=flags)
for i in range(3):
self.assertEqual(nd[i], get_pointer(nd, [i]))
def test_ndarray_tolist_null_strides(self):
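        # A consumer that requests PyBUF_ND gets a buffer with strides == NULL;
        # tolist() must still recover the C-contiguous layout from the shape.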
ex = ndarray(list(range(20)), shape=[2,2,5])
nd = ndarray(ex, getbuf=PyBUF_ND|PyBUF_FORMAT)
self.assertEqual(nd.tolist(), ex.tolist())
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
def test_ndarray_cmp_contig(self):
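        # cmp_contig() compares the contiguous representations of two buffers:
        # equal items arranged as 3x4 vs. 4x3 do not match, while a one-byte
        # ndarray matches the equivalent bytes object in either argument order.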
self.assertFalse(cmp_contig(b"123", b"456"))
x = ndarray(list(range(12)), shape=[3,4])
y = ndarray(list(range(12)), shape=[4,3])
self.assertFalse(cmp_contig(x, y))
x = ndarray([1], shape=[1], format="B")
self.assertTrue(cmp_contig(x, b'\x01'))
self.assertTrue(cmp_contig(b'\x01', x))
def test_ndarray_hash(self):
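        # ndarray hashing is only allowed for read-only exporters (a view over a
        # writable array.array raises ValueError); the hash is that of the logical
        # contents flattened in C order, regardless of the physical layout.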
a = array.array('L', [1,2,3])
nd = ndarray(a)
self.assertRaises(ValueError, hash, nd)
# one-dimensional
b = bytes(list(range(12)))
nd = ndarray(list(range(12)), shape=[12])
self.assertEqual(hash(nd), hash(b))
# C-contiguous
nd = ndarray(list(range(12)), shape=[3,4])
self.assertEqual(hash(nd), hash(b))
nd = ndarray(list(range(12)), shape=[3,2,2])
self.assertEqual(hash(nd), hash(b))
# Fortran contiguous
b = bytes(transpose(list(range(12)), shape=[4,3]))
nd = ndarray(list(range(12)), shape=[3,4], flags=ND_FORTRAN)
self.assertEqual(hash(nd), hash(b))
b = bytes(transpose(list(range(12)), shape=[2,3,2]))
nd = ndarray(list(range(12)), shape=[2,3,2], flags=ND_FORTRAN)
self.assertEqual(hash(nd), hash(b))
# suboffsets
b = bytes(list(range(12)))
nd = ndarray(list(range(12)), shape=[2,2,3], flags=ND_PIL)
self.assertEqual(hash(nd), hash(b))
# non-byte formats
nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
self.assertEqual(hash(nd), hash(nd.tobytes()))
def test_py_buffer_to_contiguous(self):
# The requests are used in _testbuffer.c:py_buffer_to_contiguous
# to generate buffers without full information for testing.
requests = (
# distinct flags
PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
# compound requests
PyBUF_FULL, PyBUF_FULL_RO,
PyBUF_RECORDS, PyBUF_RECORDS_RO,
PyBUF_STRIDED, PyBUF_STRIDED_RO,
PyBUF_CONTIG, PyBUF_CONTIG_RO,
)
# no buffer interface
self.assertRaises(TypeError, py_buffer_to_contiguous, {}, 'F',
PyBUF_FULL_RO)
# scalar, read-only request
nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, nd.tobytes())
# zeros in shape
nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, b'')
nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, b'')
### One-dimensional arrays are trivial, since Fortran and C order
### are the same.
# one-dimensional
for f in [0, ND_FORTRAN]:
nd = ndarray([1], shape=[1], format="h", flags=f|ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
nd = ndarray([1, 2, 3], shape=[3], format="b", flags=f|ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
# one-dimensional, non-contiguous input
nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in [PyBUF_STRIDES, PyBUF_FULL]:
b = py_buffer_to_contiguous(nd, order, request)
self.assertEqual(b, ndbytes)
nd = nd[::-1]
ndbytes = nd.tobytes()
for order in ['C', 'F', 'A']:
for request in requests:
try:
b = py_buffer_to_contiguous(nd, order, request)
except BufferError:
continue
self.assertEqual(b, ndbytes)
###
### Multi-dimensional arrays:
###
### The goal here is to preserve the logical representation of the
### input array but change the physical representation if necessary.
###
### _testbuffer example:
### ====================
###
### C input array:
### --------------
### >>> nd = ndarray(list(range(12)), shape=[3, 4])
### >>> nd.tolist()
### [[0, 1, 2, 3],
### [4, 5, 6, 7],
### [8, 9, 10, 11]]
###
### Fortran output:
### ---------------
### >>> py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
### >>> b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
###
### The return value corresponds to this input list for
### _testbuffer's ndarray:
### >>> nd = ndarray([0,4,8,1,5,9,2,6,10,3,7,11], shape=[3,4],
### flags=ND_FORTRAN)
### >>> nd.tolist()
### [[0, 1, 2, 3],
### [4, 5, 6, 7],
### [8, 9, 10, 11]]
###
### The logical array is the same, but the values in memory are now
### in Fortran order.
###
### NumPy example:
### ==============
### _testbuffer's ndarray takes lists to initialize the memory.
### Here's the same sequence in NumPy:
###
### C input:
### --------
### >>> nd = ndarray(buffer=bytearray(list(range(12))),
### shape=[3, 4], dtype='B')
### >>> nd
### array([[ 0, 1, 2, 3],
### [ 4, 5, 6, 7],
### [ 8, 9, 10, 11]], dtype=uint8)
###
### Fortran output:
### ---------------
### >>> fortran_buf = nd.tostring(order='F')
### >>> fortran_buf
### b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
###
### >>> nd = ndarray(buffer=fortran_buf, shape=[3, 4],
### dtype='B', order='F')
###
### >>> nd
### array([[ 0, 1, 2, 3],
### [ 4, 5, 6, 7],
### [ 8, 9, 10, 11]], dtype=uint8)
###
# multi-dimensional, contiguous input
lst = list(range(12))
for f in [0, ND_FORTRAN]:
nd = ndarray(lst, shape=[3, 4], flags=f|ND_WRITABLE)
if numpy_array:
na = numpy_array(buffer=bytearray(lst),
shape=[3, 4], dtype='B',
order='C' if f == 0 else 'F')
# 'C' request
if f == ND_FORTRAN: # 'F' to 'C'
x = ndarray(transpose(lst, [4, 3]), shape=[3, 4],
flags=ND_WRITABLE)
expected = x.tobytes()
else:
expected = nd.tobytes()
for request in requests:
try:
b = py_buffer_to_contiguous(nd, 'C', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# a C array that is logically identical to the input array.
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='C'))
# 'F' request
if f == 0: # 'C' to 'F'
x = ndarray(transpose(lst, [3, 4]), shape=[4, 3],
flags=ND_WRITABLE)
else:
x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
expected = x.tobytes()
for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
PyBUF_STRIDES, PyBUF_ND]:
try:
b = py_buffer_to_contiguous(nd, 'F', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# a Fortran array that is logically identical to the input array.
y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='F'))
# 'A' request
if f == ND_FORTRAN:
x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
expected = x.tobytes()
else:
expected = nd.tobytes()
for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
PyBUF_STRIDES, PyBUF_ND]:
try:
b = py_buffer_to_contiguous(nd, 'A', request)
except BufferError:
continue
self.assertEqual(b, expected)
# Check that output can be used as the basis for constructing
# an array with order=f that is logically identical to the input
# array.
y = ndarray([v for v in b], shape=[3, 4], flags=f|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
if numpy_array:
self.assertEqual(b, na.tostring(order='A'))
# multi-dimensional, non-contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)
# 'C'
b = py_buffer_to_contiguous(nd, 'C', PyBUF_FULL_RO)
self.assertEqual(b, nd.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
# 'F'
b = py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
x = ndarray(transpose(lst, [3, 4]), shape=[4, 3], flags=ND_WRITABLE)
self.assertEqual(b, x.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
# 'A'
b = py_buffer_to_contiguous(nd, 'A', PyBUF_FULL_RO)
self.assertEqual(b, nd.tobytes())
y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
self.assertEqual(memoryview(y), memoryview(nd))
def test_memoryview_construction(self):
items_shape = [(9, []), ([1,2,3], [3]), (list(range(2*3*5)), [2,3,5])]
# NumPy style, C-contiguous:
for items, shape in items_shape:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape)
m = memoryview(ex)
self.assertTrue(m.c_contiguous)
self.assertTrue(m.contiguous)
ndim = len(shape)
strides = strides_from_shape(ndim, shape, 1, 'C')
lst = carray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PyMemoryView_FromBuffer(): no strides
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
self.assertEqual(nd.strides, ())
m = nd.memoryview_from_buffer()
self.verify(m, obj=None,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PyMemoryView_FromBuffer(): no format, shape, strides
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertEqual(nd.format, '')
self.assertEqual(nd.shape, ())
self.assertEqual(nd.strides, ())
m = nd.memoryview_from_buffer()
lst = [items] if ndim == 0 else items
self.verify(m, obj=None,
itemsize=1, fmt='B', readonly=1,
ndim=1, shape=[ex.nbytes], strides=(1,),
lst=lst)
# NumPy style, Fortran contiguous:
for items, shape in items_shape:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape, flags=ND_FORTRAN)
m = memoryview(ex)
self.assertTrue(m.f_contiguous)
self.assertTrue(m.contiguous)
ndim = len(shape)
strides = strides_from_shape(ndim, shape, 1, 'F')
lst = farray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst)
# PIL style:
for items, shape in items_shape[1:]:
# From PEP-3118 compliant exporter:
ex = ndarray(items, shape=shape, flags=ND_PIL)
m = memoryview(ex)
ndim = len(shape)
lst = carray(items, shape)
self.verify(m, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=ex.strides,
lst=lst)
# From memoryview:
m2 = memoryview(m)
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=ndim, shape=shape, strides=ex.strides,
lst=lst)
# Invalid number of arguments:
self.assertRaises(TypeError, memoryview, b'9', 'x')
# Not a buffer provider:
self.assertRaises(TypeError, memoryview, {})
# Non-compliant buffer provider:
ex = ndarray([1,2,3], shape=[3])
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
self.assertRaises(BufferError, memoryview, nd)
nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
self.assertRaises(BufferError, memoryview, nd)
# ndim > 64
nd = ndarray([1]*128, shape=[1]*128, format='L')
self.assertRaises(ValueError, memoryview, nd)
self.assertRaises(ValueError, nd.memoryview_from_buffer)
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'C')
self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'F')
self.assertRaises(ValueError, get_contiguous, nd[::-1], PyBUF_READ, 'C')
def test_memoryview_cast_zero_shape(self):
# Casts are undefined if buffer is multidimensional and shape
# contains zeros. These arrays are regarded as C-contiguous by
# Numpy and PyBuffer_GetContiguous(), so they are not caught by
# the test for C-contiguity in memory_cast().
items = [1,2,3]
        for shape in ([0,3,3], [3,0,3], [3,3,0]):
ex = ndarray(items, shape=shape)
self.assertTrue(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
# Monodimensional empty view can be cast (issue #19014).
for fmt, _, _ in iter_format(1, 'memoryview'):
msrc = memoryview(b'')
m = msrc.cast(fmt)
self.assertEqual(m.tobytes(), b'')
self.assertEqual(m.tolist(), [])
def test_memoryview_struct_module(self):
class INT(object):
def __init__(self, val):
self.val = val
def __int__(self):
return self.val
class IDX(object):
def __init__(self, val):
self.val = val
def __index__(self):
return self.val
def f(): return 7
values = [INT(9), IDX(9),
2.2+3j, Decimal("-21.1"), 12.2, Fraction(5, 2),
[1,2,3], {4,5,6}, {7:8}, (), (9,),
True, False, None, NotImplemented,
b'a', b'abc', bytearray(b'a'), bytearray(b'abc'),
'a', 'abc', r'a', r'abc',
f, lambda x: x]
for fmt, items, item in iter_format(10, 'memoryview'):
ex = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
nd = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
struct.pack_into(fmt, nd, 0, item)
m[0] = item
self.assertEqual(m[0], nd[0])
itemsize = struct.calcsize(fmt)
if 'P' in fmt:
continue
for v in values:
struct_err = None
try:
struct.pack_into(fmt, nd, itemsize, v)
except struct.error:
struct_err = struct.error
mv_err = None
try:
m[1] = v
except (TypeError, ValueError) as e:
mv_err = e.__class__
if struct_err or mv_err:
self.assertIsNot(struct_err, None)
self.assertIsNot(mv_err, None)
else:
self.assertEqual(m[1], nd[1])
def test_memoryview_cast_zero_strides(self):
# Casts are undefined if strides contains zeros. These arrays are
# (sometimes!) regarded as C-contiguous by Numpy, but not by
# PyBuffer_GetContiguous().
ex = ndarray([1,2,3], shape=[3], strides=[0])
self.assertFalse(ex.c_contiguous)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, 'c')
def test_memoryview_cast_invalid(self):
# invalid format
for sfmt in NON_BYTE_FORMAT:
sformat = '@' + sfmt if randrange(2) else sfmt
ssize = struct.calcsize(sformat)
for dfmt in NON_BYTE_FORMAT:
dformat = '@' + dfmt if randrange(2) else dfmt
dsize = struct.calcsize(dformat)
ex = ndarray(list(range(32)), shape=[32//ssize], format=sformat)
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, dfmt, [32//dsize])
for sfmt, sitems, _ in iter_format(1):
ex = ndarray(sitems, shape=[1], format=sfmt)
msrc = memoryview(ex)
for dfmt, _, _ in iter_format(1):
if (not is_memoryview_format(sfmt) or
not is_memoryview_format(dfmt)):
self.assertRaises(ValueError, msrc.cast, dfmt,
[32//dsize])
else:
if not is_byte_format(sfmt) and not is_byte_format(dfmt):
self.assertRaises(TypeError, msrc.cast, dfmt,
[32//dsize])
# invalid shape
size_h = struct.calcsize('h')
size_d = struct.calcsize('d')
ex = ndarray(list(range(2*2*size_d)), shape=[2,2,size_d], format='h')
msrc = memoryview(ex)
self.assertRaises(TypeError, msrc.cast, shape=[2,2,size_h], format='d')
ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
m = memoryview(ex)
# incorrect number of args
self.assertRaises(TypeError, m.cast)
self.assertRaises(TypeError, m.cast, 1, 2, 3)
# incorrect dest format type
self.assertRaises(TypeError, m.cast, {})
# incorrect dest format
self.assertRaises(ValueError, m.cast, "X")
self.assertRaises(ValueError, m.cast, "@X")
self.assertRaises(ValueError, m.cast, "@XY")
# dest format not implemented
self.assertRaises(ValueError, m.cast, "=B")
self.assertRaises(ValueError, m.cast, "!L")
self.assertRaises(ValueError, m.cast, "<P")
self.assertRaises(ValueError, m.cast, ">l")
self.assertRaises(ValueError, m.cast, "BI")
self.assertRaises(ValueError, m.cast, "xBI")
# src format not implemented
ex = ndarray([(1,2), (3,4)], shape=[2], format="II")
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__, 0)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 8)
self.assertRaises(NotImplementedError, m.tolist)
# incorrect shape type
ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "B", shape={})
# incorrect shape elements
ex = ndarray(list(range(120)), shape=[2*3*4*5])
m = memoryview(ex)
self.assertRaises(OverflowError, m.cast, "B", shape=[2**64])
self.assertRaises(ValueError, m.cast, "B", shape=[-1])
self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,-1])
self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,0])
self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5,6,7,'x'])
# N-D -> N-D cast
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3,5,7,11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])
# cast with ndim > 64
nd = ndarray(list(range(128)), shape=[128], format='I')
m = memoryview(nd)
self.assertRaises(ValueError, m.cast, 'I', [1]*128)
# view->len not a multiple of itemsize
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])
# product(shape) * itemsize != buffer size
ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
m = memoryview(ex)
self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5])
# product(shape) * itemsize overflow
nd = ndarray(list(range(128)), shape=[128], format='I')
m1 = memoryview(nd)
nd = ndarray(list(range(128)), shape=[128], format='B')
m2 = memoryview(nd)
if sys.maxsize == 2**63-1:
self.assertRaises(TypeError, m1.cast, 'B',
[7, 7, 73, 127, 337, 92737, 649657])
self.assertRaises(ValueError, m1.cast, 'B',
[2**20, 2**20, 2**10, 2**10, 2**3])
self.assertRaises(ValueError, m2.cast, 'I',
[2**20, 2**20, 2**10, 2**10, 2**1])
else:
self.assertRaises(TypeError, m1.cast, 'B',
[1, 2147483647])
self.assertRaises(ValueError, m1.cast, 'B',
[2**10, 2**10, 2**5, 2**5, 2**1])
self.assertRaises(ValueError, m2.cast, 'I',
[2**10, 2**10, 2**5, 2**3, 2**1])
def test_memoryview_cast(self):
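        # Round-trip each sample format through a byte view ('B', 'b' or 'c') and
        # back, and check casts between ndim 0 and ndim 1; every intermediate view
        # must describe the same memory as the original exporter.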
bytespec = (
('B', lambda ex: list(ex.tobytes())),
('b', lambda ex: [x-256 if x > 127 else x for x in list(ex.tobytes())]),
('c', lambda ex: [bytes(chr(x), 'latin-1') for x in list(ex.tobytes())]),
)
def iter_roundtrip(ex, m, items, fmt):
srcsize = struct.calcsize(fmt)
for bytefmt, to_bytelist in bytespec:
m2 = m.cast(bytefmt)
lst = to_bytelist(ex)
self.verify(m2, obj=ex,
itemsize=1, fmt=bytefmt, readonly=0,
ndim=1, shape=[31*srcsize], strides=(1,),
lst=lst, cast=True)
m3 = m2.cast(fmt)
self.assertEqual(m3, ex)
lst = ex.tolist()
self.verify(m3, obj=ex,
itemsize=srcsize, fmt=fmt, readonly=0,
ndim=1, shape=[31], strides=(srcsize,),
lst=lst, cast=True)
# cast from ndim = 0 to ndim = 1
srcsize = struct.calcsize('I')
ex = ndarray(9, shape=[], format='I')
destitems, destshape = cast_items(ex, 'B', 1)
m = memoryview(ex)
m2 = m.cast('B')
self.verify(m2, obj=ex,
itemsize=1, fmt='B', readonly=1,
ndim=1, shape=destshape, strides=(1,),
lst=destitems, cast=True)
# cast from ndim = 1 to ndim = 0
destsize = struct.calcsize('I')
ex = ndarray([9]*destsize, shape=[destsize], format='B')
destitems, destshape = cast_items(ex, 'I', destsize, shape=[])
m = memoryview(ex)
m2 = m.cast('I', shape=[])
self.verify(m2, obj=ex,
itemsize=destsize, fmt='I', readonly=1,
ndim=0, shape=(), strides=(),
lst=destitems, cast=True)
# array.array: roundtrip to/from bytes
for fmt, items, _ in iter_format(31, 'array'):
ex = array.array(fmt, items)
m = memoryview(ex)
iter_roundtrip(ex, m, items, fmt)
# ndarray: roundtrip to/from bytes
for fmt, items, _ in iter_format(31, 'memoryview'):
ex = ndarray(items, shape=[31], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
iter_roundtrip(ex, m, items, fmt)
def test_memoryview_cast_1D_ND(self):
# Cast between C-contiguous buffers. At least one buffer must
# be 1D, at least one format must be 'c', 'b' or 'B'.
for _tshape in gencastshapes():
for char in fmtdict['@']:
tfmt = ('', '@')[randrange(2)] + char
tsize = struct.calcsize(tfmt)
n = prod(_tshape) * tsize
obj = 'memoryview' if is_byte_format(tfmt) else 'bytefmt'
for fmt, items, _ in iter_format(n, obj):
size = struct.calcsize(fmt)
shape = [n] if n > 0 else []
tshape = _tshape + [size]
ex = ndarray(items, shape=shape, format=fmt)
m = memoryview(ex)
titems, tshape = cast_items(ex, tfmt, tsize, shape=tshape)
if titems is None:
self.assertRaises(TypeError, m.cast, tfmt, tshape)
continue
if titems == 'nan':
continue # NaNs in lists are a recipe for trouble.
# 1D -> ND
nd = ndarray(titems, shape=tshape, format=tfmt)
m2 = m.cast(tfmt, shape=tshape)
ndim = len(tshape)
strides = nd.strides
lst = nd.tolist()
self.verify(m2, obj=ex,
itemsize=tsize, fmt=tfmt, readonly=1,
ndim=ndim, shape=tshape, strides=strides,
lst=lst, cast=True)
# ND -> 1D
m3 = m2.cast(fmt)
m4 = m2.cast(fmt, shape=shape)
ndim = len(shape)
strides = ex.strides
lst = ex.tolist()
self.verify(m3, obj=ex,
itemsize=size, fmt=fmt, readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst, cast=True)
self.verify(m4, obj=ex,
itemsize=size, fmt=fmt, readonly=1,
ndim=ndim, shape=shape, strides=strides,
lst=lst, cast=True)
def test_memoryview_tolist(self):
# Most tolist() tests are in self.verify() etc.
a = array.array('h', list(range(-6, 6)))
m = memoryview(a)
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
a = a[2::3]
m = m[2::3]
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
ex = ndarray(list(range(2*3*5*7*11)), shape=[11,2,7,3,5], format='L')
m = memoryview(ex)
self.assertEqual(m.tolist(), ex.tolist())
ex = ndarray([(2, 5), (7, 11)], shape=[2], format='lh')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b'12345'], shape=[1], format="s")
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
ex = ndarray([b"a",b"b",b"c",b"d",b"e",b"f"], shape=[2,3], format='s')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.tolist)
def test_memoryview_repr(self):
m = memoryview(bytearray(9))
r = m.__repr__()
self.assertTrue(r.startswith("<memory"))
m.release()
r = m.__repr__()
self.assertTrue(r.startswith("<released"))
def test_memoryview_sequence(self):
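        # Membership tests unpack each item; 3e400 and 5e700 both overflow to the
        # same float('inf'), so the second assertIn succeeds.  A zero-dimensional
        # exporter does not support 'in' at all.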
for fmt in ('d', 'f'):
inf = float(3e400)
ex = array.array(fmt, [1.0, inf, 3.0])
m = memoryview(ex)
self.assertIn(1.0, m)
self.assertIn(5e700, m)
self.assertIn(3.0, m)
ex = ndarray(9.0, [], format='f')
m = memoryview(ex)
self.assertRaises(TypeError, eval, "9.0 in m", locals())
def test_memoryview_index(self):
# ndim = 0
ex = ndarray(12.5, shape=[], format='d')
m = memoryview(ex)
self.assertEqual(m[()], 12.5)
self.assertEqual(m[...], m)
self.assertEqual(m[...], ex)
self.assertRaises(TypeError, m.__getitem__, 0)
ex = ndarray((1,2,3), shape=[], format='iii')
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__, ())
# range
ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(IndexError, m.__getitem__, 2**64)
self.assertRaises(TypeError, m.__getitem__, 2.0)
self.assertRaises(TypeError, m.__getitem__, 0.0)
# out of bounds
self.assertRaises(IndexError, m.__getitem__, -8)
self.assertRaises(IndexError, m.__getitem__, 8)
# Not implemented: multidimensional sub-views
ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__, 0)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 9)
self.assertRaises(NotImplementedError, m.__getitem__, 0)
def test_memoryview_assign(self):
# ndim = 0
ex = ndarray(12.5, shape=[], format='f', flags=ND_WRITABLE)
m = memoryview(ex)
m[()] = 22.5
self.assertEqual(m[()], 22.5)
m[...] = 23.5
self.assertEqual(m[()], 23.5)
self.assertRaises(TypeError, m.__setitem__, 0, 24.7)
# read-only
ex = ndarray(list(range(7)), shape=[7])
m = memoryview(ex)
self.assertRaises(TypeError, m.__setitem__, 2, 10)
# range
ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(IndexError, m.__setitem__, 2**64, 9)
self.assertRaises(TypeError, m.__setitem__, 2.0, 10)
self.assertRaises(TypeError, m.__setitem__, 0.0, 11)
# out of bounds
self.assertRaises(IndexError, m.__setitem__, -8, 20)
self.assertRaises(IndexError, m.__setitem__, 8, 25)
# pack_single() success:
for fmt in fmtdict['@']:
if fmt == 'c' or fmt == '?':
continue
ex = ndarray([1,2,3], shape=[3], format=fmt, flags=ND_WRITABLE)
m = memoryview(ex)
i = randrange(-3, 3)
m[i] = 8
self.assertEqual(m[i], 8)
self.assertEqual(m[i], ex[i])
ex = ndarray([b'1', b'2', b'3'], shape=[3], format='c',
flags=ND_WRITABLE)
m = memoryview(ex)
m[2] = b'9'
self.assertEqual(m[2], b'9')
ex = ndarray([True, False, True], shape=[3], format='?',
flags=ND_WRITABLE)
m = memoryview(ex)
m[1] = True
self.assertEqual(m[1], True)
# pack_single() exceptions:
nd = ndarray([b'x'], shape=[1], format='c', flags=ND_WRITABLE)
m = memoryview(nd)
self.assertRaises(TypeError, m.__setitem__, 0, 100)
ex = ndarray(list(range(120)), shape=[1,2,3,4,5], flags=ND_WRITABLE)
m1 = memoryview(ex)
for fmt, _range in fmtdict['@'].items():
if (fmt == '?'): # PyObject_IsTrue() accepts anything
continue
if fmt == 'c': # special case tested above
continue
m2 = m1.cast(fmt)
lo, hi = _range
if fmt == 'd' or fmt == 'f':
lo, hi = -2**1024, 2**1024
if fmt != 'P': # PyLong_AsVoidPtr() accepts negative numbers
self.assertRaises(ValueError, m2.__setitem__, 0, lo-1)
self.assertRaises(TypeError, m2.__setitem__, 0, "xyz")
self.assertRaises(ValueError, m2.__setitem__, 0, hi)
# invalid item
m2 = m1.cast('c')
self.assertRaises(ValueError, m2.__setitem__, 0, b'\xff\xff')
# format not implemented
ex = ndarray(list(range(1)), shape=[1], format="xL", flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
ex = ndarray([b'12345'], shape=[1], format="s", flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)
# Not implemented: multidimensional sub-views
ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__setitem__, 0, [2, 3])
def test_memoryview_slice(self):
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
m = memoryview(ex)
# zero step
self.assertRaises(ValueError, m.__getitem__, slice(0,2,0))
self.assertRaises(ValueError, m.__setitem__, slice(0,2,0),
bytearray([1,2]))
# invalid slice key
self.assertRaises(TypeError, m.__getitem__, ())
# multidimensional slices
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
m = memoryview(ex)
self.assertRaises(NotImplementedError, m.__getitem__,
(slice(0,2,1), slice(0,2,1)))
self.assertRaises(NotImplementedError, m.__setitem__,
(slice(0,2,1), slice(0,2,1)), bytearray([1,2]))
# invalid slice tuple
self.assertRaises(TypeError, m.__getitem__, (slice(0,2,1), {}))
self.assertRaises(TypeError, m.__setitem__, (slice(0,2,1), {}),
bytearray([1,2]))
# rvalue is not an exporter
self.assertRaises(TypeError, m.__setitem__, slice(0,1,1), [1])
# non-contiguous slice assignment
for flags in (0, ND_PIL):
ex1 = ndarray(list(range(12)), shape=[12], strides=[-1], offset=11,
flags=ND_WRITABLE|flags)
ex2 = ndarray(list(range(24)), shape=[12], strides=[2], flags=flags)
m1 = memoryview(ex1)
m2 = memoryview(ex2)
ex1[2:5] = ex1[2:5]
m1[2:5] = m2[2:5]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
ex1[1:3][::-1] = ex2[0:2][::1]
m1[1:3][::-1] = m2[0:2][::1]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
ex1[4:1:-2][::-1] = ex1[1:4:2][::1]
m1[4:1:-2][::-1] = m1[1:4:2][::1]
self.assertEqual(m1, ex1)
self.assertEqual(m2, ex2)
def test_memoryview_array(self):
def cmptest(testcase, a, b, m, singleitem):
for i, _ in enumerate(a):
ai = a[i]
mi = m[i]
testcase.assertEqual(ai, mi)
a[i] = singleitem
if singleitem != ai:
testcase.assertNotEqual(a, m)
testcase.assertNotEqual(a, b)
else:
testcase.assertEqual(a, m)
testcase.assertEqual(a, b)
m[i] = singleitem
testcase.assertEqual(a, m)
testcase.assertEqual(b, m)
a[i] = ai
m[i] = mi
for n in range(1, 5):
for fmt, items, singleitem in iter_format(n, 'array'):
for lslice in genslices(n):
for rslice in genslices(n):
a = array.array(fmt, items)
b = array.array(fmt, items)
m = memoryview(b)
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
self.assertEqual(m.tobytes(), a.tobytes())
self.assertEqual(len(m), len(a))
cmptest(self, a, b, m, singleitem)
array_err = None
have_resize = None
try:
al = a[lslice]
ar = a[rslice]
a[lslice] = a[rslice]
have_resize = len(al) != len(ar)
except Exception as e:
array_err = e.__class__
m_err = None
try:
m[lslice] = m[rslice]
except Exception as e:
m_err = e.__class__
if have_resize: # memoryview cannot change shape
self.assertIs(m_err, ValueError)
elif m_err or array_err:
self.assertIs(m_err, array_err)
else:
self.assertEqual(m, a)
self.assertEqual(m.tolist(), a.tolist())
self.assertEqual(m.tobytes(), a.tobytes())
cmptest(self, a, b, m, singleitem)
def test_memoryview_compare_special_cases(self):
a = array.array('L', [1, 2, 3])
b = array.array('L', [1, 2, 7])
# Ordering comparisons raise:
v = memoryview(a)
w = memoryview(b)
for attr in ('__lt__', '__le__', '__gt__', '__ge__'):
self.assertIs(getattr(v, attr)(w), NotImplemented)
self.assertIs(getattr(a, attr)(v), NotImplemented)
# Released views compare equal to themselves:
v = memoryview(a)
v.release()
self.assertEqual(v, v)
self.assertNotEqual(v, a)
self.assertNotEqual(a, v)
v = memoryview(a)
w = memoryview(a)
w.release()
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
# Operand does not implement the buffer protocol:
v = memoryview(a)
self.assertNotEqual(v, [1, 2, 3])
# NaNs
nd = ndarray([(0, 0)], shape=[1], format='l x d x', flags=ND_WRITABLE)
nd[0] = (-1, float('nan'))
self.assertNotEqual(memoryview(nd), nd)
# Depends on issue #15625: the struct module does not understand 'u'.
a = array.array('u', 'xyz')
v = memoryview(a)
self.assertNotEqual(a, v)
self.assertNotEqual(v, a)
# Some ctypes format strings are unknown to the struct module.
if ctypes:
# format: "T{>l:x:>l:y:}"
class BEPoint(ctypes.BigEndianStructure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
point = BEPoint(100, 200)
a = memoryview(point)
b = memoryview(point)
self.assertNotEqual(a, b)
self.assertNotEqual(a, point)
self.assertNotEqual(point, a)
self.assertRaises(NotImplementedError, a.tolist)
def test_memoryview_compare_ndim_zero(self):
nd1 = ndarray(1729, shape=[], format='@L')
nd2 = ndarray(1729, shape=[], format='L', flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, w)
self.assertEqual(w, v)
self.assertEqual(v, nd2)
self.assertEqual(nd2, v)
self.assertEqual(w, nd1)
self.assertEqual(nd1, w)
self.assertFalse(v.__ne__(w))
self.assertFalse(w.__ne__(v))
w[()] = 1728
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
self.assertNotEqual(v, nd2)
self.assertNotEqual(nd2, v)
self.assertNotEqual(w, nd1)
self.assertNotEqual(nd1, w)
self.assertFalse(v.__eq__(w))
self.assertFalse(w.__eq__(v))
nd = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
m = memoryview(ex)
self.assertEqual(m, nd)
m[9] = 100
self.assertNotEqual(m, nd)
# struct module: equal
nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
nd2 = ndarray((1729, 1.2, b'12345'), shape=[], format='hf5s',
flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, w)
self.assertEqual(w, v)
self.assertEqual(v, nd2)
self.assertEqual(nd2, v)
self.assertEqual(w, nd1)
self.assertEqual(nd1, w)
# struct module: not equal
nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
nd2 = ndarray((-1729, 1.2, b'12345'), shape=[], format='hf5s',
flags=ND_WRITABLE)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertNotEqual(v, w)
self.assertNotEqual(w, v)
self.assertNotEqual(v, nd2)
self.assertNotEqual(nd2, v)
self.assertNotEqual(w, nd1)
self.assertNotEqual(nd1, w)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
def test_memoryview_compare_ndim_one(self):
# contiguous
nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# contiguous, struct module
nd1 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<i')
nd2 = ndarray([-529, 576, -625, 676, 729], shape=[5], format='>h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# non-contiguous
nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, struct module
nd1 = ndarray([-529, -625, -729], shape=[3], format='!h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='<l')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, suboffsets
nd1 = ndarray([-529, -625, -729], shape=[3], format='@h')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h',
flags=ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
# non-contiguous, suboffsets, struct module
nd1 = ndarray([-529, -625, -729], shape=[3], format='h 0c')
nd2 = ndarray([-529, 576, -625, 676, -729], shape=[5], format='> h',
flags=ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd2[::2])
self.assertEqual(w[::2], nd1)
self.assertEqual(v, w[::2])
self.assertEqual(v[::-1], w[::-2])
def test_memoryview_compare_zero_shape(self):
# zeros in shape
nd1 = ndarray([900, 961], shape=[0], format='@h')
nd2 = ndarray([-900, -961], shape=[0], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# zeros in shape, struct module
nd1 = ndarray([900, 961], shape=[0], format='= h0c')
nd2 = ndarray([-900, -961], shape=[0], format='@ i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_zero_strides(self):
# zero strides
nd1 = ndarray([900, 900, 900, 900], shape=[4], format='@L')
nd2 = ndarray([900], shape=[4], strides=[0], format='L')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# zero strides, struct module
nd1 = ndarray([(900, 900)]*4, shape=[4], format='@ Li')
nd2 = ndarray([(900, 900)], shape=[4], strides=[0], format='!L h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_random_formats(self):
# random single character native formats
n = 10
for char in fmtdict['@m']:
fmt, items, singleitem = randitems(n, 'memoryview', '@', char)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[n], format=fmt, flags=flags)
m = memoryview(nd)
self.assertEqual(m, nd)
nd = nd[::-3]
m = memoryview(nd)
self.assertEqual(m, nd)
# random formats
n = 10
for _ in range(100):
fmt, items, singleitem = randitems(n)
for flags in (0, ND_PIL):
nd = ndarray(items, shape=[n], format=fmt, flags=flags)
m = memoryview(nd)
self.assertEqual(m, nd)
nd = nd[::-3]
m = memoryview(nd)
self.assertEqual(m, nd)
def test_memoryview_compare_multidim_c(self):
# C-contiguous, different values
nd1 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='@h')
nd2 = ndarray(list(range(0, 30)), shape=[3, 2, 5], format='@h')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different values, struct module
nd1 = ndarray([(0, 1, 2)]*30, shape=[3, 2, 5], format='=f q xxL')
nd2 = ndarray([(-1.2, 1, 2)]*30, shape=[3, 2, 5], format='< f 2Q')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different shape
nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='L')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different shape, struct module
nd1 = ndarray([(0, 1, 2)]*21, shape=[3, 7], format='! b B xL')
nd2 = ndarray([(0, 1, 2)]*21, shape=[7, 3], format='= Qx l xxL')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# C-contiguous, different format, struct module
nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
nd2 = ndarray(list(range(30)), shape=[2, 3, 5], format='l')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_fortran(self):
# Fortran-contiguous, different values
nd1 = ndarray(list(range(-15, 15)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(0, 30)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different values, struct module
nd1 = ndarray([(2**64-1, -1)]*6, shape=[2, 3], format='=Qq',
flags=ND_FORTRAN)
nd2 = ndarray([(-1, 2**64-1)]*6, shape=[2, 3], format='=qQ',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different shape
nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='l',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different shape, struct module
nd1 = ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='0ll',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# Fortran-contiguous, different format, struct module
nd1 = ndarray(list(range(30)), shape=[5, 2, 3], format='@h',
flags=ND_FORTRAN)
nd2 = ndarray(list(range(30)), shape=[5, 2, 3], format='@b',
flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_mixed(self):
# mixed C/Fortran contiguous
lst1 = list(range(-15, 15))
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l')
nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# mixed C/Fortran contiguous, struct module
lst1 = [(-3.3, -22, b'x')]*30
lst1[5] = (-2.2, -22, b'x')
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='d b c')
nd2 = ndarray(lst2, shape=[3, 2, 5], format='d h c', flags=ND_FORTRAN)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# different values, non-contiguous
ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray(list(range(40)), shape=[5, 8], format='I')
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# same values, non-contiguous, struct module
ex1 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='=ii')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='>ii')
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# different shape
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b')
nd1 = ex1[1:3:, ::-2]
        ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# different shape, struct module
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='B')
nd1 = ex1[1:3:, ::-2]
        ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# different format, struct module
ex1 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='b3s')
nd1 = ex1[1:3:, ::-2]
nd2 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='i3s')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_shape(self):
# zeros in shape
nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# zeros in shape, struct module
nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_strides(self):
# zero strides
nd1 = ndarray([900]*80, shape=[4, 5, 4], format='@L')
nd2 = ndarray([900], shape=[4, 5, 4], strides=[0, 0, 0], format='L')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
self.assertEqual(v.tolist(), w.tolist())
# zero strides, struct module
nd1 = ndarray([(1, 2)]*10, shape=[2, 5], format='=lQ')
nd2 = ndarray([(1, 2)], shape=[2, 5], strides=[0, 0], format='<lQ')
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
def test_memoryview_compare_multidim_suboffsets(self):
# suboffsets
ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray(list(range(40)), shape=[5, 8], format='I', flags=ND_PIL)
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, struct module
ex1 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='=Qq',
flags=ND_WRITABLE)
ex1[2][7] = (1, -2)
nd1 = ex1[3:1:-1, ::-2]
ex2 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='>Qq',
flags=ND_PIL|ND_WRITABLE)
ex2[2][7] = (1, -2)
nd2 = ex2[1:3:1, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# suboffsets, different shape
ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b',
flags=ND_PIL)
nd1 = ex1[1:3:, ::-2]
        ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, different shape, struct module
ex1 = ndarray([(2**8-1, -1)]*40, shape=[2, 3, 5], format='Bb',
flags=ND_PIL|ND_WRITABLE)
nd1 = ex1[1:2:, ::-2]
ex2 = ndarray([(2**8-1, -1)]*40, shape=[3, 2, 5], format='Bb')
nd2 = ex2[1:2:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# suboffsets, different format
ex1 = ndarray(list(range(30)), shape=[5, 3, 2], format='i', flags=ND_PIL)
nd1 = ex1[1:3:, ::-2]
ex2 = ndarray(list(range(30)), shape=[5, 3, 2], format='@I', flags=ND_PIL)
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, nd2)
self.assertEqual(w, nd1)
self.assertEqual(v, w)
# suboffsets, different format, struct module
ex1 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
flags=ND_PIL|ND_WRITABLE)
ex1[1][2][2] = (b'sushi', b'', 1)
nd1 = ex1[1:3:, ::-2]
ex2 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
flags=ND_PIL|ND_WRITABLE)
ex1[1][2][2] = (b'sushi', b'', 1)
nd2 = ex2[1:3:, ::-2]
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertNotEqual(v, nd2)
self.assertNotEqual(w, nd1)
self.assertNotEqual(v, w)
# initialize mixed C/Fortran + suboffsets
lst1 = list(range(-15, 15))
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l', flags=ND_PIL)
nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN|ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
# initialize mixed C/Fortran + suboffsets, struct module
lst1 = [(b'sashimi', b'sliced', 20.05)]*30
lst1[11] = (b'ramen', b'spicy', 9.45)
lst2 = transpose(lst1, [3, 2, 5])
nd1 = ndarray(lst1, shape=[3, 2, 5], format='< 10p 9p d', flags=ND_PIL)
nd2 = ndarray(lst2, shape=[3, 2, 5], format='> 10p 9p d',
flags=ND_FORTRAN|ND_PIL)
v = memoryview(nd1)
w = memoryview(nd2)
self.assertEqual(v, nd1)
self.assertEqual(w, nd2)
self.assertEqual(v, w)
def test_memoryview_compare_not_equal(self):
# items not equal
for byteorder in ['=', '<', '>', '!']:
x = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q')
y = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q',
flags=ND_WRITABLE|ND_FORTRAN)
y[2][3][1][1][1] = 1
a = memoryview(x)
b = memoryview(y)
self.assertEqual(a, x)
self.assertEqual(b, y)
self.assertNotEqual(a, b)
self.assertNotEqual(a, y)
self.assertNotEqual(b, x)
x = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
format=byteorder+'QLH')
y = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
format=byteorder+'QLH', flags=ND_WRITABLE|ND_FORTRAN)
y[2][3][1][1][1] = (1, 1, 1)
a = memoryview(x)
b = memoryview(y)
self.assertEqual(a, x)
self.assertEqual(b, y)
self.assertNotEqual(a, b)
self.assertNotEqual(a, y)
self.assertNotEqual(b, x)
def test_memoryview_check_released(self):
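        # After release(), re-exporting, casting, tolist/tobytes, containment,
        # indexing, assignment and attribute access all raise ValueError; rich
        # comparison still works, but a released view only equals itself.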
a = array.array('d', [1.1, 2.2, 3.3])
m = memoryview(a)
m.release()
# PyMemoryView_FromObject()
self.assertRaises(ValueError, memoryview, m)
# memoryview.cast()
self.assertRaises(ValueError, m.cast, 'c')
# getbuffer()
self.assertRaises(ValueError, ndarray, m)
# memoryview.tolist()
self.assertRaises(ValueError, m.tolist)
# memoryview.tobytes()
self.assertRaises(ValueError, m.tobytes)
# sequence
self.assertRaises(ValueError, eval, "1.0 in m", locals())
# subscript
self.assertRaises(ValueError, m.__getitem__, 0)
# assignment
self.assertRaises(ValueError, m.__setitem__, 0, 1)
for attr in ('obj', 'nbytes', 'readonly', 'itemsize', 'format', 'ndim',
'shape', 'strides', 'suboffsets', 'c_contiguous',
'f_contiguous', 'contiguous'):
self.assertRaises(ValueError, m.__getattribute__, attr)
# richcompare
b = array.array('d', [1.1, 2.2, 3.3])
m1 = memoryview(a)
m2 = memoryview(b)
self.assertEqual(m1, m2)
m1.release()
self.assertNotEqual(m1, m2)
self.assertNotEqual(m1, a)
self.assertEqual(m1, m1)
def test_memoryview_tobytes(self):
# Many implicit tests are already in self.verify().
t = (-529, 576, -625, 676, -729)
nd = ndarray(t, shape=[5], format='@h')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t], shape=[1], format='>hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t for _ in range(12)], shape=[2,2,3], format='=hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
nd = ndarray([t for _ in range(120)], shape=[5,2,2,3,2],
format='<hQiLl')
m = memoryview(nd)
self.assertEqual(m, nd)
self.assertEqual(m.tobytes(), nd.tobytes())
# Unknown formats are handled: tobytes() purely depends on itemsize.
if ctypes:
# format: "T{>l:x:>l:y:}"
class BEPoint(ctypes.BigEndianStructure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
point = BEPoint(100, 200)
a = memoryview(point)
self.assertEqual(a.tobytes(), bytes(point))
def test_memoryview_get_contiguous(self):
# Many implicit tests are already in self.verify().
# no buffer interface
self.assertRaises(TypeError, get_contiguous, {}, PyBUF_READ, 'F')
# writable request to read-only object
self.assertRaises(BufferError, get_contiguous, b'x', PyBUF_WRITE, 'C')
# writable request to non-contiguous object
nd = ndarray([1, 2, 3], shape=[2], strides=[2])
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'A')
# scalar, read-only request from read-only exporter
nd = ndarray(9, shape=(), format="L")
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
# scalar, read-only request from writable exporter
nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
# scalar, writable request
for order in ['C', 'F', 'A']:
nd[()] = 9
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m[()], 9)
m[()] = 10
self.assertEqual(m[()], 10)
self.assertEqual(nd[()], 10)
# zeros in shape
nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertRaises(IndexError, m.__getitem__, 0)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), [])
nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), [[], []])
# one-dimensional
nd = ndarray([1], shape=[1], format="h", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
nd = ndarray([1, 2, 3], shape=[3], format="b", flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
# one-dimensional, non-contiguous
nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
self.assertRaises(TypeError, m.__setitem__, 1, 20)
self.assertEqual(m[1], 3)
self.assertEqual(nd[1], 3)
nd = nd[::-1]
for order in ['C', 'F', 'A']:
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(m, nd)
self.assertEqual(m.tolist(), nd.tolist())
self.assertRaises(TypeError, m.__setitem__, 1, 20)
self.assertEqual(m[1], 1)
self.assertEqual(nd[1], 1)
# multi-dimensional, contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
for order in ['C', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'F')
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
nd = ndarray(list(range(12)), shape=[3, 4],
flags=ND_WRITABLE|ND_FORTRAN)
for order in ['F', 'A']:
m = get_contiguous(nd, PyBUF_WRITE, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'C')
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
# multi-dimensional, non-contiguous input
nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)
for order in ['C', 'F', 'A']:
self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE,
order)
m = get_contiguous(nd, PyBUF_READ, order)
self.assertEqual(ndarray(m).tolist(), nd.tolist())
# flags
nd = ndarray([1,2,3,4,5], shape=[3], strides=[2])
m = get_contiguous(nd, PyBUF_READ, 'C')
self.assertTrue(m.c_contiguous)
def test_memoryview_serializing(self):
# C-contiguous
size = struct.calcsize('i')
a = array.array('i', [1,2,3,4,5])
m = memoryview(a)
buf = io.BytesIO(m)
b = bytearray(5*size)
buf.readinto(b)
self.assertEqual(m.tobytes(), b)
# C-contiguous, multi-dimensional
size = struct.calcsize('L')
nd = ndarray(list(range(12)), shape=[2,3,2], format="L")
m = memoryview(nd)
buf = io.BytesIO(m)
b = bytearray(2*3*2*size)
buf.readinto(b)
self.assertEqual(m.tobytes(), b)
# Fortran contiguous, multi-dimensional
#size = struct.calcsize('L')
#nd = ndarray(list(range(12)), shape=[2,3,2], format="L",
# flags=ND_FORTRAN)
#m = memoryview(nd)
#buf = io.BytesIO(m)
#b = bytearray(2*3*2*size)
#buf.readinto(b)
#self.assertEqual(m.tobytes(), b)
def test_memoryview_hash(self):
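        # A hashable memoryview must use a byte format ('B', 'b' or 'c'); other
        # formats raise ValueError.  Equal views must hash equally, even when one
        # uses 'B' and the other 'b'.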
# bytes exporter
b = bytes(list(range(12)))
m = memoryview(b)
self.assertEqual(hash(b), hash(m))
# C-contiguous
mc = m.cast('c', shape=[3,4])
self.assertEqual(hash(mc), hash(b))
# non-contiguous
mx = m[::-2]
b = bytes(list(range(12))[::-2])
self.assertEqual(hash(mx), hash(b))
# Fortran contiguous
nd = ndarray(list(range(30)), shape=[3,2,5], flags=ND_FORTRAN)
m = memoryview(nd)
self.assertEqual(hash(m), hash(nd))
# multi-dimensional slice
nd = ndarray(list(range(30)), shape=[3,2,5])
x = nd[::2, ::, ::-1]
m = memoryview(x)
self.assertEqual(hash(m), hash(x))
# multi-dimensional slice with suboffsets
nd = ndarray(list(range(30)), shape=[2,5,3], flags=ND_PIL)
x = nd[::2, ::, ::-1]
m = memoryview(x)
self.assertEqual(hash(m), hash(x))
# equality-hash invariant
x = ndarray(list(range(12)), shape=[12], format='B')
a = memoryview(x)
y = ndarray(list(range(12)), shape=[12], format='b')
b = memoryview(y)
self.assertEqual(a, b)
self.assertEqual(hash(a), hash(b))
# non-byte formats
nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='h')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(12)), shape=[2,2,3], format='= L')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='< h')
m = memoryview(nd)
self.assertRaises(ValueError, m.__hash__)
def test_memoryview_release(self):
# Create re-exporter from getbuffer(memoryview), then release the view.
a = bytearray([1,2,3])
m = memoryview(a)
nd = ndarray(m) # re-exporter
self.assertRaises(BufferError, m.release)
del nd
m.release()
a = bytearray([1,2,3])
m = memoryview(a)
nd1 = ndarray(m, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(nd2.obj, m)
self.assertRaises(BufferError, m.release)
del nd1, nd2
m.release()
# chained views
a = bytearray([1,2,3])
m1 = memoryview(a)
m2 = memoryview(m1)
nd = ndarray(m2) # re-exporter
m1.release()
self.assertRaises(BufferError, m2.release)
del nd
m2.release()
a = bytearray([1,2,3])
m1 = memoryview(a)
m2 = memoryview(m1)
nd1 = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(nd2.obj, m2)
m1.release()
self.assertRaises(BufferError, m2.release)
del nd1, nd2
m2.release()
# Allow changing layout while buffers are exported.
nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
m1 = memoryview(nd)
nd.push([4,5,6,7,8], shape=[5]) # mutate nd
m2 = memoryview(nd)
x = memoryview(m1)
self.assertEqual(x.tolist(), m1.tolist())
y = memoryview(m2)
self.assertEqual(y.tolist(), m2.tolist())
self.assertEqual(y.tolist(), nd.tolist())
m2.release()
y.release()
nd.pop() # pop the current view
self.assertEqual(x.tolist(), nd.tolist())
del nd
m1.release()
x.release()
# If multiple memoryviews share the same managed buffer, implicit
# release() in the context manager's __exit__() method should still
# work.
def catch22(b):
with memoryview(b) as m2:
pass
x = bytearray(b'123')
with memoryview(x) as m1:
catch22(m1)
self.assertEqual(m1[0], ord(b'1'))
x = ndarray(list(range(12)), shape=[2,2,3], format='l')
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
self.assertIs(z.obj, x)
with memoryview(z) as m:
catch22(m)
self.assertEqual(m[0:1].tolist(), [[[0, 1, 2], [3, 4, 5]]])
# Test garbage collection.
for flags in (0, ND_REDIRECT):
x = bytearray(b'123')
with memoryview(x) as m1:
del x
y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(y) as m2:
del y
z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(z) as m3:
del z
catch22(m3)
catch22(m2)
catch22(m1)
self.assertEqual(m1[0], ord(b'1'))
self.assertEqual(m2[1], ord(b'2'))
self.assertEqual(m3[2], ord(b'3'))
del m3
del m2
del m1
x = bytearray(b'123')
with memoryview(x) as m1:
del x
y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(y) as m2:
del y
z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
with memoryview(z) as m3:
del z
catch22(m1)
catch22(m2)
catch22(m3)
self.assertEqual(m1[0], ord(b'1'))
self.assertEqual(m2[1], ord(b'2'))
self.assertEqual(m3[2], ord(b'3'))
del m1, m2, m3
# memoryview.release() fails if the view has exported buffers.
x = bytearray(b'123')
with self.assertRaises(BufferError):
with memoryview(x) as m:
ex = ndarray(m)
m[0] == ord(b'1')
def test_memoryview_redirect(self):
nd = ndarray([1.0 * x for x in range(12)], shape=[12], format='d')
a = array.array('d', [1.0 * x for x in range(12)])
for x in (nd, a):
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(z.obj, x)
self.assertIs(m.obj, x)
self.assertEqual(m, x)
self.assertEqual(m, y)
self.assertEqual(m, z)
self.assertEqual(m[1:3], x[1:3])
self.assertEqual(m[1:3], y[1:3])
self.assertEqual(m[1:3], z[1:3])
del y, z
self.assertEqual(m[1:3], x[1:3])
def test_memoryview_from_static_exporter(self):
fmt = 'B'
lst = [0,1,2,3,4,5,6,7,8,9,10,11]
# exceptions
self.assertRaises(TypeError, staticarray, 1, 2, 3)
# view.obj==x
x = staticarray()
y = memoryview(x)
self.verify(y, obj=x,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
for i in range(12):
self.assertEqual(y[i], i)
del x
del y
x = staticarray()
y = memoryview(x)
del y
del x
x = staticarray()
y = ndarray(x, getbuf=PyBUF_FULL_RO)
z = ndarray(y, getbuf=PyBUF_FULL_RO)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(m.obj, z)
self.verify(m, obj=z,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
x = staticarray()
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
self.assertIs(y.obj, x)
self.assertIs(z.obj, x)
self.assertIs(m.obj, x)
self.verify(m, obj=x,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
# view.obj==NULL
x = staticarray(legacy_mode=True)
y = memoryview(x)
self.verify(y, obj=None,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
for i in range(12):
self.assertEqual(y[i], i)
del x
del y
x = staticarray(legacy_mode=True)
y = memoryview(x)
del y
del x
x = staticarray(legacy_mode=True)
y = ndarray(x, getbuf=PyBUF_FULL_RO)
z = ndarray(y, getbuf=PyBUF_FULL_RO)
m = memoryview(z)
self.assertIs(y.obj, None)
self.assertIs(m.obj, z)
self.verify(m, obj=z,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
x = staticarray(legacy_mode=True)
y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
m = memoryview(z)
# Clearly setting view.obj==NULL is inferior, since it
# messes up the redirection chain:
self.assertIs(y.obj, None)
self.assertIs(z.obj, y)
self.assertIs(m.obj, y)
self.verify(m, obj=y,
itemsize=1, fmt=fmt, readonly=1,
ndim=1, shape=[12], strides=[1],
lst=lst)
del x, y, z, m
def test_memoryview_getbuffer_undefined(self):
# getbufferproc does not adhere to the new documentation
nd = ndarray([1,2,3], [3], flags=ND_GETBUF_FAIL|ND_GETBUF_UNDEFINED)
self.assertRaises(BufferError, memoryview, nd)
def test_issue_7385(self):
x = ndarray([1,2,3], shape=[3], flags=ND_GETBUF_FAIL)
self.assertRaises(BufferError, memoryview, x)
def test_main():
support.run_unittest(TestBufferProtocol)
if __name__ == "__main__":
test_main()
|
the-stack_106_28115 | from __future__ import unicode_literals
from django import forms
from django.forms import ModelForm
# Import from Models
from .models import bug, folder, group, tag, User, change_task, customer, kanban_column, kanban_level, tag_assignment,\
kanban_card, kanban_board, permission_set, project, request_for_change, requirement_item, requirement, task,\
organisation, bug_client, document, object_assignment
class AddBugForm(forms.Form):
bug_client = forms.ModelChoiceField(
required=True,
queryset=bug_client.objects.filter(
is_deleted=False,
)
)
bug_id = forms.IntegerField(
required=True,
min_value=0,
)
bug_description = forms.CharField(
required=True,
)
bug_status = forms.CharField(
required=True,
)
class AddCustomerForm(forms.Form):
customer = forms.ModelChoiceField(
required=True,
queryset=customer.objects.all(),
)
class AddFolderForm(forms.Form):
folder_description = forms.CharField(
required=True,
max_length=50,
)
parent_folder = forms.ModelChoiceField(
required=False,
queryset=folder.objects.all(),
)
class AddGroupForm(forms.Form):
group_list = forms.ModelMultipleChoiceField(
required=True,
queryset=group.objects.all(),
)
class AddKanbanLinkForm(forms.Form):
project = forms.ModelChoiceField(
required=False,
queryset=project.objects.filter(
is_deleted=False,
)
)
requirement = forms.ModelChoiceField(
required=False,
queryset=requirement.objects.filter(
is_deleted=False,
)
)
task = forms.ModelChoiceField(
required=False,
queryset=task.objects.filter(
is_deleted=False,
)
)
kanban_column = forms.ModelChoiceField(
required=True,
queryset=kanban_column.objects.filter(
is_deleted=False,
)
)
kanban_level = forms.ModelChoiceField(
required=True,
queryset=kanban_level.objects.filter(
is_deleted=False,
)
)
class AddLinkForm(forms.Form):
document_description = forms.CharField(
max_length=50,
required=True,
)
document_url_location = forms.URLField(
required=True,
)
parent_folder = forms.ModelChoiceField(
required=False,
queryset=folder.objects.all(),
)
class AddNoteForm(forms.Form):
note = forms.CharField(
required=True,
)
class AddObjectLinkForm(forms.Form):
project = forms.ModelMultipleChoiceField(
queryset=project.objects.all(),
required=False,
)
requirement = forms.ModelMultipleChoiceField(
queryset=requirement.objects.all(),
required=False,
)
requirement_item = forms.ModelMultipleChoiceField(
queryset=requirement_item.objects.all(),
required=False,
)
task = forms.ModelMultipleChoiceField(
queryset=task.objects.all(),
required=False,
)
class AddRequirementLinkForm(forms.Form):
# One external field
project = forms.ModelMultipleChoiceField(
required=False,
queryset=project.objects.filter(
is_deleted=False,
)
)
task = forms.ModelMultipleChoiceField(
required=False,
queryset=task.objects.filter(
is_deleted=False,
)
)
class AddTagsForm(forms.Form):
tag_id = forms.ModelMultipleChoiceField(
queryset=tag.objects.all(),
)
class AdminAddUserForm(forms.Form):
group = forms.ModelMultipleChoiceField(
queryset=group.objects.all(),
)
permission_set = forms.ModelMultipleChoiceField(
queryset=permission_set.objects.all(),
)
username = forms.ModelChoiceField(
queryset=User.objects.all(),
)
class ChangeTaskForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = change_task
fields = [
'change_task_title',
'change_task_description',
'change_task_start_date',
'change_task_end_date',
'change_task_seconds',
'change_task_required_by',
'is_downtime',
]
class ChangeTaskStatusForm(forms.ModelForm):
class Meta:
model = change_task
fields = [
'change_task_status',
]
class CheckKanbanBoardName(forms.Form):
kanban_board_name = forms.CharField(max_length=255)
class CustomerForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = customer
fields = [
'customer_title',
'customer_first_name',
'customer_last_name',
'customer_email',
'organisation',
]
class DeleteColumnForm(forms.Form):
delete_item_id = forms.ModelChoiceField(
queryset=kanban_column.objects.all(),
required=True,
)
destination_item_id = forms.ModelChoiceField(
queryset=kanban_column.objects.all(),
required=True,
)
class DeleteLevelForm(forms.Form):
delete_item_id = forms.ModelChoiceField(
queryset=kanban_level.objects.all(),
required=True,
)
destination_item_id = forms.ModelChoiceField(
queryset=kanban_level.objects.all(),
required=True,
)
class DeleteBugForm(forms.Form):
bug_id = forms.ModelChoiceField(
queryset=bug.objects.all(),
required=True,
)
class DeleteLinkForm(forms.Form):
object_assignment_id = forms.ModelChoiceField(
queryset=object_assignment.objects.all(),
required=True,
)
class DeleteTagForm(forms.ModelForm):
class Meta:
model = tag_assignment
fields = {
'tag',
'object_enum',
'object_id',
}
class DocumentUploadForm(forms.ModelForm):
document = forms.FileField(
required=True,
)
parent_folder = forms.ModelChoiceField(
required=False,
queryset=folder.objects.all(),
)
class Meta:
model = document
fields = {
'document',
'document_description',
}
class KanbanCardForm(forms.ModelForm):
kanban_card_id = forms.ModelChoiceField(
required=True,
queryset=kanban_card.objects.all(),
)
kanban_card_description = forms.CharField(
required=False,
)
kanban_column = forms.ModelChoiceField(
required=True,
queryset=kanban_column.objects.all()
)
kanban_level = forms.ModelChoiceField(
required=True,
queryset=kanban_level.objects.all(),
)
class Meta:
model = kanban_card
fields = {
'kanban_card_id',
'kanban_card_text',
'kanban_card_description',
'kanban_column',
'kanban_level',
}
class KanbanCardArchiveForm(forms.Form):
kanban_card_id = forms.ModelMultipleChoiceField(
required=True,
queryset=kanban_card.objects.all(),
)
class AddUserForm(forms.Form):
user_list = forms.ModelMultipleChoiceField(
required=True,
queryset=User.objects.all(),
)
class LoginForm(forms.Form):
username = forms.CharField(
widget=forms.TextInput(attrs={
'placeholder': 'Username',
'class': 'form-control',
'required': True,
'autofocus': True,
})
)
password = forms.CharField(
widget=forms.PasswordInput(attrs={
'placeholder': 'Password',
'class': 'form-control',
'required': True,
})
)
class MoveKanbanCardForm(forms.Form):
# Get Query Sets
kanban_column_results = kanban_column.objects.all()
kanban_level_results = kanban_level.objects.all()
# New card information
new_card_column = forms.ModelChoiceField(
required=True,
queryset=kanban_column_results,
)
new_card_level = forms.ModelChoiceField(
required=True,
queryset=kanban_level_results,
)
new_card_sort_number = forms.IntegerField()
# Old card information
old_card_column = forms.ModelChoiceField(
required=True,
queryset=kanban_column_results,
)
old_card_level = forms.ModelChoiceField(
required=True,
queryset=kanban_level_results,
)
old_card_sort_number = forms.IntegerField()
class NewChangeTaskForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = change_task
fields = [
'request_for_change',
'change_task_title',
'change_task_description',
'change_task_start_date',
'change_task_end_date',
'change_task_seconds',
# 'change_task_assigned_user',
# 'change_task_qa_user',
'change_task_required_by',
'is_downtime',
]
class NewColumnForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = kanban_column
fields = [
'kanban_column_name',
'kanban_column_sort_number',
]
class NewCustomerForm(forms.ModelForm):
organisation = forms.ModelChoiceField(
queryset=organisation.objects.all(),
required=False,
)
# Basic Meta Data
class Meta:
model = customer
fields = [
'customer_title',
'customer_first_name',
'customer_last_name',
'customer_email',
'organisation',
]
class NewGroupForm(forms.ModelForm):
parent_group = forms.ModelChoiceField(
queryset=group.objects.all(),
required=False,
)
# Basic Meta Data
class Meta:
model = group
fields = [
'group_name',
'parent_group',
]
class NewKanbanCardForm(forms.ModelForm):
kanban_card_description = forms.CharField(
required=False,
)
# Basic Meta Data
class Meta:
model = kanban_card
fields = [
'kanban_card_text',
'kanban_card_description',
'kanban_level',
'kanban_column',
]
class NewKanbanForm(forms.ModelForm):
column_title = forms.SelectMultiple()
level_title = forms.SelectMultiple()
# Basic Meta Data
class Meta:
model = kanban_board
fields = [
'kanban_board_name',
]
class NewLevelForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = kanban_level
fields = [
'kanban_level_name',
'kanban_level_sort_number',
]
class NewPermissionSetForm(forms.ModelForm):
# Basic Meta Data
class Meta:
model = permission_set
fields = [
'permission_set_name',
]
class NewProjectForm(forms.ModelForm):
project_start_date = forms.DateTimeField(
input_formats=['c'],
)
project_end_date = forms.DateTimeField(
input_formats=['c'],
)
group_list = forms.ModelMultipleChoiceField(
required=True,
queryset=group.objects.filter(
is_deleted=False,
)
)
# Basic Meta Data
class Meta:
model = project
fields = [
'project_name',
'project_description',
'project_start_date',
'project_end_date',
'organisation',
]
class NewRequestForChangeForm(forms.ModelForm):
group_list = forms.ModelMultipleChoiceField(
required=True,
queryset=group.objects.filter(
is_deleted=False,
)
)
# Basic Meta Data
class Meta:
model = request_for_change
fields = [
'rfc_title',
'rfc_summary',
'rfc_type',
'rfc_implementation_start_date',
'rfc_implementation_end_date',
'rfc_implementation_release_date',
'rfc_version_number',
'rfc_lead',
'rfc_priority',
'rfc_risk',
'rfc_impact',
'rfc_risk_and_impact_analysis',
'rfc_implementation_plan',
'rfc_backout_plan',
'rfc_test_plan',
]
class NewRequirementItemForm(forms.ModelForm):
# Basic Meta data
class Meta:
model = requirement_item
fields = [
'requirement_item_title',
'requirement_item_scope',
'requirement_item_status',
'requirement_item_type',
]
class NewRequirementForm(forms.ModelForm):
# One external field
group_list = forms.ModelMultipleChoiceField(
required=True,
queryset=group.objects.filter(
is_deleted=False,
)
)
# Basic Meta data
class Meta:
model = requirement
fields = [
'requirement_title',
'requirement_scope',
'requirement_status',
'requirement_type',
'organisation',
]
class NewTagForm(forms.ModelForm):
class Meta:
model = tag
fields = [
'tag_name',
'tag_colour',
]
class NewTaskForm(forms.ModelForm):
task_start_date = forms.DateTimeField(
input_formats=['c'],
)
task_end_date = forms.DateTimeField(
input_formats=['c'],
)
group_list = forms.ModelMultipleChoiceField(
required=True,
queryset=group.objects.filter(
is_deleted=False,
)
)
# Basic Meta Data
class Meta:
model = task
fields = [
'task_short_description',
'task_long_description',
'task_start_date',
'task_end_date',
'organisation',
]
class NewUserForm(forms.ModelForm):
password1 = forms.CharField(
max_length=255,
required=True,
)
password2 = forms.CharField(
max_length=255,
required=True,
)
# Basic Meta data
class Meta:
model = User
fields = [
'username',
'first_name',
'last_name',
'email',
]
class OrganisationForm(forms.ModelForm):
# Basic Meta data
class Meta:
model = organisation
fields = [
'organisation_name',
'organisation_website',
'organisation_email',
]
class OrganisationProfilePictureForm(forms.ModelForm):
# Basic Meta Data
    class Meta:
        model = organisation  # assumed model for this profile-picture form (was missing)
        fields = [
'organisation_picture'
]
class PasswordResetForm(forms.Form):
password = forms.CharField(
max_length=50,
required=True,
)
username = forms.ModelChoiceField(
queryset=User.objects.all(),
required=True,
)
class PermissionSetForm(forms.ModelForm):
class Meta:
model = permission_set
exclude = [
'change_user',
'is_deleted',
]
class ProjectForm(forms.ModelForm):
project_start_date = forms.DateTimeField(
input_formats=['c'],
)
project_end_date = forms.DateTimeField(
input_formats=['c'],
)
# Basic Meta Data
class Meta:
model = project
fields = [
'project_name',
'project_description',
'project_start_date',
'project_end_date',
'project_status',
]
class ResortColumnForm(forms.Form):
item = forms.ModelMultipleChoiceField(
queryset=kanban_column.objects.all(),
required=True,
)
class ResortLevelForm(forms.Form):
item = forms.ModelMultipleChoiceField(
queryset=kanban_level.objects.all(),
required=True,
)
class QueryBugClientForm(forms.Form):
bug_client_id = forms.ModelChoiceField(
required=True,
queryset=bug_client.objects.filter(
is_deleted=False,
)
)
search = forms.CharField(
max_length=50,
)
class RemoveUserForm(forms.Form):
username = forms.CharField(
required=True,
)
class RfcModuleForm(forms.Form):
# This form is for all the sub modules that need to be saved separately.
text_input = forms.CharField(
required=True,
)
priority_of_change = forms.IntegerField(
required=False,
)
risk_of_change = forms.IntegerField(
required=False,
)
impact_of_change = forms.IntegerField(
required=False,
)
class RfcInformationSaveForm(forms.ModelForm):
class Meta:
model = request_for_change
fields = [
'rfc_title',
'rfc_summary',
'rfc_type',
'rfc_version_number',
'rfc_implementation_start_date',
'rfc_implementation_end_date',
'rfc_implementation_release_date',
]
class SearchForm(forms.Form):
# Just have a simple search field
search = forms.CharField(
max_length=250,
required=False,
)
class SearchObjectsForm(forms.Form):
include_closed = forms.BooleanField(
required=False,
initial=False,
)
search = forms.CharField(
max_length=250,
required=False,
)
class TagForm(forms.Form):
tag_id = forms.IntegerField(required=True)
tag_name = forms.CharField(
required=True,
max_length=50,
)
tag_colour = forms.CharField(
required=True,
max_length=7,
)
class TaskInformationForm(forms.ModelForm):
# Basic Meta data
class Meta:
model = task
fields = [
'task_short_description',
'task_long_description',
'task_start_date',
'task_end_date',
'task_status',
]
class UpdateRequirementForm(forms.ModelForm):
# Basic Meta data
class Meta:
model = requirement
fields = [
'requirement_title',
'requirement_scope',
'requirement_status',
'requirement_type',
]
class UpdateRequirementItemForm(forms.ModelForm):
# Basic Meta data
class Meta:
model = requirement_item
fields = [
'requirement_item_title',
'requirement_item_scope',
'requirement_item_status',
'requirement_item_type',
]
class UpdateRFCStatus(forms.ModelForm):
# Basic Meta Data
class Meta:
model = request_for_change
fields = [
'rfc_status',
]
class UpdateUserForm(forms.ModelForm):
first_name = forms.CharField(
max_length=255,
required=True,
)
last_name = forms.CharField(
max_length=255,
required=True,
)
email = forms.EmailField(
max_length=255,
required=False,
)
# Basic Meta Data
class Meta:
model = User
fields = [
'email',
'first_name',
'last_name',
'is_active',
'is_superuser',
]
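# Usage sketch (added for illustration; the view name and flow are hypothetical,
# only standard Django form handling is shown): each form above is bound to
# request data, validated, and then read through `cleaned_data`, e.g.
#
#   def add_bug_view(request):
#       form = AddBugForm(request.POST)
#       if form.is_valid():
#           bug_client_instance = form.cleaned_data['bug_client']
#           bug_id = form.cleaned_data['bug_id']
#           # ... create/update the corresponding bug object here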
|
the-stack_106_28116 |
# import necessary packages
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.keras.applications.densenet import DenseNet169
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
from tensorflow.keras.optimizers import SGD
# for allocating GPU resources
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# set hyper-parameters
init_lr = 0.1
n_gpus = 4 # 4 gpus are used in the experiment
img_size = 32
bs = 64
n_epoch = 6
# customize train and test data augmentation strategy for distributed training of model
def train_augmentor(img):
# simple image augmentation processing
img_ = tf.image.random_flip_left_right(img)
img_ = tf.image.random_contrast(img_, lower=0.2, upper=1.8)
img_ = tf.image.convert_image_dtype(img_, dtype=tf.float32)
img_ = img_ / 255.0 # normalization into the intensity range of [0,1]
return img_
def test_augmentor(img):
img_ = tf.image.convert_image_dtype(img, dtype=tf.float32)
img_ = img_ / 255.0 # normalization into the intensity range of [0,1]
return img_
def get_compiled_model():
opt = SGD(lr=init_lr * n_gpus, momentum=0.9)
loss = CategoricalCrossentropy(label_smoothing=0.1)
# Initialize a DenseNet169 network for cifar10 classification
# Reference: https://openaccess.thecvf.com/content_cvpr_2017/papers/Huang_Densely_Connected_Convolutional_CVPR_2017_paper.pdf
model = DenseNet169(input_shape=(img_size, img_size, 3), weights=None, classes=10)
model.build(input_shape=(None, img_size, img_size, 3))
model.compile(loss=loss, optimizer=opt, metrics=[CategoricalAccuracy()])
return model
def get_dataset():
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float")
testX = testX.astype("float")
# one-hot encoding towards training and testing labels
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)
trainY = trainY.astype('float')
testY = testY.astype('float')
# data split into train set(80%) and val set(20%)
num_val_samples = int(len(trainX) * 0.2)
valX = trainX[-num_val_samples:]
valY = trainY[-num_val_samples:]
trainX = trainX[:-num_val_samples]
trainY = trainY[:-num_val_samples]
# note that tf.data.Dataset.from_tensor_slices() is used to wrap training, validation and testing data for safe distributed training of model
return (
len(trainX),
tf.data.Dataset.from_tensor_slices((trainX, trainY)).map(
lambda x, y: (train_augmentor(x), y)).shuffle(36).batch(batch_size=bs * n_gpus).repeat(),
len(valX),
tf.data.Dataset.from_tensor_slices((valX, valY)).map(
lambda x, y: (test_augmentor(x), y)).shuffle(36).batch(batch_size=bs * n_gpus).repeat(),
len(testX),
tf.data.Dataset.from_tensor_slices((testX, testY)).map(
lambda x, y: (test_augmentor(x), y)).shuffle(36).batch(batch_size=bs * n_gpus).repeat(),
)
# configure distributed training section across multiple gpus
device_type = 'GPU'
devices = tf.config.experimental.list_physical_devices(device_type)
devices_names = [d.name.split("e:")[1] for d in devices]
# Create a MirroredStrategy, enabling synchronous training across multiple replicas (each on one gpu) on one machine
strategy = tf.distribute.MirroredStrategy(devices=devices_names[:n_gpus])
# Open a strategy scope
with strategy.scope():
# Everything which creates variables should be within the strategy scope
    # In general this is only model construction & `compile()`
model = get_compiled_model()
# Return the cifar10 dataset in the form of a 'tf.data.Dataset', each with the number of samples
train_len, train_set, val_len, val_set, test_len, test_set = get_dataset()
log_path = 'specify a path where you wanna save the training log of model'
# use CSVLogger and Tensorboard to record the process of training and validation
cl = CSVLogger(log_path + '/log.csv', separator=',', append=True)
tb = TensorBoard(log_path)
print('\n------Start training------')
# Both steps_per_epoch and validation_steps arguments are required to specify when passing an infinitely repeating dataset
H = model.fit(train_set, validation_data=val_set, epochs=n_epoch,
steps_per_epoch=train_len // (bs * n_gpus),
validation_steps=val_len // (bs * n_gpus),
callbacks=[cl, tb])
print('\n------Training finished and Start testing------')
model.evaluate(test_set, steps=test_len // (bs * n_gpus))
# Reference:
# 1. https://towardsdatascience.com/train-a-neural-network-on-multi-gpu-with-tensorflow-42fa5f51b8af
# 2. https://keras.io/guides/distributed_training/
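# Side note (added; not part of the original script): the `bs * n_gpus` scaling
# used above for the global batch size and the steps can also be derived from
# the strategy itself through its documented `num_replicas_in_sync` attribute,
# which keeps the arithmetic correct if the device list changes, e.g.
#
#   global_batch_size = bs * strategy.num_replicas_in_sync
#   steps_per_epoch = train_len // global_batch_size
#   validation_steps = val_len // global_batch_size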
|
the-stack_106_28119 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Program to (re)categorize images at commons.
The program uses commonshelper for category suggestions.
It takes the suggestions and the current categories. Put the categories through
some filters and adds the result.
The following command line parameters are supported:
-onlyfilter Don't use Commonsense to get categories, just filter the
current categories
-onlyuncat Only work on uncategorized images. Will prevent the bot from
working on an image multiple times.
-hint Give Commonsense a hint.
For example -hint:li.wikipedia.org
-onlyhint Give Commonsense a hint. And only work on this hint.
Syntax is the same as -hint. Some special hints are possible:
_20 : Work on the top 20 wikipedia's
_80 : Work on the top 80 wikipedia's
wps : Work on all wikipedia's
"""
#
# (C) Multichill, 2008-2011
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import re
import socket
import xml.etree.ElementTree
import pywikibot
from pywikibot import pagegenerators, textlib
from pywikibot.comms.http import fetch
from pywikibot.tools import deprecated, PY2
if not PY2:
from urllib.parse import urlencode
else:
from urllib import urlencode
category_blacklist = []
countries = []
search_wikis = '_20'
hint_wiki = ''
def initLists():
"""Get the list of countries & the blacklist from Commons."""
global category_blacklist
global countries
blacklistPage = pywikibot.Page(pywikibot.Site('commons', 'commons'),
'User:Multichill/Category_blacklist')
for cat in blacklistPage.linkedPages():
category_blacklist.append(cat.title(with_ns=False))
countryPage = pywikibot.Page(pywikibot.Site('commons', 'commons'),
'User:Multichill/Countries')
for country in countryPage.linkedPages():
countries.append(country.title(with_ns=False))
return
def categorizeImages(generator, onlyFilter, onlyUncat):
"""Loop over all images in generator and try to categorize them.
Get category suggestions from CommonSense.
"""
for page in generator:
if not page.exists() or page.namespace() != 6 or page.isRedirectPage():
continue
imagepage = pywikibot.FilePage(page.site, page.title())
pywikibot.output('Working on ' + imagepage.title())
if (onlyUncat and not pywikibot.Page(
imagepage.site, 'Template:Uncategorized')
in imagepage.templates()):
pywikibot.output('No Uncategorized template found')
continue
currentCats = getCurrentCats(imagepage)
if onlyFilter:
commonshelperCats = []
usage = []
galleries = []
else:
(commonshelperCats, usage,
galleries) = getCommonshelperCats(imagepage)
newcats = applyAllFilters(commonshelperCats + currentCats)
if newcats and set(currentCats) != set(newcats):
for cat in newcats:
pywikibot.output(' Found new cat: ' + cat)
saveImagePage(imagepage, newcats, usage, galleries, onlyFilter)
def getCurrentCats(imagepage):
"""Get the categories currently on the image."""
result = []
for cat in imagepage.categories():
result.append(cat.title(with_ns=False))
return list(set(result))
def getCommonshelperCats(imagepage):
"""Get category suggestions from CommonSense.
@rtype: list of unicode
"""
commonshelperCats = []
usage = []
galleries = []
global search_wikis
global hint_wiki
site = imagepage.site
lang = site.code
family = site.family.name
if lang == 'commons' and family == 'commons':
parameters = urlencode(
{'i': imagepage.title(with_ns=False).encode('utf-8'),
'r': 'on',
'go-clean': 'Find+Categories',
'p': search_wikis,
'cl': hint_wiki})
elif family == 'wikipedia':
parameters = urlencode(
{'i': imagepage.title(with_ns=False).encode('utf-8'),
'r': 'on',
'go-move': 'Find+Categories',
'p': search_wikis,
'cl': hint_wiki,
'w': lang})
else:
# Can't handle other sites atm
return [], [], []
commonsenseRe = re.compile(
r'^#COMMONSENSE(.*)#USAGE(\s)+\((?P<usagenum>(\d)+)\)\s'
r'(?P<usage>(.*))\s'
r'#KEYWORDS(\s)+\((?P<keywords>(\d)+)\)(.*)'
r'#CATEGORIES(\s)+\((?P<catnum>(\d)+)\)\s(?P<cats>(.*))\s'
r'#GALLERIES(\s)+\((?P<galnum>(\d)+)\)\s(?P<gals>(.*))\s(.*)#EOF$',
re.MULTILINE + re.DOTALL)
gotInfo = False
matches = None
maxtries = 10
tries = 0
while not gotInfo:
try:
if tries < maxtries:
tries += 1
commonsHelperPage = fetch(
'https://toolserver.org/~daniel/WikiSense/CommonSense.php?'
+ parameters)
matches = commonsenseRe.search(
commonsHelperPage.text)
gotInfo = True
else:
break
except IOError:
pywikibot.output("Got an IOError, let's try again")
except socket.timeout:
pywikibot.output("Got a timeout, let's try again")
if matches and gotInfo:
        if int(matches.group('usagenum')) > 0:  # regex groups are strings; cast before comparing
used = matches.group('usage').splitlines()
for use in used:
usage = usage + getUsage(use)
        if int(matches.group('catnum')) > 0:
cats = matches.group('cats').splitlines()
for cat in cats:
commonshelperCats.append(cat.replace('_', ' '))
pywikibot.output('category : ' + cat)
        if int(matches.group('galnum')) > 0:
gals = matches.group('gals').splitlines()
for gal in gals:
galleries.append(gal.replace('_', ' '))
pywikibot.output('gallery : ' + gal)
commonshelperCats = list(set(commonshelperCats))
galleries = list(set(galleries))
for (lang, project, article) in usage:
pywikibot.output(lang + project + article)
return commonshelperCats, usage, galleries
def getOpenStreetMapCats(latitude, longitude):
"""Get a list of location categories based on the OSM nomatim tool."""
result = []
location_list = getOpenStreetMap(latitude, longitude)
for i, location in enumerate(location_list):
pywikibot.log('Working on {!r}'.format(location))
if i <= len(location_list) - 3:
category = getCategoryByName(name=location,
parent=location_list[i + 1],
grandparent=location_list[i + 2])
elif i == len(location_list) - 2:
category = getCategoryByName(name=location,
parent=location_list[i + 1])
else:
category = getCategoryByName(name=location_list[i])
if category and not category == '':
result.append(category)
return result
def getOpenStreetMap(latitude, longitude):
"""
Get the result from https://nominatim.openstreetmap.org/reverse .
@rtype: list of tuples
"""
result = []
gotInfo = False
parameters = urlencode({'lat': latitude, 'lon': longitude,
'accept-language': 'en'})
while not gotInfo:
try:
page = fetch(
'https://nominatim.openstreetmap.org/reverse?format=xml&{}'
.format(parameters))
et = xml.etree.ElementTree.fromstring(page.text)
gotInfo = True
except IOError:
pywikibot.output("Got an IOError, let's try again")
pywikibot.sleep(30)
except socket.timeout:
pywikibot.output("Got a timeout, let's try again")
pywikibot.sleep(30)
validParts = ['hamlet', 'village', 'city', 'county', 'country']
invalidParts = ['path', 'road', 'suburb', 'state', 'country_code']
addressparts = et.find('addressparts')
for addresspart in addressparts.getchildren():
if addresspart.tag in validParts:
result.append(addresspart.text)
elif addresspart.tag in invalidParts:
pywikibot.output('Dropping {}, {}'
.format(addresspart.tag, addresspart.text))
else:
pywikibot.warning('{}, {} is not in addressparts lists'
.format(addresspart.tag, addresspart.text))
return result
def getCategoryByName(name, parent='', grandparent=''):
"""Get category by name."""
if not parent == '':
workname = name.strip() + ',_' + parent.strip()
workcat = pywikibot.Category(pywikibot.Site('commons', 'commons'),
workname)
if workcat.exists():
return workname
if not grandparent == '':
workname = name.strip() + ',_' + grandparent.strip()
workcat = pywikibot.Category(pywikibot.Site('commons', 'commons'),
workname)
if workcat.exists():
return workname
workname = name.strip()
workcat = pywikibot.Category(pywikibot.Site('commons', 'commons'),
workname)
if workcat.exists():
return workname
return ''
def getUsage(use):
"""Parse the Commonsense output to get the usage."""
result = []
lang = ''
project = ''
articles = ''
usageRe = re.compile(
r'^(?P<lang>([\w-]+))\.(?P<project>([\w]+))\.org:(?P<articles>\s(.*))')
matches = usageRe.search(use)
if matches:
if matches.group('lang'):
lang = matches.group('lang')
if matches.group('project'):
project = matches.group('project')
if matches.group('articles'):
articles = matches.group('articles')
for article in articles.split():
result.append((lang, project, article))
return result
def applyAllFilters(categories):
"""Apply all filters on categories."""
result = filterDisambiguation(categories)
result = followRedirects(result)
result = filterBlacklist(result)
result = filterCountries(result)
return result
def filterBlacklist(categories):
"""Filter out categories which are on the blacklist."""
result = []
for cat in categories:
cat = cat.replace('_', ' ')
if not (cat in category_blacklist):
result.append(cat)
return list(set(result))
def filterDisambiguation(categories):
"""Filter out disambiguation categories."""
result = []
for cat in categories:
if (not pywikibot.Page(pywikibot.Site('commons', 'commons'),
cat, ns=14).isDisambig()):
result.append(cat)
return result
def followRedirects(categories):
"""If a category is a redirect, replace the category with the target."""
result = []
for cat in categories:
categoryPage = pywikibot.Page(pywikibot.Site('commons', 'commons'),
cat, ns=14)
if categoryPage.isCategoryRedirect():
result.append(
categoryPage.getCategoryRedirectTarget().title(
with_ns=False))
else:
result.append(cat)
return result
def filterCountries(categories):
"""Try to filter out ...by country categories.
First make a list of any ...by country categories and try to find some
countries. If a by country category has a subcategoy containing one of the
countries found, add it. The ...by country categories remain in the set and
should be filtered out by filterParents.
"""
result = categories
listByCountry = []
listCountries = []
for cat in categories:
if cat.endswith('by country'):
listByCountry.append(cat)
# If cat contains 'by country' add it to the list
# If cat contains the name of a country add it to the list
else:
for country in countries:
if country in cat:
listCountries.append(country)
if len(listByCountry) > 0:
for bc in listByCountry:
category = pywikibot.Category(
pywikibot.Site('commons', 'commons'), 'Category:' + bc)
for subcategory in category.subcategories():
for country in listCountries:
if subcategory.title(with_ns=False).endswith(country):
result.append(subcategory.title(with_ns=False))
return list(set(result))
@deprecated(since='20180120')
def filterParents(categories):
"""
Remove all parent categories from the set to prevent overcategorization.
DEPRECATED: Toolserver script isn't available anymore (T78462).
This method is kept for compatibility and may be restored sometime by a new
implementation.
"""
return categories
def saveImagePage(imagepage, newcats, usage, galleries, onlyFilter):
"""Remove the old categories and add the new categories to the image."""
newtext = textlib.removeCategoryLinks(imagepage.text, imagepage.site)
if not onlyFilter:
newtext = removeTemplates(newtext)
newtext = newtext + getCheckCategoriesTemplate(usage, galleries,
len(newcats))
newtext += '\n'
for category in newcats:
newtext = newtext + '[[Category:' + category + ']]\n'
if onlyFilter:
comment = 'Filtering categories'
else:
comment = ('Image is categorized by a bot using data from '
'[[Commons:Tools#CommonSense|CommonSense]]')
pywikibot.showDiff(imagepage.text, newtext)
imagepage.text = newtext
imagepage.save(comment)
return
def removeTemplates(oldtext=''):
"""Remove {{Uncategorized}} and {{Check categories}} templates."""
result = re.sub(
r'{{\s*([Uu]ncat(egori[sz]ed( image)?)?|'
r'[Nn]ocat|[Nn]eedscategory)[^}]*}}',
'', oldtext)
result = re.sub('<!-- Remove this line once you have added categories -->',
'', result)
result = re.sub(r'\{\{\s*[Cc]heck categories[^}]*\}\}', '', result)
return result
def getCheckCategoriesTemplate(usage, galleries, ncats):
"""Build the check categories template with all parameters."""
result = ('{{Check categories|year={{subst:CURRENTYEAR}}|month={{subst:'
'CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}\n')
usageCounter = 1
for (lang, project, article) in usage:
result += '|lang%d=%s' % (usageCounter, lang)
result += '|wiki%d=%s' % (usageCounter, project)
result += '|article%d=%s' % (usageCounter, article)
result += '\n'
usageCounter += 1
galleryCounter = 1
for gallery in galleries:
result += '|gallery{}={}'.format(galleryCounter,
gallery.replace('_', ' ')) + '\n'
galleryCounter += 1
result += '|ncats={}\n}}\n'.format(ncats)
return result
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: str
"""
onlyFilter = False
onlyUncat = False
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
global search_wikis
global hint_wiki
for arg in local_args:
if arg == '-onlyfilter':
onlyFilter = True
elif arg == '-onlyuncat':
onlyUncat = True
elif arg.startswith('-hint:'):
hint_wiki = arg[len('-hint:'):]
elif arg.startswith('-onlyhint'):
search_wikis = arg[len('-onlyhint:'):]
else:
genFactory.handleArg(arg)
generator = genFactory.getCombinedGenerator()
if not generator:
site = pywikibot.Site('commons', 'commons')
generator = pagegenerators.CategorizedPageGenerator(
pywikibot.Category(site, 'Category:Media needing categories'),
recurse=True)
initLists()
categorizeImages(generator, onlyFilter, onlyUncat)
pywikibot.output('All done')
if __name__ == '__main__':
main()
|
the-stack_106_28120 | """The example shows you how to convert all Earth Engine Python scripts in a GitHub repo to Jupyter notebooks.
"""
import os
from geemap.conversion import *
import subprocess
try:
from git import Repo
except ImportError:
print('gitpython package is not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'gitpython'])
from git import Repo
git_url = 'https://github.com/giswqs/qgis-earthengine-examples'
out_repo_name = 'earthengine-py-notebooks'
# Create a temporary working directory
work_dir = os.path.join(os.path.expanduser('~'), 'geemap')
if not os.path.exists(work_dir):
os.makedirs(work_dir)
out_dir = os.path.join(work_dir, out_repo_name)
repo_name = git_url.split('/')[-1]
repo_dir = os.path.join(work_dir, repo_name)
if not os.path.exists(repo_dir):
Repo.clone_from(git_url, repo_dir)
# # Convert all Earth Engine Python scripts in a folder recursively to Jupyter notebooks.
nb_template = get_nb_template() # Get the notebook template from the package folder.
py_to_ipynb_dir(repo_dir, nb_template, out_dir, github_username='giswqs', github_repo=out_repo_name)
# execute_notebook_dir(out_dir)
|
the-stack_106_28121 | from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent, ItemEnterEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.HideWindowAction import HideWindowAction
from ulauncher.api.shared.action.OpenAction import OpenAction
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
import os
class Ext_zh0per(Extension):
def __init__(self):
super().__init__()
self.subscribe(KeywordQueryEvent, KeywordListener())
self.subscribe(ItemEnterEvent, ItemEnterListener())
class KeywordListener(EventListener):
def on_event(self, event, extension):
items = []
data = {'command': event.get_argument()}
items.append(ExtensionResultItem(
icon='images/run.png',
name='Execute',
description=data['command'],
on_enter=ExtensionCustomAction(data) ))
return RenderResultListAction(items)
class ItemEnterListener(EventListener):
def on_event(self, event, extension):
data = event.get_data()
command = data['command']
terminal = extension.preferences['terminal']
environment = extension.preferences['env_type']
terminal_cmd = extension.preferences['terminal_cmd']
if terminal_cmd:
os.system(f"{terminal_cmd} {command}")
else:
os.system(f"{terminal} -e '{environment} -c \"{command}; exec {environment}\"'")
return HideWindowAction()
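# Illustrative trace (added; preference values are hypothetical examples): with
# terminal='gnome-terminal', env_type='bash' and command='ls -la', the branch
# without `terminal_cmd` hands os.system() the string
#
#   gnome-terminal -e 'bash -c "ls -la; exec bash"'
#
# whereas a non-empty terminal_cmd such as 'kitty --hold' short-circuits to
#
#   kitty --hold ls -la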
if __name__ == '__main__':
Ext_zh0per().run()
|
the-stack_106_28123 | # -*- coding: utf-8 -*-
"""
remarks: commonly used functions related to intervals
NOTE:
`Interval` refers to interval of the form [a,b]
`GeneralizedInterval` refers to some (finite) union of `Interval`s
TODO:
1. unify `Interval` and `GeneralizedInterval`, by letting `Interval` be of the form [[a,b]]
2. distinguish openness and closedness
"""
import time
from copy import deepcopy
from functools import reduce
from numbers import Real
from typing import Union, Optional, Any, List, Tuple, Sequence
import numpy as np
np.set_printoptions(precision=5, suppress=True)
__all__ = [
"get_optimal_covering",
"overlaps",
"validate_interval",
"in_interval",
"in_generalized_interval",
"get_confidence_interval",
"intervals_union",
"generalized_intervals_union",
"intervals_intersection",
"generalized_intervals_intersection",
"generalized_interval_complement",
"find_max_cont_len",
"interval_len",
"generalized_interval_len",
"diff_with_step",
"find_extrema",
"is_intersect",
"max_disjoint_covering",
"mask_to_intervals",
]
EMPTY_SET = []
Interval = Union[Sequence[Real], type(EMPTY_SET)]
GeneralizedInterval = Union[Sequence[Interval], type(EMPTY_SET)]
def overlaps(interval:Interval, another:Interval) -> int:
""" finished, checked,
Return the amount of overlap, in bp between interval and anohter.
If >0, the number of bp of overlap
If 0, they are book-ended
If <0, the distance in bp between them
Parameters
----------
interval, another: two `Interval`s
Returns
-------
int, overlap length of two intervals; if < 0, the distance of two intervals
"""
# in case a or b is not in ascending order
interval.sort()
another.sort()
return min(interval[-1], another[-1]) - max(interval[0], another[0])
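# Worked examples (added for illustration, following the docstring semantics):
#   overlaps([1, 3], [2, 5]) == 1    # the intervals share [2, 3]
#   overlaps([1, 2], [2, 3]) == 0    # book-ended
#   overlaps([1, 2], [4, 6]) == -2   # disjoint, separated by a gap of 2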
def validate_interval(
interval:Union[Interval, GeneralizedInterval],
join_book_endeds:bool=True
) -> Tuple[bool,Union[Interval, GeneralizedInterval]]:
""" finished, not checked,
check whether `interval` is an `Interval` or a `GeneralizedInterval`,
    if so, return True together with the validated interval (of the form [a,b] with a<=b);
    otherwise return `False, []`
Parameters
----------
interval: Interval, or unions of `Interval`s
join_book_endeds: bool, default True,
if True, two book-ended intervals will be joined into one
Returns
-------
tuple, consisting of
a bool, indicating whether `interval` is a valid interval
an interval (can be empty)
"""
if isinstance(interval[0], (list,tuple)):
info = [validate_interval(itv,join_book_endeds) for itv in interval]
if all([item[0] for item in info]):
return True, intervals_union(interval,join_book_endeds)
else:
return False, []
if len(interval) == 2:
return True, [min(interval), max(interval)]
else:
return False, []
def in_interval(val:Real, interval:Interval, left_closed:bool=True, right_closed:bool=False) -> bool:
""" finished, checked,
check whether val is inside interval or not
Parameters
----------
val: real number,
interval: Interval,
left_closed: bool, default True,
right_closed: bool, default False,
Returns
-------
is_in: bool,
"""
itv = sorted(interval)
if left_closed:
is_in = (itv[0] <= val)
else:
is_in = (itv[0] < val)
if right_closed:
is_in = is_in and (val <= itv[-1])
else:
is_in = is_in and (val < itv[-1])
return is_in
def in_generalized_interval(
val:Real,
generalized_interval:GeneralizedInterval,
left_closed:bool=True,
right_closed:bool=False
) -> bool:
""" finished, checked,
check whether val is inside generalized_interval or not
Parameters
----------
val: real number,
generalized_interval: union of `Interval`s,
left_closed: bool, default True,
right_closed: bool, default False,
Returns
-------
is_in: bool,
"""
is_in = False
for interval in generalized_interval:
if in_interval(val, interval, left_closed, right_closed):
is_in = True
break
return is_in
def get_confidence_interval(
data:Optional[Sequence]=None,
val:Optional[Real]=None,
rmse:Optional[float]=None,
confidence:float=0.95,
**kwargs:Any
) -> np.ndarray:
""" finished, checked,
Parameters
----------
data: array_like, optional,
val: real number, optional,
rmse: float, optional,
confidence: float, default 0.95,
kwargs: dict,
Returns
-------
conf_itv: ndarray,
"""
from scipy.stats import norm
assert data or (val and rmse), "insufficient data for computing"
correct_factor = kwargs.get("correct_factor", 1)
bias = norm.ppf(0.5 + confidence / 2)
if data is None:
lower_bound = (val - rmse * bias) * correct_factor
upper_bound = (val + rmse * bias) / correct_factor
else:
average = np.mean(np.array(data))
std = np.std(np.array(data), ddof=1)
lower_bound = (average - std * bias) * correct_factor
upper_bound = (average + std * bias) / correct_factor
conf_itv = np.array([lower_bound, upper_bound])
return conf_itv
def intervals_union(interval_list:GeneralizedInterval, join_book_endeds:bool=True) -> GeneralizedInterval:
""" finished, checked,
find the union (ordered and non-intersecting) of all the intervals in `interval_list`,
which is a list of intervals in the form [a,b], where a,b need not be ordered
Parameters
----------
interval_list: GeneralizedInterval,
the list of intervals to calculate their union
join_book_endeds: bool, default True,
join the book-ended intervals into one (e.g. [[1,2],[2,3]] into [1,3]) or not
Returns
-------
processed: GeneralizedInterval,
the union of the intervals in `interval_list`
"""
interval_sort_key = lambda i: i[0]
# list_add = lambda list1, list2: list1+list2
processed = [item for item in interval_list if len(item) > 0]
for item in processed:
item.sort()
processed.sort(key=interval_sort_key)
# end_points = reduce(list_add, processed)
merge_flag = True
while merge_flag:
merge_flag = False
new_intervals = []
if len(processed) == 1:
return processed
for idx, interval in enumerate(processed[:-1]):
this_start, this_end = interval
next_start, next_end = processed[idx + 1]
# it is certain that this_start <= next_start
if this_end < next_start:
# the case where two consecutive intervals are disjoint
new_intervals.append([this_start, this_end])
if idx == len(processed) - 2:
new_intervals.append([next_start, next_end])
elif this_end == next_start:
# the case where two consecutive intervals are book-ended
# concatenate if `join_book_endeds` is True,
# or one interval degenerates (to a single point)
if (this_start == this_end or next_start == next_end) or join_book_endeds:
new_intervals.append([this_start, max(this_end, next_end)])
new_intervals += processed[idx + 2:]
merge_flag = True
processed = new_intervals
break
else:
new_intervals.append([this_start, this_end])
if idx == len(processed) - 2:
new_intervals.append([next_start, next_end])
else:
new_intervals.append([this_start, max(this_end, next_end)])
new_intervals += processed[idx + 2:]
merge_flag = True
processed = new_intervals
break
processed = new_intervals
return processed
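# Worked examples (added for illustration):
#   intervals_union([[3, 5], [1, 2], [2, 3]]) == [[1, 5]]
#   intervals_union([[3, 5], [1, 2], [2, 3]], join_book_endeds=False) == [[1, 2], [2, 3], [3, 5]]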
def generalized_intervals_union(
interval_list:Union[List[GeneralizedInterval],Tuple[GeneralizedInterval]],
join_book_endeds:bool=True
) -> GeneralizedInterval:
""" finished, checked,
calculate the union of a list (or tuple) of `GeneralizedInterval`s
Parameters
----------
interval_list: list or tuple,
a list (or tuple) of `GeneralizedInterval`s
join_book_endeds: bool, default True,
join the book-ended intervals into one (e.g. [[1,2],[2,3]] into [1,3]) or not
Returns
-------
iu: GeneralizedInterval,
the union of `interval_list`
"""
all_intervals = [itv for gnr_itv in interval_list for itv in gnr_itv]
iu = intervals_union(interval_list=all_intervals, join_book_endeds=join_book_endeds)
return iu
def intervals_intersection(interval_list:GeneralizedInterval, drop_degenerate:bool=True) -> Interval:
""" finished, checked,
calculate the intersection of all intervals in interval_list
Parameters
----------
interval_list: GeneralizedInterval,
the list of intervals to yield intersection
drop_degenerate: bool, default True,
whether or not drop the degenerate intervals, i.e. intervals with length 0
Returns
-------
its: Interval,
the intersection of all intervals in `interval_list`
"""
if [] in interval_list:
return []
for item in interval_list:
item.sort()
potential_start = max([item[0] for item in interval_list])
potential_end = min([item[-1] for item in interval_list])
if (potential_end > potential_start) or (potential_end == potential_start and not drop_degenerate):
its = [potential_start, potential_end]
else:
its = []
return its
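# Worked examples (added for illustration):
#   intervals_intersection([[1, 5], [3, 8]]) == [3, 5]
#   intervals_intersection([[1, 2], [3, 4]]) == []   # disjoint inputs
#   intervals_intersection([[1, 3], [3, 8]]) == []   # degenerate intersection dropped by default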
def generalized_intervals_intersection(
generalized_interval:GeneralizedInterval,
another_generalized_interval:GeneralizedInterval,
drop_degenerate:bool=True
) -> GeneralizedInterval:
""" finished, checked,
calculate the intersection of generalized_interval and another_generalized_interval,
which are both generalized intervals
Parameters
----------
generalized_interval, another_generalized_interval: GeneralizedInterval,
the 2 `GeneralizedInterval`s to yield intersection
drop_degenerate: bool, default True,
whether or not drop the degenerate intervals, i.e. intervals with length 0
Returns
-------
its: GeneralizedInterval,
the intersection of `generalized_interval` and `another_generalized_interval`
"""
this = intervals_union(generalized_interval)
another = intervals_union(another_generalized_interval)
# NOTE: from now on, `this`, `another` are in ascending ordering
# and are disjoint unions of intervals
its = []
# TODO: optimize the following process
cut_idx = 0
for item in this:
another = another[cut_idx:]
intersected_indices = []
for idx, item_prime in enumerate(another):
tmp = intervals_intersection([item,item_prime], drop_degenerate=drop_degenerate)
if len(tmp) > 0:
its.append(tmp)
intersected_indices.append(idx)
if len(intersected_indices) > 0:
cut_idx = intersected_indices[-1]
return its
def generalized_interval_complement(
total_interval:Interval,
generalized_interval:GeneralizedInterval
) -> GeneralizedInterval:
""" finished, checked, to be improved,
TODO: the case `total_interval` is a `GeneralizedInterval`
Parameters
----------
total_interval, Interval,
generalized_interval: union of `Interval`s
Returns
-------
cpl: union of `Interval`s,
the complement of `generalized_interval` in `total_interval`
"""
rearranged_intervals = intervals_union(generalized_interval)
total_interval.sort()
tot_start, tot_end = total_interval[0], total_interval[-1]
rearranged_intervals = [
[max(tot_start, item[0]), min(tot_end, item[-1])] \
for item in rearranged_intervals if overlaps(item, total_interval) > 0
]
slice_points = [tot_start]
for item in rearranged_intervals:
slice_points += item
slice_points.append(tot_end)
cpl = []
for i in range(len(slice_points) // 2):
if slice_points[2 * i + 1] - slice_points[2 * i] > 0:
cpl.append([slice_points[2 * i], slice_points[2 * i + 1]])
return cpl
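# Worked example (added for illustration):
#   generalized_interval_complement([0, 10], [[1, 3], [6, 8]]) == [[0, 1], [3, 6], [8, 10]]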
def get_optimal_covering(
total_interval:Interval,
to_cover:list,
min_len:Real,
split_threshold:Real,
traceback:bool=False,
**kwargs:Any
) -> Tuple[GeneralizedInterval,list]:
""" finished, checked,
compute an optimal covering (disjoint union of intervals) that covers `to_cover` such that
each interval in the covering is of length at least `min_len`,
and any two intervals in the covering have distance at least `split_threshold`
Parameters
----------
total_interval: Interval,
the total interval that the covering is picked from
to_cover: list,
a list of intervals to cover
min_len: real number,
        minimum length of the intervals of the covering
    split_threshold: real number,
        minimum distance of intervals of the covering
traceback: bool, default False,
        if True, additionally return a list that records, for each interval in
        the covering, the indices of the intervals in the original `to_cover`
        that it covers
Raises
------
if any of the intervals in `to_cover` exceeds the range of `total_interval`,
ValueError will be raised
Returns
-------
(ret, ret_traceback)
ret: GeneralizedInterval,
the covering that satisfies the given conditions
ret_traceback: list,
contains the list of indices of the intervals in the original `to_cover`,
that each interval in the covering covers
"""
start_time = time.time()
verbose = kwargs.get("verbose", 0)
tmp = sorted(total_interval)
tot_start, tot_end = tmp[0], tmp[-1]
if verbose >= 1:
print(f"total_interval = {total_interval}, with_length = {tot_end-tot_start}")
if tot_end - tot_start < min_len:
ret = [[tot_start, tot_end]]
ret_traceback = [list(range(len(to_cover)))] if traceback else []
return ret, ret_traceback
to_cover_intervals = []
for item in to_cover:
if isinstance(item, list):
to_cover_intervals.append(item)
else:
to_cover_intervals.append(
[max(tot_start, item-min_len//2), min(tot_end, item+min_len//2)]
)
if traceback:
replica_for_traceback = deepcopy(to_cover_intervals)
if verbose >= 2:
print(f"to_cover_intervals after all converted to intervals = {to_cover_intervals}")
# elif isinstance(item, int):
# to_cover_intervals.append([item, item+1])
# else:
# raise ValueError(f"{item} is not an integer or an interval")
# to_cover_intervals = interval_union(to_cover_intervals)
for interval in to_cover_intervals:
interval.sort()
interval_sort_key = lambda i: i[0]
to_cover_intervals.sort(key=interval_sort_key)
if verbose >= 2:
print(f"to_cover_intervals after sorted = {to_cover_intervals}")
# if to_cover_intervals[0][0] < tot_start or to_cover_intervals[-1][-1] > tot_end:
# raise IndexError("some item in to_cover list exceeds the range of total_interval")
# these cases now seen normal, and treated as follows:
for item in to_cover_intervals:
item[0] = max(item[0], tot_start)
item[-1] = min(item[-1], tot_end)
# to_cover_intervals = [item for item in to_cover_intervals if item[-1] > item[0]]
# ensure that the distance from the first interval to `tot_start` is at least `min_len`
to_cover_intervals[0][-1] = max(to_cover_intervals[0][-1], tot_start + min_len)
# ensure that the distance from the last interval to `tot_end` is at least `min_len`
to_cover_intervals[-1][0] = min(to_cover_intervals[-1][0], tot_end - min_len)
if verbose >= 2:
print(f"`to_cover_intervals` after two tails adjusted to {to_cover_intervals}")
# merge intervals whose distances (might be negative) are less than `split_threshold`
merge_flag = True
while merge_flag:
merge_flag = False
new_intervals = []
if len(to_cover_intervals) == 1:
break
for idx, item in enumerate(to_cover_intervals[:-1]):
this_start, this_end = item
next_start, next_end = to_cover_intervals[idx + 1]
if next_start - this_end >= split_threshold:
if split_threshold == (next_start - next_end) == 0 or split_threshold == (this_start - this_end) == 0:
                    # the case where split_threshold == 0 and the degenerate case should be dealt with separately
new_intervals.append([this_start, max(this_end, next_end)])
new_intervals += to_cover_intervals[idx + 2:]
merge_flag = True
to_cover_intervals = new_intervals
break
else:
new_intervals.append([this_start, this_end])
if idx == len(to_cover_intervals) - 2:
new_intervals.append([next_start, next_end])
else:
new_intervals.append([this_start, max(this_end, next_end)])
new_intervals += to_cover_intervals[idx + 2:]
merge_flag = True
to_cover_intervals = new_intervals
break
if verbose >= 2:
print(f"`to_cover_intervals` after merging intervals whose gaps < split_threshold are {to_cover_intervals}")
# currently, distance between any two intervals in `to_cover_intervals` are larger than `split_threshold`
# but any interval except the head and tail might has length less than `min_len`
ret = []
ret_traceback = []
if len(to_cover_intervals) == 1:
# NOTE: here, there's only one `to_cover_intervals`,
# whose length should be at least `min_len`
mid_pt = (to_cover_intervals[0][0]+to_cover_intervals[0][-1]) // 2
half_len = min_len // 2
if mid_pt - tot_start < half_len:
ret_start = tot_start
ret_end = min(tot_end, max(tot_start+min_len, to_cover_intervals[0][-1]))
ret = [[ret_start, ret_end]]
else:
ret_start = max(tot_start, min(to_cover_intervals[0][0], mid_pt-half_len))
ret_end = min(tot_end, max(mid_pt-half_len+min_len, to_cover_intervals[0][-1]))
ret = [[ret_start, ret_end]]
start = min(to_cover_intervals[0][0], to_cover_intervals[0][-1]-min_len)
for idx, item in enumerate(to_cover_intervals[:-1]):
# print("item", item)
this_start, this_end = item
next_start, next_end = to_cover_intervals[idx + 1]
potential_end = max(this_end, start + min_len)
# print(f"start = {start}")
# print("potential_end", potential_end)
# if distance from `potential_end` to `next_start` is not enough
# and has not reached the end of `to_cover_intervals`
# continue to the next loop
if next_start - potential_end < split_threshold:
if idx < len(to_cover_intervals) - 2:
continue
else:
# now, idx==len(to_cover_intervals)-2
# distance from `next_start` (hence `start`) to `tot_end` is at least `min_len`
ret.append([start, max(start + min_len, next_end)])
else:
ret.append([start, potential_end])
start = next_start
if idx == len(to_cover_intervals) - 2:
ret.append([next_start, max(next_start + min_len, next_end)])
# print(f"ret = {ret}")
if traceback:
for item in ret:
record = []
for idx, item_prime in enumerate(replica_for_traceback):
itc = intervals_intersection([item, item_prime])
len_itc = itc[-1] - itc[0] if len(itc) > 0 else -1
if len_itc > 0 or (len_itc == 0 and item_prime[-1] - item_prime[0] == 0):
record.append(idx)
ret_traceback.append(record)
if verbose >= 1:
print(f"the final result of get_optimal_covering is ret = {ret}, ret_traceback = {ret_traceback}, the whole process used {time.time()-start_time} second(s)")
return ret, ret_traceback
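# usage sketch for get_optimal_covering; the signature is assumed from the parameters
# referenced above (total_interval, to_cover, min_len, split_threshold, traceback, verbose):
#   get_optimal_covering(
#       total_interval=[0, 100], to_cover=[[10, 20], [80, 90]],
#       min_len=5, split_threshold=10, traceback=True,
#   )
# tracing the clipping/merging/extension logic above, both intervals are longer than
# `min_len` and their gap (60) exceeds `split_threshold`, so the expected result is
# ([[10, 20], [80, 90]], [[0], [1]])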
def find_max_cont_len(sublist:Interval, tot_rng:Real) -> dict:
""" finished, checked,
find the maximum length of continuous (consecutive) sublists of `sublist`,
    whose elements are integers within the range from 0 to `tot_rng`,
    along with the start position of this sublist and the sublist itself.
    e.g., for tot_rng=10, sublist=[0,2,3,4,7,9],
then 3, 1, [2,3,4] will be returned
Parameters
----------
sublist: Interval,
a sublist
tot_rng: real number,
the total range
Returns
-------
ret: dict, with items
- "max_cont_len"
- "max_cont_sublist_start"
- "max_cont_sublist"
"""
complementary_sublist = [-1] + [i for i in range(tot_rng) if i not in sublist] + [tot_rng]
diff_list = np.diff(np.array(complementary_sublist))
max_cont_len = np.max(diff_list) - 1
max_cont_sublist_start = np.argmax(diff_list)
max_cont_sublist = sublist[max_cont_sublist_start: max_cont_sublist_start + max_cont_len]
ret = {
"max_cont_len": max_cont_len,
"max_cont_sublist_start": max_cont_sublist_start,
"max_cont_sublist": max_cont_sublist
}
return ret
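# worked example (the one from the docstring above):
#   find_max_cont_len(sublist=[0, 2, 3, 4, 7, 9], tot_rng=10)
# the complementary list is [-1, 1, 5, 6, 8, 10], whose largest gap (5 - 1) sits at position 1,
# hence max_cont_len = 3, max_cont_sublist_start = 1, max_cont_sublist = [2, 3, 4]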
def interval_len(interval:Interval) -> Real:
""" finished, checked,
compute the length of an interval. 0 for the empty interval []
Parameters
----------
interval: Interval
Returns
-------
itv_len: real number,
the `length` of `interval`
"""
interval.sort()
itv_len = interval[-1] - interval[0] if len(interval) > 0 else 0
return itv_len
def generalized_interval_len(generalized_interval:GeneralizedInterval) -> Real:
""" finished, checked,
compute the length of a generalized interval. 0 for the empty interval []
Parameters
----------
generalized_interval: GeneralizedInterval
Returns
-------
gi_len: real number,
the `length` of `generalized_interval`
"""
gi_len = sum([interval_len(item) for item in intervals_union(generalized_interval)])
return gi_len
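# examples:
#   interval_len([3, 0])  # -> 3, note the interval is sorted in place first
#   interval_len([])      # -> 0
# assuming `intervals_union` (defined earlier in this module) merges overlapping intervals,
#   generalized_interval_len([[0, 2], [1, 4], [6, 9]])  # -> (4 - 0) + (9 - 6) = 7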
def diff_with_step(a:Sequence, step:int=1, **kwargs) -> np.ndarray:
""" finished, checked,
compute a[n+step] - a[n] for all valid n
Parameters
----------
a: array_like,
the input data
step: int, default 1,
the step to compute the difference
kwargs: dict,
Returns
-------
d: ndarray:
the difference array
"""
_a = np.array(a)
if step >= len(_a):
raise ValueError(f"step ({step}) should be less than the length ({len(_a)}) of `a`")
d = _a[step:] - _a[:-step]
return d
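# example:
#   diff_with_step([1, 2, 4, 7, 11], step=2)  # -> array([3, 5, 7]), i.e. a[n+2] - a[n]
# a `step` not less than len(a) raises ValueError, as guarded above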
def find_extrema(signal:Optional[Sequence]=None, mode:str="both") -> np.ndarray:
"""
Locate local extrema points in a signal. Based on Fermat's Theorem
Parameters
----------
signal: ndarray
input signal.
mode: str, optional
whether to find maxima ("max"), minima ("min"), or both ("both").
Returns
-------
extrema : ndarray
        indices of the extrema points.
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if mode not in ["max", "min", "both"]:
raise ValueError("Unknwon mode %r." % mode)
aux = np.diff(np.sign(np.diff(signal)))
if mode == "both":
aux = np.abs(aux)
extrema = np.nonzero(aux > 0)[0] + 1
elif mode == "max":
extrema = np.nonzero(aux < 0)[0] + 1
elif mode == "min":
extrema = np.nonzero(aux > 0)[0] + 1
return extrema
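# examples:
#   find_extrema([0, 1, 0, 1, 0], mode="max")  # -> array([1, 3])
#   find_extrema([0, 1, 0, 1, 0], mode="min")  # -> array([2])
# the first and last samples can never be reported by this second-difference test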
def is_intersect(
interval:Union[GeneralizedInterval,Interval],
another_interval:Union[GeneralizedInterval,Interval]
) -> bool:
"""
determines if two (generalized) intervals intersect or not
Parameters
----------
interval, another_interval: GeneralizedInterval or Interval
Returns
-------
bool, True if `interval` intersects with another_interval, False otherwise
"""
if interval is None or another_interval is None or len(interval)*len(another_interval)==0:
# the case of empty set
return False
# check if is GeneralizedInterval
is_generalized = isinstance(interval[0], (list,tuple))
is_another_generalized = isinstance(another_interval[0], (list,tuple))
if is_generalized and is_another_generalized:
return any([is_intersect(interval, itv) for itv in another_interval])
elif not is_generalized and is_another_generalized:
return is_intersect(another_interval, interval)
elif is_generalized: # and not is_another_generalized
return any([is_intersect(itv, another_interval) for itv in interval])
else: # not is_generalized and not is_another_generalized
        return overlaps(interval, another_interval) > 0
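# examples, assuming `overlaps` (defined earlier in this module) returns the signed
# overlap length, negative when the two intervals are apart:
#   is_intersect([0, 3], [2, 5])            # -> True
#   is_intersect([[0, 1], [5, 8]], [2, 4])  # -> False
#   is_intersect([0, 3], [3, 5])            # -> False, book-ended intervals merely touch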
def max_disjoint_covering(
intervals:GeneralizedInterval,
allow_book_endeds:bool=True,
with_traceback:bool=True,
verbose:int=0
) -> Tuple[GeneralizedInterval, List[int]]:
""" finished, checked,
    find the largest covering (i.e. the covering with the largest total interval length) of a sequence of intervals
NOTE
----
1. the problem seems slightly different from the problem discussed in refs
2. intervals with non-positive length will be ignored
Parameters
----------
intervals: GeneralizedInterval,
a sequence of intervals
allow_book_endeds: bool, default True,
if True, book-ended intervals will be considered valid (disjoint)
with_traceback: bool, default True,
if True, the indices of the intervals in the input `intervals` of the output covering
will also be returned
Returns
-------
covering: GeneralizedInterval,
the maximum non-overlapping (disjoint) subset of `intervals`
covering_inds: list of int,
        indices in `intervals` of the intervals in `covering`
References
----------
[1] https://en.wikipedia.org/wiki/Maximum_disjoint_set
[2] https://www.geeksforgeeks.org/maximal-disjoint-intervals/
"""
if len(intervals) <= 1:
covering = deepcopy(intervals)
return covering, list(range(len(covering)))
l_itv = [sorted(itv) for itv in intervals]
ordering = np.argsort([itv[-1] for itv in l_itv])
l_itv = [l_itv[idx] for idx in ordering]
# l_itv = sorted(l_itv, key=lambda itv: itv[-1])
if verbose >= 1:
print(f"the sorted intervals are {l_itv}, whose indices in the original input `intervals` are {ordering}")
if allow_book_endeds:
candidates_inds = [[idx] for idx,itv in enumerate(l_itv) if overlaps(itv, l_itv[0]) > 0]
else:
candidates_inds = [[idx] for idx,itv in enumerate(l_itv) if overlaps(itv, l_itv[0]) >= 0]
candidates = [[l_itv[inds[0]]] for inds in candidates_inds]
if verbose >= 1:
print(f"candidates heads = {candidates}, with corresponding indices in the sorted list of input intervals = {candidates_inds}")
for c_idx, (cl, ci) in enumerate(zip(candidates, candidates_inds)):
if interval_len(cl[0]) == 0:
continue
if allow_book_endeds:
tmp_inds = [
idx for idx,itv in enumerate(l_itv) if itv[0] >= cl[0][-1] and interval_len(itv) > 0
]
else:
tmp_inds = [
idx for idx,itv in enumerate(l_itv) if itv[0] > cl[0][-1] and interval_len(itv) > 0
]
if verbose >= 2:
print(f"for the {c_idx}-th candidate, tmp_inds = {tmp_inds}")
if len(tmp_inds) > 0:
tmp = [l_itv[idx] for idx in tmp_inds]
tmp_candidates, tmp_candidates_inds = \
max_disjoint_covering(
intervals=tmp,
allow_book_endeds=allow_book_endeds,
with_traceback=with_traceback,
# verbose=verbose,
)
candidates[c_idx] = cl + tmp_candidates
candidates_inds[c_idx] = ci + [tmp_inds[i] for i in tmp_candidates_inds]
if verbose >= 1:
print(f"the processed candidates are {candidates}, with corresponding indices in the sorted list of input intervals = {candidates_inds}")
# covering = max(candidates, key=generalized_interval_len)
max_idx = np.argmax([generalized_interval_len(c) for c in candidates])
covering = candidates[max_idx]
if with_traceback:
covering_inds = candidates_inds[max_idx]
covering_inds = [ordering[i] for i in covering_inds] # map to the original indices
else:
covering_inds = []
return covering, covering_inds
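# example, again assuming `overlaps` returns the signed overlap length:
#   max_disjoint_covering([[0, 3], [2, 5], [4, 7]])
# the candidate chains are [[0, 3], [4, 7]] (total length 6) and [[2, 5]] (length 3),
# so the expected result is ([[0, 3], [4, 7]], [0, 2])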
def mask_to_intervals(mask:np.ndarray, vals:Optional[Union[int,Sequence[int]]]=None) -> Union[list, dict]:
""" finished, checked,
Parameters
----------
mask: ndarray,
1d mask
vals: int or sequence of int, optional,
values in `mask` to obtain intervals
Returns
-------
intervals: dict or list,
        the intervals corresponding to each value in `vals` if `vals` is None or a sequence;
        or the intervals corresponding to `vals` if `vals` is an int.
each interval is of the form `[a,b]`, left inclusive, right exclusive
"""
if vals is None:
_vals = list(set(mask))
elif isinstance(vals, int):
_vals = [vals]
else:
_vals = vals
# assert set(_vals) & set(mask) == set(_vals)
intervals = {v:[] for v in _vals}
for v in _vals:
valid_inds = np.where(np.array(mask)==v)[0]
if len(valid_inds) == 0:
continue
split_indices = np.where(np.diff(valid_inds)>1)[0]
split_indices = split_indices.tolist() + (split_indices+1).tolist()
split_indices = sorted([0] + split_indices + [len(valid_inds)-1])
for idx in range(len(split_indices)//2):
intervals[v].append(
[valid_inds[split_indices[2*idx]], valid_inds[split_indices[2*idx+1]]+1]
)
if isinstance(vals, int):
intervals = intervals[vals]
return intervals
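# examples:
#   mask_to_intervals([0, 0, 1, 1, 0, 2, 2, 2], vals=[1, 2])  # -> {1: [[2, 4]], 2: [[5, 8]]}
#   mask_to_intervals([0, 0, 1, 1, 0, 2, 2, 2], vals=1)       # -> [[2, 4]]
# endpoints may come back as numpy integers; intervals are left inclusive, right exclusive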
|
the-stack_106_28126 | def _fromUtf8(s): return s
import math
from shutil import *
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import QtCore, QtGui
from PyQt4.phonon import Phonon
from pydub import AudioSegment
from main_const import *
class ProcessAudioFiles(QThread):
def __init__(self, listofaudiofiles):
QThread.__init__(self)
self.listofaudiofiles = listofaudiofiles
self.currentaudiopath = str()
self.currentaudioname = str()
self.currentaudiolengthformatted = str()
def run(self):
for x, i in enumerate(self.listofaudiofiles):
length = getaudiofilelength(i)
if length is not False:
self.currentaudiopath = i
self.currentaudioname = os.path.basename(i)
self.currentaudiolengthformatted = formatmilliseconds(length)
# Emit Signal Here
def getaudiofilelength(file):
try:
file = os.path.abspath(file)
if str(file).endswith(".mp3"):
temp = AudioSegment.from_mp3(file)
return len(temp)
elif str(file).endswith(".wav"):
temp = AudioSegment.from_wav(file)
return len(temp)
elif str(file).endswith(".ogg"):
temp = AudioSegment.from_ogg(file)
return len(temp)
else:
return False
except Exception:
return False
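# usage sketch (the file name is hypothetical):
#   getaudiofilelength("alert.mp3")  # -> length in milliseconds via pydub, or False
# unsupported extensions and unreadable files both return False, as handled above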
def formatmilliseconds(milliseconds):
    seconds = milliseconds // 1000
minutes,seconds = divmod(seconds,60)
hours,minutes = divmod(minutes,60)
if hours:
msg = "%s:%02d:%02d" % (hours, minutes, seconds)
return msg
else:
msg = "%02d:%02d" % (minutes, seconds)
return msg
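# examples:
#   formatmilliseconds(125000)   # -> "02:05"
#   formatmilliseconds(3725000)  # -> "1:02:05"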
class ChangeAlertFile(QDialog):
def __init__(self, gui, msg=None):
QDialog.__init__(self, gui)
self.currentalertfile = ALERTFILE
self.currentsilence = ALERTSILENCE
self.newalertfile = str()
self.newalertsilence = str()
self.newalertfilename = str()
self.gui = gui
# GUI Bindings
self.resize(458, 184)
self.changealertfileAlertFileValue = QtGui.QLabel(self)
self.changealertfileAlertFileValue.setGeometry(QtCore.QRect(10, 70, 441, 21))
self.changealertfileAlertFileValue.setAlignment(QtCore.Qt.AlignCenter)
self.changealertfileAlertFileValue.setObjectName(_fromUtf8("changealertfileAlertFileValue"))
self.changealertfileTopLabel = QtGui.QLabel(self)
self.changealertfileTopLabel.setGeometry(QtCore.QRect(10, 10, 441, 41))
self.changealertfileTopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.changealertfileTopLabel.setWordWrap(True)
self.changealertfileTopLabel.setObjectName(_fromUtf8("changealertfileTopLabel"))
self.horizontalLayoutWidget = QtGui.QWidget(self)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(110, 140, 341, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.changealertfileButtonLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.changealertfileButtonLayout.setMargin(0)
self.changealertfileButtonLayout.setObjectName(_fromUtf8("changealertfileButtonLayout"))
self.changealertfileChangeButton = QtGui.QPushButton(self.horizontalLayoutWidget)
self.changealertfileChangeButton.setObjectName(_fromUtf8("pushButton"))
self.changealertfileButtonLayout.addWidget(self.changealertfileChangeButton)
self.changealertfileSelectButton = QtGui.QPushButton(self.horizontalLayoutWidget)
self.changealertfileSelectButton.setObjectName(_fromUtf8("changealertfileSelectButton"))
self.changealertfileButtonLayout.addWidget(self.changealertfileSelectButton)
self.changealertfileCloseButton = QtGui.QPushButton(self.horizontalLayoutWidget)
self.changealertfileCloseButton.setObjectName(_fromUtf8("changealertfileCloseButton"))
self.changealertfileButtonLayout.addWidget(self.changealertfileCloseButton)
self.textBrowser = QtGui.QTextBrowser(self)
if msg is None:
self.setWindowTitle("Change Alert File")
else:
self.setWindowTitle(msg)
self.textBrowser.setGeometry(QtCore.QRect(10, 100, 441, 28))
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.changealertfileAlertFileValue.setText("New Alert File:")
self.changealertfileTopLabel.setText("This function will change your alert file (played in between each cut "
"practiced to let you know to change to the next one)")
self.changealertfileChangeButton.setText("Accept")
self.changealertfileSelectButton.setText("Open File")
self.changealertfileCloseButton.setText("Close")
self.textBrowser.setText("Please Select...")
QtCore.QObject.connect(self.changealertfileSelectButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
self.changealertfile)
QtCore.QObject.connect(self.changealertfileCloseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.reject)
QtCore.QObject.connect(self.changealertfileChangeButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
self.commitchanges)
self.exec_()
def changealertfile(self):
"""Method To Change Alert File -> Format: New Alert File: /path/to/file.mp3 (x Seconds)"""
# Select File Dialog
self.newalertfilename = QtGui.QFileDialog.getOpenFileName(self, "Select A New File To Use As An Alert File",
'', "Music Files (*.mp3);;All Files (*)")
if self.newalertfilename:
try:
self.newalertfile = AudioSegment.from_mp3(self.newalertfilename)
self.changealertfileChangeButton.setText("Opening...")
if len(self.newalertfile) > 10000:
msg = "The File You Have Selected Is %s Seconds. I Recommend You Keep The Alert File Less Than 10 Seconds. Really Use This File?" % \
str(math.floor(len(self.newalertfile) / 1000))
reply = QtGui.QMessageBox.question(self, 'Confirmation To Use A Long Alert File',
msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.No:
self.newalertfile = str()
self.changealertfileChangeButton.setText("Accept")
return False
if len(self.newalertfile) == 0:
QMessageBox.information(None, "Alert File Invalid", "This File Has A 0 Length, Please Try Another",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
self.newalertfile = str()
self.changealertfileChangeButton.setText("Accept")
return False
self.changealertfileChangeButton.setText("Accept")
newalertfiletext = "%s (%s Seconds)" % (
self.newalertfilename, math.floor(len(self.newalertfile) / 1000))
self.textBrowser.setText(newalertfiletext)
except:
QMessageBox.information(None, "Error Opening File",
"I Couldn't Open That File, Is It A Valid .mp3 File?",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
def commitchanges(self):
if isinstance(self.newalertfile, str):
QMessageBox.information(None, "No Alert File Selected", "Select A New Alert File First",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
else:
msg = "Are You Sure You Want To Use '%s' As The New Alert File? (This Will Erase Whatever Alert File You Had" \
" Before)" % self.newalertfilename
reply = QtGui.QMessageBox.question(self, 'Confirmation To Change Alert File',
msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
# Move Old Alert File And Alert Silence To Temp (In Case The Export Doesn't Work)
os.rename(ALERTFILE, os.path.join(TEMPDIRECTORY, "Alert.mp3"))
os.rename(ALERTSILENCE, os.path.join(TEMPDIRECTORY, "AlertSilence.mp3"))
# Write New Alert File And Alert Silence
try:
self.newalertsilence = AudioSegment.silent(len(self.newalertfile))
self.newalertsilence.export(ALERTSILENCE, format="mp3")
self.newalertfile.export(ALERTFILE, format="mp3")
alerttest = AudioSegment.from_mp3(ALERTFILE)
alertsilencetest = AudioSegment.from_mp3(ALERTSILENCE)
if os.path.exists(ALERTSILENCE) and os.path.exists(ALERTFILE):
print("Alert File: %s; Alert Silence: %s" % (len(alerttest), len(alertsilencetest)))
if len(alertsilencetest) > 0:
os.remove(os.path.join(TEMPDIRECTORY, "Alert.mp3"))
os.remove(os.path.join(TEMPDIRECTORY, "AlertSilence.mp3"))
self.gui.statusBar.showMessage("Alert File Changed Successfully", 5000)
self.accept()
else:
raise Exception
else:
raise Exception
# Test Files To Make Sure They Are Appropriate Length
except:
                # Delete The Corrupted Output Files
if os.path.exists(ALERTFILE):
os.remove(ALERTFILE)
if os.path.exists(ALERTSILENCE):
os.remove(ALERTSILENCE)
os.rename(os.path.join(TEMPDIRECTORY, "Alert.mp3"), ALERTFILE)
os.rename(os.path.join(TEMPDIRECTORY, "AlertSilence.mp3"), ALERTSILENCE)
QMessageBox.critical(None, "Error", "Changing The Alert File Failed. Is The File You Selected"
"A Valid .mp3 File?",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
# TODO Make Processing Audio File(s) A QThread So It Doesn't Hang The Gui
# TODO Make An Option To Pass An Individual Ambience
class AddAmbienceFiles(QDialog):
def __init__(self, parent, cutname=None):
QDialog.__init__(self, parent)
self.resize(657, 450)
self.optionalcutname = cutname
self.audioFilesTable = QtGui.QTableWidget(self)
self.audioFilesTable.setGeometry(QtCore.QRect(10, 40, 631, 291))
self.audioFilesTable.setLayoutDirection(QtCore.Qt.LeftToRight)
self.audioFilesTable.setAutoFillBackground(False)
self.audioFilesTable.setObjectName(_fromUtf8("audioFilesTable"))
self.audioFilesTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.audioFilesTable.setColumnCount(2)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.audioFilesTable.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.audioFilesTable.setHorizontalHeaderItem(1, item)
header = self.audioFilesTable.horizontalHeader()
header.setResizeMode(QHeaderView.Stretch)
self.addFilesToTableButton = QtGui.QPushButton(self)
self.addFilesToTableButton.setGeometry(QtCore.QRect(10, 340, 91, 31))
self.addFilesToTableButton.setObjectName(_fromUtf8("addFilesToTableButton"))
self.previewButton = QtGui.QPushButton(self)
self.previewButton.setGeometry(QtCore.QRect(560, 340, 84, 31))
self.previewButton.setObjectName(_fromUtf8("previewButton"))
self.topLabel = QtGui.QLabel(self)
self.topLabel.setGeometry(QtCore.QRect(10, 10, 631, 20))
self.topLabel.setStyleSheet("font: 12pt Arial Black")
self.topLabel.setAlignment(QtCore.Qt.AlignCenter)
self.topLabel.setObjectName(_fromUtf8("topLabel"))
self.addToProgramButton = QtGui.QPushButton(self)
self.addToProgramButton.setGeometry(QtCore.QRect(440, 410, 111, 31))
self.addToProgramButton.setObjectName(_fromUtf8("addToProgramButton"))
self.cancelButton = QtGui.QPushButton(self)
self.cancelButton.setGeometry(QtCore.QRect(560, 410, 84, 31))
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.editCurrentAmbienceButton = QtGui.QPushButton(self)
self.editCurrentAmbienceButton.setGeometry(QtCore.QRect(10, 410, 141, 31))
self.editCurrentAmbienceButton.setObjectName(_fromUtf8("editCurrentAmbienceButton"))
self.previewFileNameLabel = QtGui.QLabel(self)
self.previewFileNameLabel.setGeometry(QtCore.QRect(260, 350, 251, 20))
self.previewFileNameLabel.setAlignment(QtCore.Qt.AlignCenter)
self.previewFileNameLabel.setObjectName(_fromUtf8("previewFileNameLabel"))
self.previewcurrentTimeLabel = QtGui.QLabel(self)
self.previewcurrentTimeLabel.setGeometry(QtCore.QRect(200, 380, 51, 16))
self.previewcurrentTimeLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.previewcurrentTimeLabel.setObjectName(_fromUtf8("previewcurrentTimeLabel"))
self.previewtotalTimeLabel = QtGui.QLabel(self)
self.previewtotalTimeLabel.setGeometry(QtCore.QRect(490, 380, 41, 16))
self.previewtotalTimeLabel.setObjectName(_fromUtf8("previewtotalTimeLabel"))
self.removefromTableButton = QtGui.QPushButton(self)
self.removefromTableButton.setGeometry(QtCore.QRect(110, 340, 101, 31))
self.removefromTableButton.setObjectName(_fromUtf8("pushButton"))
self.statusBar = QtGui.QLabel(self)
self.statusBar.setGeometry(QtCore.QRect(160, 420, 271, 16))
self.statusBar.setObjectName(_fromUtf8("statusBar"))
self.statusBar.setText("Click 'Add File(s)' To Choose Files To Add")
self.setWindowTitle("Add Ambience To Program")
item = self.audioFilesTable.horizontalHeaderItem(0)
item.setText("Name")
item = self.audioFilesTable.horizontalHeaderItem(1)
item.setText("Length")
self.addFilesToTableButton.setText("Open File(s)")
self.removefromTableButton.setText("Remove File(s)")
self.previewButton.setText("Preview")
self.topLabel.setText("Add Ambience Files To The Kuji-In Program")
self.addToProgramButton.setText("Add To Program")
self.cancelButton.setText("Cancel")
self.editCurrentAmbienceButton.setText("Edit Existing Ambience")
self.previewFileNameLabel.setText("No File Selected")
self.previewcurrentTimeLabel.setText("--:--")
self.previewtotalTimeLabel.setText("--:--")
# PHONON ------
self.previewOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
self.previewplayer = Phonon.MediaObject(self)
self.previewplayer.setTickInterval(1000)
self.previewplayer.tick.connect(self.playertick)
Phonon.createPath(self.previewplayer, self.previewOutput)
self.seekslider = Phonon.SeekSlider(self.previewplayer, self)
self.seekslider.setGeometry(QtCore.QRect(260, 380, 221, 20))
self.seekslider.setOrientation(QtCore.Qt.Horizontal)
self.volumeSlider = Phonon.VolumeSlider(self.previewOutput, self)
self.volumeSlider.setGeometry(QtCore.QRect(520, 380, 120, 20))
self.volumeSlider.setOrientation(QtCore.Qt.Horizontal)
# -------------
QtCore.QObject.connect(self.addToProgramButton, QtCore.SIGNAL("clicked()"), self.addtoprogram)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.reject)
QtCore.QObject.connect(self.editCurrentAmbienceButton, QtCore.SIGNAL("clicked()"), self.editcurrentambience)
QtCore.QObject.connect(self.addFilesToTableButton, QtCore.SIGNAL("clicked()"), self.addtotable)
QtCore.QObject.connect(self.removefromTableButton, QtCore.SIGNAL("clicked()"), self.removefromtable)
QtCore.QObject.connect(self.previewButton, QtCore.SIGNAL("clicked()"), self.preview)
QtCore.QObject.connect(self.audioFilesTable, QtCore.SIGNAL("currentItemChanged(QTableWidgetItem*,QTableWidgetItem*)"), self.loadselectedfileforpreview)
###
self.mytableitems = list()
self.mytablefiles = list()
self.mytablelengths = list()
self.index = int()
self.tableselected = False
##
self.exec_()
def playertick(self, ticktime):
"""Method To Display Preview Time On Preview Widget"""
        displaytime = QtCore.QTime(0, (ticktime // 60000) % 60, (ticktime // 1000) % 60)
self.previewcurrentTimeLabel.setText(str(displaytime.toString('mm:ss')))
def addtoprogram(self):
"""Method To Find Out Which Cuts To Add Ambience In Table To, And Then Copy Those Audio Files There"""
if len(self.mytableitems) != 0:
if self.optionalcutname is not None:
namelist = list()
namelist.append(self.optionalcutname)
                AddAmbienceConfirmationDialog(self.mytableitems, self.mytablefiles, namelist)
else:
self.cutstoaddto = ChooseWhichAmbienceDialog(self)
if self.cutstoaddto.checkednames:
AddAmbienceConfirmationDialog(self.mytableitems, self.mytablefiles, self.cutstoaddto.checkednames)
else:
QtGui.QMessageBox.information(self, "No Files To Add To Program",
"You Need To Add At Least One Audio File Before I Can Add It The Program",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
def editcurrentambience(self):
"""Method That Calls EditAmbienceFiles() Below"""
EditAmbienceFiles(self)
def addtotable(self):
"""Method To Open A File Chooser (with multi-select) and test if multiple audio files while adding to table"""
addtotablefilechooser = QFileDialog.getOpenFileNames(self, "Choose Music File(s) To Open", "",
"Music Files(*.mp3 *.wav *.ogg);;All Files(*)")
notworkingfiles = list()
for x, i in enumerate(addtotablefilechooser):
self.statusBar.setText("Processing Files (%s/%s). Please Wait..." % (x + 1, len(addtotablefilechooser)))
QApplication.processEvents()
length = getaudiofilelength(i)
if length is not False:
tablesize = self.audioFilesTable.rowCount()
self.mytablefiles.append(i)
self.mytableitems.append(os.path.basename(i))
self.audioFilesTable.setRowCount(tablesize + 1)
self.audioFilesTable.setItem(tablesize, 0, QTableWidgetItem(os.path.basename(i)))
self.mytablelengths.append(formatmilliseconds(length))
item = QTableWidgetItem(formatmilliseconds(length))
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)
self.audioFilesTable.setItem(tablesize, 1, item)
else:
notworkingfiles.append(i)
self.statusBar.setText("")
def removefromtable(self):
"""Method To Remove Selected Ambience File From Table"""
        # TODO Row Indexes Can Get Out Of Sync After Removals, Fix Them Here
if self.tableselected:
self.audioFilesTable.removeRow(self.index)
self.mytablefiles.pop(self.index)
self.mytablelengths.pop(self.index)
self.mytableitems.pop(self.index)
[print("%s: %s" % (i, x)) for i, x in enumerate(self.mytableitems)]
[print("%s: %s" % (i, x)) for i, x in enumerate(self.mytablelengths)]
[print("%s: %s" % (i, x)) for i, x in enumerate(self.mytablefiles)]
else:
self.statusBar.setText("Select Something To Remove From Table")
def loadselectedfileforpreview(self, newItem, oldItem):
"""Method To Be Called When A Row Is Selected In The Table, And Passed Into Phonon For Instant Playback"""
        if newItem is None:
            return
        self.tableselected = True
        try:
            self.index = self.mytableitems.index(newItem.text())
        except ValueError:
            # the user clicked the "Length" column; map the duration text back to its row
            self.index = self.mytablelengths.index(newItem.text())
print("Current Index Should Be %s" % self.index)
self.previewplayer.setCurrentSource(Phonon.MediaSource(os.path.abspath(self.mytablefiles[self.index])))
self.previewFileNameLabel.setText(self.mytableitems[self.index])
self.previewtotalTimeLabel.setText(self.mytablelengths[self.index])
def preview(self):
"""Method To Preview Selected Audio File In Table"""
playing = (self.previewplayer.state() == Phonon.PlayingState)
if playing:
self.previewplayer.stop()
self.previewButton.setText("Preview")
else:
self.previewplayer.play()
self.previewButton.setText("Stop")
class EditAmbienceFiles(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.resize(664, 736)
self.loadSelectedCutsAmbienceButton = QtGui.QPushButton(self)
self.loadSelectedCutsAmbienceButton.setGeometry(QtCore.QRect(220, 40, 101, 31))
self.loadSelectedCutsAmbienceButton.setObjectName(_fromUtf8("loadSelectedCutsAmbienceButton"))
self.cutselectorComboBox = QtGui.QComboBox(self)
self.cutselectorComboBox.setGeometry(QtCore.QRect(60, 40, 151, 29))
self.cutselectorComboBox.setObjectName(_fromUtf8("cutselectorComboBox"))
self.topLabel1 = QtGui.QLabel(self)
self.topLabel1.setGeometry(QtCore.QRect(60, 10, 261, 20))
self.topLabel1.setAlignment(QtCore.Qt.AlignCenter)
self.topLabel1.setObjectName(_fromUtf8("topLabel1"))
self.topLabel2 = QtGui.QLabel(self)
self.topLabel2.setGeometry(QtCore.QRect(390, 10, 241, 16))
self.topLabel2.setAlignment(QtCore.Qt.AlignCenter)
self.topLabel2.setObjectName(_fromUtf8("topLabel2"))
self.currentAmbienceLabel = QtGui.QLabel(self)
self.currentAmbienceLabel.setGeometry(QtCore.QRect(384, 50, 251, 20))
self.currentAmbienceLabel.setAlignment(QtCore.Qt.AlignCenter)
self.currentAmbienceLabel.setObjectName(_fromUtf8("currentAmbienceLabel"))
self.audioFilesTable = QtGui.QTableWidget(self)
self.audioFilesTable.setGeometry(QtCore.QRect(20, 120, 621, 481))
self.audioFilesTable.setLayoutDirection(QtCore.Qt.LeftToRight)
self.audioFilesTable.setAutoFillBackground(False)
self.audioFilesTable.setObjectName(_fromUtf8("audioFilesTable"))
self.audioFilesTable.setColumnCount(2)
self.audioFilesTable.setSelectionBehavior(QAbstractItemView.SelectRows)
header = self.audioFilesTable.horizontalHeader()
header.setResizeMode(QHeaderView.Stretch)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.audioFilesTable.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignCenter)
self.audioFilesTable.setHorizontalHeaderItem(1, item)
self.addAmbienceButton = QtGui.QPushButton(self)
self.addAmbienceButton.setGeometry(QtCore.QRect(20, 610, 101, 31))
self.addAmbienceButton.setObjectName(_fromUtf8("addAmbienceButton"))
self.removeAmbienceButton = QtGui.QPushButton(self)
self.removeAmbienceButton.setGeometry(QtCore.QRect(130, 610, 111, 31))
self.removeAmbienceButton.setObjectName(_fromUtf8("removeAmbienceButton"))
self.previewAmbienceButton = QtGui.QPushButton(self)
self.previewAmbienceButton.setGeometry(QtCore.QRect(560, 610, 81, 31))
self.previewAmbienceButton.setObjectName(_fromUtf8("previewAmbienceButton"))
self.previewFileNameLabel = QtGui.QLabel(self)
self.previewFileNameLabel.setGeometry(QtCore.QRect(250, 620, 231, 20))
self.previewFileNameLabel.setAlignment(QtCore.Qt.AlignCenter)
self.previewFileNameLabel.setObjectName(_fromUtf8("previewFileNameLabel"))
self.previewcurrentTimeLabel = QtGui.QLabel(self)
self.previewcurrentTimeLabel.setGeometry(QtCore.QRect(180, 650, 51, 16))
self.previewcurrentTimeLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.previewcurrentTimeLabel.setObjectName(_fromUtf8("previewcurrentTimeLabel"))
self.previewtotalTimeLabel = QtGui.QLabel(self)
self.previewtotalTimeLabel.setGeometry(QtCore.QRect(500, 650, 41, 16))
self.previewtotalTimeLabel.setObjectName(_fromUtf8("previewtotalTimeLabel"))
self.topLabel3 = QtGui.QLabel(self)
self.topLabel3.setGeometry(QtCore.QRect(24, 90, 611, 20))
self.topLabel3.setAlignment(QtCore.Qt.AlignCenter)
self.topLabel3.setObjectName(_fromUtf8("topLabel3"))
self.closeButton = QtGui.QPushButton(self)
self.closeButton.setGeometry(QtCore.QRect(570, 700, 84, 31))
self.closeButton.setObjectName(_fromUtf8("closeButton"))
self.statusBar = QtGui.QLabel(self)
self.statusBar.setGeometry(QtCore.QRect(10, 710, 541, 16))
self.statusBar.setObjectName(_fromUtf8("statusBar"))
self.setWindowTitle("Edit Ambience")
self.loadSelectedCutsAmbienceButton.setText("Load Ambience")
self.topLabel1.setText("Select Cut To Edit Ambience")
self.topLabel2.setText("Currently Editing:")
self.currentAmbienceLabel.setText("Select A Cut's Ambience To Edit")
item = self.audioFilesTable.horizontalHeaderItem(0)
item.setText("Name")
item = self.audioFilesTable.horizontalHeaderItem(1)
item.setText("Length")
self.addAmbienceButton.setText("Add Ambience")
self.removeAmbienceButton.setText("Remove Selected")
self.previewAmbienceButton.setText("Preview")
self.previewFileNameLabel.setText("No File Selected")
self.previewcurrentTimeLabel.setText("--:--")
self.previewtotalTimeLabel.setText("--:--")
self.topLabel3.setText("NOTE: These Changes Cannot Be Undone")
self.closeButton.setText("Close")
# PHONON
self.previewOutput = Phonon.AudioOutput(Phonon.MusicCategory, self)
self.previewplayer = Phonon.MediaObject(self)
self.previewplayer.setTickInterval(1000)
self.previewplayer.tick.connect(self.playertick)
Phonon.createPath(self.previewplayer, self.previewOutput)
self.seekslider = Phonon.SeekSlider(self.previewplayer, self)
self.seekslider.setGeometry(QtCore.QRect(240, 650, 251, 20))
self.seekslider.setOrientation(QtCore.Qt.Horizontal)
self.seekslider.setObjectName(_fromUtf8("previewSlider"))
self.volumeSlider = Phonon.VolumeSlider(self.previewOutput, self)
self.volumeSlider.setGeometry(QtCore.QRect(530, 650, 113, 20))
self.volumeSlider.setOrientation(QtCore.Qt.Horizontal)
# -----------------
self.cutlist = [
"Presession", "Rin", "Kyo", "Toh", "Sha", "Kai", "Jin", "Retsu",
"Zai", "Zen", "Postsession", "General"
]
self.filenames = list()
self.filepaths = list()
self.filelengths = list()
# -----------------
self.cutselectorComboBox.addItems(self.cutlist)
# Populate cutselectorComboBox here
QtCore.QObject.connect(self.loadSelectedCutsAmbienceButton, QtCore.SIGNAL("clicked()"), self.loadcutsambience)
QtCore.QObject.connect(self.addAmbienceButton, QtCore.SIGNAL("clicked()"), self.addambience)
QtCore.QObject.connect(self.audioFilesTable, QtCore.SIGNAL("currentItemChanged(QTableWidgetItem*,QTableWidgetItem*)"), self.loadselectedfileforpreview)
QtCore.QObject.connect(self.previewAmbienceButton, QtCore.SIGNAL("clicked()"), self.previewambience)
QtCore.QObject.connect(self.removeAmbienceButton, QtCore.SIGNAL("clicked()"), self.removeambience)
# QtCore.QObject.connect(self.previewButton, QtCore.SIGNAL("clicked()"), self.preview)
self.exec_()
def loadselectedfileforpreview(self, newItem, oldItem):
"""Method To Be Called When A Row Is Selected In The Table, And Passed Into Phonon For Instant Playback"""
if newItem is not None:
self.tableselected = True
try:
self.index = self.filenames.index(newItem.text())
except ValueError:
self.index = self.filelengths.index(newItem.text())
self.previewcurrentTimeLabel.setText("00:00")
self.previewplayer.setCurrentSource(Phonon.MediaSource(os.path.abspath(self.filepaths[self.index])))
self.previewFileNameLabel.setText(self.filenames[self.index])
self.previewtotalTimeLabel.setText(self.filelengths[self.index])
def playertick(self, ticktime):
"""Method To Display Preview Time On Preview Widget"""
        displaytime = QtCore.QTime(0, (ticktime // 60000) % 60, (ticktime // 1000) % 60)
self.previewcurrentTimeLabel.setText(str(displaytime.toString('mm:ss')))
def loadcutsambience(self):
"""Method To Get A Cut From cutselectorComboBox, And Retrieve And Populate The Table With Ambience From That
Folder"""
self.filenames.clear()
self.filepaths.clear()
self.filelengths.clear()
self.audioFilesTable.clearContents()
self.audioFilesTable.setRowCount(0)
name = self.cutselectorComboBox.currentText()
if name in self.cutlist:
self.currentAmbienceLabel.setText("%s" % name)
notworkingfiles = list()
listoffiles = os.listdir(os.path.join(AMBIENCEDIRECTORY, name))
if len(listoffiles) > 0:
for x, i in enumerate(listoffiles):
i = os.path.join(AMBIENCEDIRECTORY, name, i)
self.statusBar.setText("Processing Files (%s/%s). Please Wait..." % (x + 1, len(listoffiles)))
QApplication.processEvents()
length = getaudiofilelength(i)
if length is not False:
tablesize = self.audioFilesTable.rowCount()
self.filepaths.append(i)
self.filenames.append(os.path.basename(i))
self.audioFilesTable.setRowCount(tablesize + 1)
self.audioFilesTable.setItem(tablesize, 0, QTableWidgetItem(os.path.basename(i)))
self.filelengths.append(formatmilliseconds(length))
item = QTableWidgetItem(formatmilliseconds(length))
item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)
self.audioFilesTable.setItem(tablesize, 1, item)
else:
print("%s Didn't Work" % i)
notworkingfiles.append(i)
else:
quit_msg = "No Ambience Files Found! Add Ambience To %s" % name
                reply = QtGui.QMessageBox.question(self, 'Add Ambience?',
                                                   quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.addambience()
else:
return
self.statusBar.setText("")
else:
self.statusBar.setText("Not A Valid Option. Check Your Selection At The Top Left")
def addambience(self):
"""Method To Call The Add Ambience Dialog Written Above"""
AddAmbienceFiles(self)
self.loadcutsambience()
def removeambience(self):
"""Method To Get The Index And Remove The Ambience From The Table, And Delete It From Disk! Make Sure There
Is A Confirmation Dialog Before You Delete From Disk"""
index = self.filenames.index(self.previewFileNameLabel.text())
name = self.filenames[index]
msg = "Really Delete '%s'?(This Cannot Be Undone)" % name
reply = QtGui.QMessageBox.question(self, 'Really Delete This File?',
msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QMessageBox.Yes:
path = self.filepaths[index]
os.remove(path)
a = self.filenames.pop(index)
b = self.filepaths.pop(index)
c = self.filelengths.pop(index)
print("Removed From Lists %s|%s|%s" % (a, b, c))
self.audioFilesTable.removeRow(index)
if not os.path.exists(path):
self.statusBar.setText("%s Successfully Deleted" % name)
else:
self.statusBar.setText("An Error Occured Trying To Delete %s" % name)
def previewambience(self):
"""Method To Preview The Selected Ambience In Table"""
playing = (self.previewplayer.state() == Phonon.PlayingState)
if playing:
self.previewplayer.stop()
self.previewAmbienceButton.setText("Preview")
else:
self.previewplayer.play()
self.previewAmbienceButton.setText("Stop")
class ChooseWhichAmbienceDialog(QDialog):
def __init__(self, parent, msg="Select Cut(s)"):
QDialog.__init__(self, parent)
self.resize(627, 139)
self.generalcheckbox = QtGui.QCheckBox(self)
self.generalcheckbox.setGeometry(QtCore.QRect(450, 70, 171, 20))
self.generalcheckbox.setObjectName(_fromUtf8("generalcheckbox"))
self.descriptionLabel = QtGui.QLabel(self)
self.descriptionLabel.setGeometry(QtCore.QRect(10, 0, 611, 31))
self.descriptionLabel.setAlignment(QtCore.Qt.AlignCenter)
self.descriptionLabel.setWordWrap(True)
self.descriptionLabel.setObjectName(_fromUtf8("descriptionLabel"))
self.OKButton = QtGui.QPushButton(self)
self.OKButton.setGeometry(QtCore.QRect(450, 100, 84, 31))
self.OKButton.setObjectName(_fromUtf8("OKButton"))
self.cancelButton = QtGui.QPushButton(self)
self.cancelButton.setGeometry(QtCore.QRect(540, 100, 84, 31))
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.horizontalLayoutWidget = QtGui.QWidget(self)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 30, 611, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.presession = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.presession.setObjectName(_fromUtf8("presession"))
self.horizontalLayout.addWidget(self.presession)
self.rin = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.rin.setObjectName(_fromUtf8("rin"))
self.horizontalLayout.addWidget(self.rin)
self.kyo = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.kyo.setObjectName(_fromUtf8("kyo"))
self.horizontalLayout.addWidget(self.kyo)
self.toh = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.toh.setObjectName(_fromUtf8("toh"))
self.horizontalLayout.addWidget(self.toh)
self.sha = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.sha.setObjectName(_fromUtf8("sha"))
self.horizontalLayout.addWidget(self.sha)
self.kai = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.kai.setObjectName(_fromUtf8("kai"))
self.horizontalLayout.addWidget(self.kai)
self.jin = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.jin.setObjectName(_fromUtf8("jin"))
self.horizontalLayout.addWidget(self.jin)
self.retsu = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.retsu.setObjectName(_fromUtf8("retsu"))
self.horizontalLayout.addWidget(self.retsu)
self.zai = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.zai.setObjectName(_fromUtf8("zai"))
self.horizontalLayout.addWidget(self.zai)
self.zen = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.zen.setObjectName(_fromUtf8("zen"))
self.horizontalLayout.addWidget(self.zen)
self.postsession = QtGui.QCheckBox(self.horizontalLayoutWidget)
self.postsession.setObjectName(_fromUtf8("postsession"))
self.horizontalLayout.addWidget(self.postsession)
########
self.setWindowTitle(msg)
self.generalcheckbox.setText("General (Unspecific Ambience)")
self.presession.setText("Pre-session")
self.rin.setText("Rin")
self.kyo.setText("Kyo")
self.toh.setText("Toh")
self.sha.setText("Sha")
self.kai.setText("Kai")
self.jin.setText("Jin")
self.retsu.setText("Retsu")
self.zai.setText("Zai")
self.zen.setText("Zen")
self.postsession.setText("Post-Session")
self.descriptionLabel.setText("Please Select Which Cut(s) You Would Like To Add Ambience To:")
self.OKButton.setText("OK")
self.cancelButton.setText("Cancel")
self.checkboxes = [
self.presession,
self.rin,
self.kyo,
self.toh,
self.sha,
self.kai,
self.jin,
self.retsu,
self.zai,
self.zen,
self.postsession,
self.generalcheckbox
]
self.names = [
"Presession",
"Rin",
"Kyo",
"Toh",
"Sha",
"Kai",
"Jin",
"Retsu",
"Zai",
"Zen",
"Postsession",
"General"
]
self.checkednames = list()
QtCore.QObject.connect(self.OKButton, QtCore.SIGNAL("clicked()"), self.accept)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.reject)
ret = self.exec_()
if ret == QDialog.Accepted:
for x, i in enumerate(self.checkboxes):
if i.isChecked():
self.checkednames.append(self.names[x])
class ModifyReferenceFiles(QDialog):
def __init__(self):
QDialog.__init__(self)
class AddAmbienceConfirmationDialog(QDialog):
def __init__(self, filenames, filepaths, cutnames):
QDialog.__init__(self)
self.filenames = filenames
self.filepaths = filepaths
self.cutnames = cutnames
self.resize(593, 398)
self.setStyleSheet(_fromUtf8("background-color:#212526;"))
self.filesListView = QtGui.QListWidget(self)
self.filesListView.setGeometry(QtCore.QRect(20, 70, 261, 261))
self.filesListView.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"background-color: rgb(42, 52, 53);"))
self.filesListView.setObjectName(_fromUtf8("filesListView"))
self.horizontalLayoutWidget = QtGui.QWidget(self)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(340, 350, 231, 33))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.buttonsLayou = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.buttonsLayou.setMargin(0)
self.buttonsLayou.setObjectName(_fromUtf8("buttonsLayou"))
self.acceptButton = QtGui.QPushButton(self.horizontalLayoutWidget)
self.acceptButton.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"background-color: rgb(53, 63, 68);"))
self.acceptButton.setObjectName(_fromUtf8("acceptButton"))
self.buttonsLayou.addWidget(self.acceptButton)
self.cancelButton = QtGui.QPushButton(self.horizontalLayoutWidget)
self.cancelButton.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"background-color: rgb(53, 63, 68);"))
self.cancelButton.setObjectName(_fromUtf8("cancelButton"))
self.buttonsLayou.addWidget(self.cancelButton)
self.topLabel = QtGui.QLabel(self)
self.topLabel.setGeometry(QtCore.QRect(50, 10, 481, 20))
self.topLabel.setAlignment(QtCore.Qt.AlignCenter)
self.topLabel.setObjectName(_fromUtf8("topLabel"))
self.statusBar = QtGui.QLabel(self)
self.statusBar.setGeometry(QtCore.QRect(20, 360, 291, 16))
self.statusBar.setObjectName(_fromUtf8("statusBar"))
self.filestopLabel = QtGui.QLabel(self)
self.filestopLabel.setGeometry(QtCore.QRect(24, 40, 261, 20))
self.filestopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.filestopLabel.setObjectName(_fromUtf8("filestopLabel"))
self.cutsListView = QtGui.QListWidget(self)
self.cutsListView.setGeometry(QtCore.QRect(310, 70, 261, 261))
self.cutsListView.setStyleSheet(_fromUtf8("color: #98A6A8; background-color: rgb(42, 52, 53);"))
self.cutsListView.setObjectName(_fromUtf8("cutsListView"))
self.ambiencetopLabel = QtGui.QLabel(self)
self.ambiencetopLabel.setGeometry(QtCore.QRect(310, 40, 261, 20))
self.ambiencetopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ambiencetopLabel.setObjectName(_fromUtf8("ambiencetopLabel"))
self.setWindowTitle("Confirmation")
self.acceptButton.setText("Accept")
self.cancelButton.setText("Cancel")
self.topLabel.setText("NOTE: This Can\'t Be Undone")
self.statusBar.setText("")
self.filestopLabel.setText("Add These File(s)")
self.ambiencetopLabel.setText("As Ambience To These Cut(s)")
for i in self.filenames:
msg = str(i)
item = QListWidgetItem(msg)
self.filesListView.addItem(item)
for x in self.cutnames:
msg = str(x)
item = QListWidgetItem(msg)
self.cutsListView.addItem(item)
QtCore.QObject.connect(self.acceptButton, QtCore.SIGNAL("clicked()"), self.addfiles)
QtCore.QObject.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.cancel)
self.exec_()
def cancel(self):
"""Method To Close Out Dialog And Return To Cut Selection Dialog"""
self.reject()
def addfiles(self):
"""Method To Add Ambience Files To Each Cut Folder Corresponding To cutnames"""
        total = len(self.cutnames) * len(self.filepaths)
        count = 1
        for x, i in enumerate(self.cutnames):
            for h, g in enumerate(self.filepaths):
                newdirectory = os.path.abspath(os.path.join(AMBIENCEDIRECTORY, i, self.filenames[h]))
                olddirectory = g
                copy2(olddirectory, newdirectory)
                self.statusBar.setText("Processing (%s/%s). Please Wait..." % (count, total))
                count += 1
self.accept()
def checkreferencefile(self, referencevariation, file):
if isinstance(referencevariation, str):
filedirectory = referencevariation
filesexist = int()
for i in os.listdir(filedirectory):
filetocheck = os.path.join(filedirectory, i)
fileisgood = self.checkifreferencefilegood(filetocheck)
if fileisgood:
filesexist += 1
else:
print("This Isn't Good: " + filetocheck)
if filesexist == len(os.listdir(filedirectory)): ## All Files Have At Least 3 Lines With Working Characters
return True
elif filesexist > 0: ## Some Files Have At Least 3 Lines With Working Characters
quit_msg = "Some Reference Files Have ContentF And Some Don't. Do You Want Me To Add The Reference" \
" Files That Have Content (The Ones That Don't Will Be Displayed Blank)?"
reply = QtGui.QMessageBox.question(self.gui, 'Add Partial Reference Files To Session',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
return True
else:
return False
else: ## No Files Have 3 Lines With Working Characters
return False |
the-stack_106_28127 | '''Runs all tests for the coleco emulator'''
import sys
import unittest
def main():
'''update the path to point to the coleco packages and run all tests'''
# update the path
sys.path.append('../')
# find all of the tests to run
discovered_suite = unittest.TestLoader().discover('.', pattern='test_*.py')
# run the tests that were found
unittest.TextTestRunner(verbosity=1).run(discovered_suite)
if __name__ == '__main__':
main() |
the-stack_106_28128 | from lbworkflow.views.generics import CreateView, UpdateView, WFListView
from .forms import SimpleWorkFlowForm
from .models import SimpleWorkFlow
class SimpleWorkFlowCreateView(CreateView):
form_classes = {
"form": SimpleWorkFlowForm,
}
def get_initial(self, form_class_key):
return {"content": self.process.ext_data.get("template", "")}
new = SimpleWorkFlowCreateView.as_view()
class SimpleWorkFlowUpdateView(UpdateView):
form_classes = {
"form": SimpleWorkFlowForm,
}
edit = SimpleWorkFlowUpdateView.as_view()
class SimpleWorkFlowListView(WFListView):
wf_code = "simplewf"
model = SimpleWorkFlow
excel_file_name = "simplewf"
excel_titles = [
"Created on",
"Created by",
"Summary",
"Content",
"Status",
]
def get_excel_data(self, o):
return [
            o.created_on,
            o.created_by.username,
o.summary,
o.content,
o.pinstance.cur_node.name,
]
show_list = SimpleWorkFlowListView.as_view()
|
the-stack_106_28129 | #!/usr/bin/env python
"""
Neural SPARQL Machines - Filter dataset by a given criterion.
'SPARQL as a Foreign Language' by Tommaso Soru and Edgard Marx et al., SEMANTiCS 2017
https://arxiv.org/abs/1708.07624
Version 1.0.0
"""
import argparse
import collections
import json
import os
import sys
from generator_utils import encode, save_cache, extract_encoded_entities
if __name__ == '__main__':
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--dataset', dest='dataset', metavar='data_300.en', help='dataset', required=True)
requiredNamed.add_argument('--used_resources', dest='used_resources', metavar='used_resources.json', help='json file', required=True)
requiredNamed.add_argument('--minimum', dest='minimum', metavar='15', help='minimum number of occurence', required=True)
requiredNamed.add_argument('--comp', dest='comp', metavar='all|any', help='require minimum for all/any resources in the query', required=True)
args = parser.parse_args()
dataset_file = args.dataset
used_resources_file = args.used_resources
MINIMUM = int(args.minimum)
COMP = any if args.comp == 'any' else all
dataset_root, _ = os.path.splitext(dataset_file)
used_resources_root, _ = os.path.splitext(used_resources_file)
filtered_sparql_file = '{}_filtered_{:d}_{}.sparql'.format(dataset_root, MINIMUM, COMP.__name__)
filtered_en_file = '{}_filtered_{:d}_{}.en'.format(dataset_root, MINIMUM, COMP.__name__)
used_resources = collections.Counter(json.loads(open(used_resources_file).read()))
filtered_resources = [elem_cnt for elem_cnt in list(used_resources.items()) if elem_cnt[1] >= MINIMUM]
save_cache('{}_filter_{:d}.json'.format(used_resources_root, MINIMUM), collections.Counter(dict(filtered_resources)))
valid_encoded_resources = [encode(elem_cnt1[0]) for elem_cnt1 in filtered_resources]
check = lambda encoded_entity : encoded_entity in valid_encoded_resources
valid_lines = []
filtered_queries = []
with open(dataset_root+'.sparql', 'r') as sparql_file:
for linenumber, line in enumerate(sparql_file):
entities = extract_encoded_entities(line)
valid = COMP(list(map(check, entities)))
if valid:
filtered_queries.append(line)
valid_lines.append(linenumber)
filtered_questions = []
with open(dataset_root+'.en', 'r') as en_file:
for linenumber, line in enumerate(en_file):
if linenumber in valid_lines:
filtered_questions.append(line)
with open(filtered_en_file, 'w') as filtered:
filtered.writelines(filtered_questions)
with open(filtered_sparql_file, 'w') as filtered:
filtered.writelines(filtered_queries)
|
the-stack_106_28130 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from absl import app
from absl import flags
from contextlib import suppress
from datetime import datetime
from classes.ga360_report_manager import GA360ReportManager
from classes.sa360_report_manager import SA360Manager
logging.basicConfig(
filename=f'report_manager-{datetime.now().strftime("%Y-%m-%d-%H:%M:%S")}.log',
format='%(asctime)s %(message)s',
datefmt='%Y-%m-%d %I:%M:%S %p',
level=logging.DEBUG
)
FLAGS = flags.FLAGS
flags.DEFINE_bool('list', False, 'List all defined reports.')
flags.DEFINE_bool('show', False, 'Print the defintion of the named report.')
flags.DEFINE_bool('add', False,
'Add a new report from a definition format JSON file.')
flags.DEFINE_bool('delete', False,
('Remove a defined report. This will also disable any '
'runners for the report if an API key is supplied.'))
flags.DEFINE_bool('install', False,
'Add runners for a named report from a JSON file.')
flags.DEFINE_bool('validate', False, 'Validate a defined report (SA360 only).')
flags.mark_bool_flags_as_mutual_exclusive([
'list', 'show', 'add', 'delete', 'install', 'validate',
])
# add
flags.DEFINE_string('file', None, 'JSON file containing the report definition.')
flags.DEFINE_bool('gcs_stored', False, 'Is this stored in gcs?')
flags.DEFINE_bool('ga360', False, 'GA360 management.')
flags.DEFINE_bool('sa360', False, 'SA360 management.')
flags.mark_bool_flags_as_mutual_exclusive(['ga360', 'sa360'])
# add/delete/show
flags.DEFINE_string('name', None,
('Name as which the report should be stored. Default is '
'the file name minus extension.'))
# common
flags.DEFINE_string('project', None,
'GCP Project act on. Default is the environment default.')
flags.DEFINE_string('email', None, 'Report owner/user email.')
flags.DEFINE_string('api_key', None, 'API Key for scheduler.')
def main(unused_argv):
if FLAGS.list: action = 'list'
elif FLAGS.show: action = 'show'
elif FLAGS.add: action = 'add'
elif FLAGS.install: action = 'install'
elif FLAGS.delete: action = 'delete'
elif FLAGS.validate: action = 'validate'
else: raise NotImplementedError()
args = {
'action': action,
'_print': True,
**{k: v for k, v in FLAGS.flag_values_dict().items() if v is not None}
}
if FLAGS.ga360:
GA360ReportManager().manage(**args)
else:
SA360Manager().manage(**args)
if __name__ == '__main__':
with suppress(SystemExit):
app.run(main)
|
the-stack_106_28131 | import time
import argparse
import numpy as np
import random
import torch
import torch.nn.functional as F
from utils import load_data, load_rand_split_data, accuracy
from model import GCN
# hyper-params
dataset = 'citeseer' # Citeseer or cora
seed = 24 # Random seed
hidden = 16 # Number of hidden units
dropout = 0.5 # Dropout rate
lr = 0.01 # Learning rate
weight_decay = 5e-4 # Weight decay(L2 loss)
epochs = 200 # Train epochs
def train(epoch):
t = time.time()
model.train()
optimizer.zero_grad()
output = model(features, adj)
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
model.eval()
output = model(features, adj)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = accuracy(output[idx_val], labels[idx_val])
print('Epoch: {:04d}'.format(epoch+1),
'loss_train: {:.4f}'.format(loss_train.item()),
'acc_train: {:.4f}'.format(acc_train.item()),
'loss_val: {:.4f}'.format(loss_val.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'time: {:.4f}s'.format(time.time() - t))
def test():
model.eval()
output = model(features, adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return loss_test.item(), acc_test.item()
if __name__ == '__main__':
random.seed(seed)
    np.random.seed(seed)
torch.manual_seed(seed)
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(dataset=dataset, seed=seed)
    # adj, features, labels, idx_train, idx_val, idx_test = load_rand_split_data(dataset)
# Model and optimizer
model = GCN(nfeat=features.shape[1],
nhid=hidden,
nclass=labels.max().item() + 1,
dropout=dropout)
optimizer = torch.optim.Adam(model.parameters(),
lr=lr, weight_decay=weight_decay)
# Train model
t_total = time.time()
for epoch in range(epochs):
train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
res = test() |
the-stack_106_28132 | from functools import partial
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QInputDialog, QLabel, QVBoxLayout, QLineEdit
from electrum.i18n import _
from electrum.plugin import hook
from electrum.wallet import Standard_Wallet
from electrum.gui.qt.util import WindowModalDialog
from .ledger import LedgerPlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(LedgerPlugin, QtPluginBase):
icon_unpaired = "ledger_unpaired.png"
icon_paired = "ledger.png"
def create_handler(self, window):
return Ledger_Handler(window)
@only_hook_if_libraries_available
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) == self.keystore_class and len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on Ledger"), show_address)
class Ledger_Handler(QtHandlerBase):
setup_signal = pyqtSignal()
auth_signal = pyqtSignal(object)
def __init__(self, win):
super(Ledger_Handler, self).__init__(win, 'Ledger')
self.setup_signal.connect(self.setup_dialog)
self.auth_signal.connect(self.auth_dialog)
def word_dialog(self, msg):
response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg, QLineEdit.Password)
if not response[1]:
self.word = None
else:
self.word = str(response[0])
self.done.set()
def message_dialog(self, msg):
self.clear_dialog()
self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
l = QLabel(msg)
vbox = QVBoxLayout(dialog)
vbox.addWidget(l)
dialog.show()
def auth_dialog(self, data):
try:
from .auth2fa import LedgerAuthDialog
except ImportError as e:
self.message_dialog(str(e))
return
dialog = LedgerAuthDialog(self, data)
dialog.exec_()
self.word = dialog.pin
self.done.set()
def get_auth(self, data):
self.done.clear()
self.auth_signal.emit(data)
self.done.wait()
return self.word
def get_setup(self):
self.done.clear()
self.setup_signal.emit()
self.done.wait()
return
def setup_dialog(self):
self.show_error(_('Initialization of Ledger HW devices is currently disabled.'))
|
the-stack_106_28133 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.thrift.java.apache_thrift_java_gen import ApacheThriftJavaGen
from pants.backend.codegen.thrift.java.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.base.exceptions import TargetDefinitionException
from pants_test.tasks.task_test_base import TaskTestBase
class ApacheThriftJavaGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ApacheThriftJavaGen
def generate_single_thrift_target(self, java_thrift_library):
context = self.context(target_roots=[java_thrift_library])
apache_thrift_gen = self.create_task(context)
apache_thrift_gen.execute()
def is_synthetic_java_library(target):
return isinstance(target, JavaLibrary) and target.is_synthetic
synthetic_targets = context.targets(predicate=is_synthetic_java_library)
self.assertEqual(1, len(synthetic_targets))
return synthetic_targets[0]
def test_single_namespace(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace java com.foo
struct One {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=JavaThriftLibrary,
sources=['one.thrift'],
compiler='thrift')
synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual(['com/foo/One.java'], list(synthetic_target.sources_relative_to_source_root()))
def test_nested_namespaces(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace java com.foo
struct One {}
"""))
self.create_file('src/thrift/com/foo/bar/two.thrift', contents=dedent("""
namespace java com.foo.bar
struct Two {}
"""))
one = self.make_target(spec='src/thrift/com/foo:one',
target_type=JavaThriftLibrary,
sources=['one.thrift', 'bar/two.thrift'],
compiler='thrift')
synthetic_target = self.generate_single_thrift_target(one)
self.assertEqual(sorted(['com/foo/One.java', 'com/foo/bar/Two.java']),
sorted(synthetic_target.sources_relative_to_source_root()))
def test_invalid_parameters(self):
self.create_file('src/thrift/com/foo/one.thrift', contents=dedent("""
namespace java com.foo
struct One {}
"""))
a = self.make_target(spec='src/thrift/com/foo:a',
target_type=JavaThriftLibrary,
sources=['one.thrift'],
compiler='thrift',
language='not-a-lang')
with self.assertRaises(TargetDefinitionException):
self.generate_single_thrift_target(a)
b = self.make_target(spec='src/thrift/com/foo:b',
target_type=JavaThriftLibrary,
sources=['one.thrift'],
compiler='thrift',
rpc_style='not-a-style')
with self.assertRaises(TargetDefinitionException):
self.generate_single_thrift_target(b)
|
the-stack_106_28134 | import election
num_candidates = 150
num_spots = 25
num_voters = 50
# dataset.make_dataset(num_candidates, num_spots, num_voters)
# name = "out.csv"
# #name = "custom.csv"
# elect = election.Election(num_candidates, name)
# ret, extras = elect.run_election()
# print(ret)
# print(len(ret))
# print(extras)
###
num_candidates = 23
elect = election.Election(num_candidates, "/Users/joshlevitas/Desktop/1stroundboys.csv")
ret, extras = elect.run_election()
print(ret)
print(len(ret))
print(extras)
# elect2 = election.Election(15, "~/Desktop/tE2.csv")
# print(elect2.num_candidates)
# ret2, extras2 = elect2.run_election()
# print(ret2)
# print(len(ret2))
# print(extras2)
|
the-stack_106_28136 | import uuid
from unittest.mock import patch
import pytest
import s3fs
from rubicon import domain
from rubicon.repository import S3Repository
from rubicon.repository.utils import slugify
def test_initialization():
s3_repo = S3Repository(root_dir="s3://bucket/root")
assert s3_repo.PROTOCOL == "s3"
assert type(s3_repo.filesystem) == s3fs.core.S3FileSystem
@patch("s3fs.core.S3FileSystem.open")
def test_persist_bytes(mock_open):
bytes_data = b"test data {uuid.uuid4()}"
bytes_path = "s3://bucket/root/path/to/data"
s3_repo = S3Repository(root_dir="s3://bucket/root")
s3_repo._persist_bytes(bytes_data, bytes_path)
mock_open.assert_called_once_with(bytes_path, "wb")
@patch("s3fs.core.S3FileSystem.open")
def test_persist_domain(mock_open):
project = domain.Project(f"Test Project {uuid.uuid4()}")
project_metadata_path = f"s3://bucket/root/{slugify(project.name)}/metadata.json"
s3_repo = S3Repository(root_dir="s3://bucket/root")
s3_repo._persist_domain(project, project_metadata_path)
mock_open.assert_called_once_with(project_metadata_path, "w")
@patch("s3fs.core.S3FileSystem.open")
def test_persist_domain_throws_error(mock_open):
not_serializable = str
project = domain.Project(f"Test Project {uuid.uuid4()}", description=not_serializable)
project_metadata_path = f"s3://bucket/root/{slugify(project.name)}/metadata.json"
s3_repo = S3Repository(root_dir="s3://bucket/root")
with pytest.raises(TypeError):
s3_repo._persist_domain(project, project_metadata_path)
mock_open.assert_not_called()
|
the-stack_106_28138 | """
Example demonstrating how to write Schema and Cred Definition on the ledger
As a setup, Steward (already on the ledger) adds Trust Anchor to the ledger.
After that, Steward builds the SCHEMA request to add new schema to the ledger.
Once that succeeds, Trust Anchor uses anonymous credentials to issue and store
claim definition for the Schema added by Steward.
"""
import asyncio
import json
import pprint
from indy import pool, ledger, wallet, did, anoncreds
from indy.error import ErrorCode, IndyError
pool_name = 'pool'
wallet_name = 'wallet'
genesis_file_path = '../indy-sdk/cli/docker_pool_transactions_genesis'
wallet_credentials = json.dumps({"key": "wallet_key"})
def print_log(value_color="", value_noncolor=""):
"""set the colors for text."""
HEADER = '\033[92m'
ENDC = '\033[0m'
print(HEADER + value_color + ENDC + str(value_noncolor))
async def write_schema_and_cred_def():
try:
await pool.set_protocol_version(2)
# 1.
print_log('\n1. Creates a new local pool ledger configuration that is used '
'later when connecting to ledger.\n')
pool_config = json.dumps({'genesis_txn': genesis_file_path})
try:
await pool.create_pool_ledger_config(pool_name, pool_config)
except IndyError:
await pool.delete_pool_ledger_config(config_name=pool_name)
await pool.create_pool_ledger_config(pool_name, pool_config)
# 2.
print_log('\n2. Open pool ledger and get handle from libindy\n')
pool_handle = await pool.open_pool_ledger(config_name=pool_name, config=None)
# 3.
print_log('\n3. Creating new secure wallet\n')
try:
await wallet.create_wallet(pool_name, wallet_name, None, None, wallet_credentials)
except IndyError:
await wallet.delete_wallet(wallet_name, wallet_credentials)
await wallet.create_wallet(pool_name, wallet_name, None, None, wallet_credentials)
# 4.
print_log('\n4. Open wallet and get handle from libindy\n')
wallet_handle = await wallet.open_wallet(wallet_name, None, wallet_credentials)
# 5.
print_log('\n5. Generating and storing steward DID and verkey\n')
steward_seed = '000000000000000000000000Steward1'
did_json = json.dumps({'seed': steward_seed})
steward_did, steward_verkey = await did.create_and_store_my_did(wallet_handle, did_json)
print_log('Steward DID: ', steward_did)
print_log('Steward Verkey: ', steward_verkey)
# 6.
print_log('\n6. Generating and storing trust anchor DID and verkey\n')
trust_anchor_did, trust_anchor_verkey = await did.create_and_store_my_did(wallet_handle, "{}")
print_log('Trust anchor DID: ', trust_anchor_did)
print_log('Trust anchor Verkey: ', trust_anchor_verkey)
# 7.
print_log('\n7. Building NYM request to add Trust Anchor to the ledger\n')
nym_transaction_request = await ledger.build_nym_request(submitter_did=steward_did,
target_did=trust_anchor_did,
ver_key=trust_anchor_verkey,
alias=None,
role='TRUST_ANCHOR')
print_log('NYM transaction request: ')
pprint.pprint(json.loads(nym_transaction_request))
# 8.
print_log('\n8. Sending NYM request to the ledger\n')
nym_transaction_response = await ledger.sign_and_submit_request(pool_handle=pool_handle,
wallet_handle=wallet_handle,
submitter_did=steward_did,
request_json=nym_transaction_request)
print_log('NYM transaction response: ')
pprint.pprint(json.loads(nym_transaction_response))
# 9.
print_log('\n9. Build the SCHEMA request to add new schema to the ledger as a Steward\n')
seq_no = 1
schema = {
'seqNo': seq_no,
'dest': steward_did,
'data': {
'id': '1',
'name': 'gvt',
'version': '1.0',
'ver': '1.0',
'attrNames': ['age', 'sex', 'height', 'name']
}
}
schema_data = schema['data']
print_log('Schema data: ')
pprint.pprint(schema_data)
print_log('Schema: ')
pprint.pprint(schema)
schema_request = await ledger.build_schema_request(steward_did, json.dumps(schema_data))
print_log('Schema request: ')
pprint.pprint(json.loads(schema_request))
# 10.
print_log('\n10. Sending the SCHEMA request to the ledger\n')
schema_response = await ledger.sign_and_submit_request(pool_handle, wallet_handle, steward_did, schema_request)
print_log('Schema response:')
pprint.pprint(json.loads(schema_response))
# 11.
print_log('\n11. Creating and storing CRED DEFINITION using anoncreds as Trust Anchor, for the given Schema\n')
cred_def_tag = 'cred_def_tag'
cred_def_type = 'CL'
cred_def_config = json.dumps({"support_revocation": False})
(cred_def_id, cred_def_json) = await anoncreds.issuer_create_and_store_credential_def(wallet_handle, trust_anchor_did, json.dumps(schema_data),
cred_def_tag, cred_def_type, cred_def_config)
print_log('Credential definition: ')
pprint.pprint(json.loads(cred_def_json))
# 12.
print_log('\n12. Closing wallet and pool\n')
await wallet.close_wallet(wallet_handle)
await pool.close_pool_ledger(pool_handle)
# 13.
print_log('\n13. Deleting created wallet\n')
await wallet.delete_wallet(wallet_name, wallet_credentials)
# 14.
print_log('\n14. Deleting pool ledger config\n')
await pool.delete_pool_ledger_config(pool_name)
except IndyError as e:
print('Error occurred: %s' % e)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(write_schema_and_cred_def())
loop.close()
if __name__ == '__main__':
main()
|
the-stack_106_28139 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
import base64
import os
import json
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Well known folders shortcut in MS Graph API
# For more information: https://docs.microsoft.com/en-us/graph/api/resources/mailfolder?view=graph-rest-1.0
WELL_KNOWN_FOLDERS = {
'archive': 'archive',
'conversation history': 'conversationhistory',
'deleted items': 'deleteditems',
'drafts': 'drafts',
'inbox': 'inbox',
'junk email': 'junkemail',
'outbox': 'outbox',
'sent items': 'sentitems',
}
EMAIL_DATA_MAPPING = {
'id': 'ID',
'createdDateTime': 'CreatedTime',
'lastModifiedDateTime': 'ModifiedTime',
'receivedDateTime': 'ReceivedTime',
'sentDateTime': 'SentTime',
'subject': 'Subject',
'importance': 'Importance',
'conversationId': 'ConversationID',
'isRead': 'IsRead',
'isDraft': 'IsDraft',
'internetMessageId': 'MessageID'
}
''' HELPER FUNCTIONS '''
def add_second_to_str_date(date_string, seconds=1):
"""
Add seconds to date string.
Used as a workaround for a Graph API bug; for more information see:
https://stackoverflow.com/questions/35729273/office-365-graph-api-greater-than-filter-on-received-date
:type date_string: ``str``
:param date_string: Date string to add seconds
:type seconds: int
:param seconds: Seconds to add to date, by default is set to 1
:return: Date time string appended seconds
:rtype: ``str``
"""
added_result = datetime.strptime(date_string, DATE_FORMAT) + timedelta(seconds=seconds)
return datetime.strftime(added_result, DATE_FORMAT)
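# Example sketch of the expected behaviour, assuming DATE_FORMAT as defined above:
#   add_second_to_str_date('2019-01-01T00:00:00Z')             -> '2019-01-01T00:00:01Z'
#   add_second_to_str_date('2019-01-01T00:00:00Z', seconds=30) -> '2019-01-01T00:00:30Z'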
def upload_file(filename, content, attachments_list):
"""
Uploads file to War room.
:type filename: ``str``
:param filename: file name to upload
:type content: ``str``
:param content: Content of file to upload
:type attachments_list: ``list``
:param attachments_list: List of uploaded file data to War Room
"""
file_result = fileResult(filename, content)
if is_error(file_result):
demisto.error(file_result['Contents'])
raise Exception(file_result['Contents'])
attachments_list.append({
'path': file_result['FileID'],
'name': file_result['File']
})
def read_file_and_encode64(attach_id):
"""
Reads a file that was uploaded to the War Room and encodes its content to base 64.
:type attach_id: ``str``
:param attach_id: The id of uploaded file to War Room
:return: Base 64 encoded data, size of the encoded data in bytes and uploaded file name.
:rtype: ``bytes``, ``int``, ``str``
"""
try:
file_info = demisto.getFilePath(attach_id)
with open(file_info['path'], 'rb') as file_data:
b64_encoded_data = base64.b64encode(file_data.read())
file_size = os.path.getsize(file_info['path'])
return b64_encoded_data, file_size, file_info['name']
except Exception as e:
raise Exception(f'Unable to read and decode in base 64 file with id {attach_id}', e)
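# Example sketch: for an uploaded file's entry id (a hypothetical '123@abc'), the function would
# return a tuple of (base64-encoded bytes, file size in bytes, original file name), e.g.
#   (b'dGVzdA==', 4, 'report.txt')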
def prepare_args(command, args):
"""
Receives command and prepares the arguments for future usage.
:type command: ``str``
:param command: Command to execute
:type args: ``dict``
:param args: Demisto args
:return: Prepared args
:rtype: ``dict``
"""
if command in ['msgraph-mail-create-draft', 'send-mail']:
if args.get('htmlBody', None):
email_body = args.get('htmlBody')
else:
email_body = args.get('body', '')
return {
'to_recipients': argToList(args.get('to')),
'cc_recipients': argToList(args.get('cc')),
'bcc_recipients': argToList(args.get('bcc')),
'subject': args.get('subject', ''),
'body': email_body,
'body_type': args.get('body_type', 'html'),
'flag': args.get('flag', 'notFlagged'),
'importance': args.get('importance', 'Low'),
'internet_message_headers': argToList(args.get('headers')),
'attach_ids': argToList(args.get('attach_ids')),
'attach_names': argToList(args.get('attach_names')),
'attach_cids': argToList((args.get('attach_cids'))),
'manual_attachments': args.get('manualAttachObj', [])
}
elif command == 'msgraph-mail-reply-to':
return {
'to_recipients': argToList(args.get('to')),
'message_id': args.get('message_id', ''),
'comment': args.get('comment')
}
return args
def prepare_outputs_for_reply_mail_command(reply, email_to, message_id):
reply.pop('attachments', None)
to_recipients, cc_recipients, bcc_recipients = build_recipients_human_readable(reply)
reply['toRecipients'] = to_recipients
reply['ccRecipients'] = cc_recipients
reply['bccRecipients'] = bcc_recipients
reply['ID'] = message_id
message_content = assign_params(**reply)
human_readable = tableToMarkdown(f'Replied message was successfully sent to {", ".join(email_to)} .',
message_content)
return CommandResults(
outputs_prefix="MicrosoftGraph",
readable_output=human_readable,
outputs_key_field="SentMail",
outputs=message_content,
)
def build_recipients_human_readable(message_content):
to_recipients = []
cc_recipients = []
bcc_recipients = []
for recipients_dict in message_content.get('toRecipients', {}):
to_recipients.append(recipients_dict.get('emailAddress', {}).get('address'))
for recipients_dict in message_content.get('ccRecipients', {}):
cc_recipients.append(recipients_dict.get('emailAddress', {}).get('address'))
for recipients_dict in message_content.get('bccRecipients', {}):
bcc_recipients.append(recipients_dict.get('emailAddress', {}).get('address'))
return to_recipients, cc_recipients, bcc_recipients
''' MICROSOFT GRAPH MAIL CLIENT '''
class MsGraphClient:
"""
Microsoft Graph Mail Client enables authorized access to a user's Office 365 mail data in a personal account.
"""
ITEM_ATTACHMENT = '#microsoft.graph.itemAttachment'
FILE_ATTACHMENT = '#microsoft.graph.fileAttachment'
CONTEXT_DRAFT_PATH = 'MicrosoftGraph.Draft(val.ID && val.ID == obj.ID)'
CONTEXT_SENT_EMAIL_PATH = 'MicrosoftGraph.Email'
def __init__(self, self_deployed, tenant_id, auth_and_token_url, enc_key, app_name, base_url, use_ssl, proxy,
ok_codes, refresh_token, mailbox_to_fetch, folder_to_fetch, first_fetch_interval, emails_fetch_limit,
auth_code, redirect_uri):
self.ms_client = MicrosoftClient(self_deployed=self_deployed, tenant_id=tenant_id, auth_id=auth_and_token_url,
enc_key=enc_key, app_name=app_name, base_url=base_url, verify=use_ssl,
proxy=proxy, ok_codes=ok_codes, refresh_token=refresh_token,
auth_code=auth_code, redirect_uri=redirect_uri,
grant_type=AUTHORIZATION_CODE)
self._mailbox_to_fetch = mailbox_to_fetch
self._folder_to_fetch = folder_to_fetch
self._first_fetch_interval = first_fetch_interval
self._emails_fetch_limit = emails_fetch_limit
def _get_root_folder_children(self, user_id):
"""
Get the root folder (Top Of Information Store) children collection.
:type user_id: ``str``
:param user_id: Mailbox address
:raises: ``Exception``: No folders found under Top Of Information Store folder
:return: List of root folder children
:rtype: ``list``
"""
suffix_endpoint = f'users/{user_id}/mailFolders/msgfolderroot/childFolders?$top=250'
root_folder_children = self.ms_client.http_request('GET', suffix_endpoint).get('value', None)
if not root_folder_children:
raise Exception("No folders found under Top Of Information Store folder")
return root_folder_children
def _get_folder_children(self, user_id, folder_id):
"""
Get the folder collection under the specified folder.
:type user_id: ``str``
:param user_id: Mailbox address
:type folder_id: ``str``
:param folder_id: Folder id
:return: List of folders that contain basic folder information
:rtype: ``list``
"""
suffix_endpoint = f'users/{user_id}/mailFolders/{folder_id}/childFolders?$top=250'
folder_children = self.ms_client.http_request('GET', suffix_endpoint).get('value', [])
return folder_children
def _get_folder_info(self, user_id, folder_id):
"""
Returns folder information.
:type user_id: ``str``
:param user_id: Mailbox address
:type folder_id: ``str``
:param folder_id: Folder id
:raises: ``Exception``: No info found for folder {folder id}
:return: Folder information if found
:rtype: ``dict``
"""
suffix_endpoint = f'users/{user_id}/mailFolders/{folder_id}'
folder_info = self.ms_client.http_request('GET', suffix_endpoint)
if not folder_info:
raise Exception(f'No info found for folder {folder_id}')
return folder_info
def _get_folder_by_path(self, user_id, folder_path):
"""
Searches and returns basic folder information.
Receives a mailbox address and a folder path (e.g. Inbox/Phishing) and iteratively retrieves folder info until
it reaches the last folder of the path. If such a folder exists, basic information that includes the folder id,
display name, parent folder id, child folder count, unread item count and total item count is returned.
:type user_id: ``str``
:param user_id: Mailbox address
:type folder_path: ``str``
:param folder_path: Folder path of searched folder
:raises: ``Exception``: No such folder exist: {folder path}
:return: Folder information if found
:rtype: ``dict``
"""
folders_names = folder_path.replace('\\', '/').split('/') # replaced backslash in original folder path
# Optimization step to improve performance: before iterating the folder path, skip the API call
# that fetches the Top of Information Store children collection when possible.
if folders_names[0].lower() in WELL_KNOWN_FOLDERS:
# check if the first folder in the path is a well-known folder in order to skip an unnecessary API call
folder_id = WELL_KNOWN_FOLDERS[folders_names[0].lower()] # get folder shortcut instead of using folder id
if len(folders_names) == 1: # in such case the folder path consist only from one well known folder
return self._get_folder_info(user_id, folder_id)
else:
current_directory_level_folders = self._get_folder_children(user_id, folder_id)
folders_names.pop(0) # remove the first folder name from the path before iterating
else: # in such case the optimization step is skipped
# current_directory_level_folders will be set to folders that are under Top Of Information Store (root)
current_directory_level_folders = self._get_root_folder_children(user_id)
for index, folder_name in enumerate(folders_names):
# searching for folder in current_directory_level_folders list by display name or id
found_folder = [f for f in current_directory_level_folders if
f.get('displayName', '').lower() == folder_name.lower() or f.get('id', '') == folder_name]
if not found_folder: # no folder found, return error
raise Exception(f'No such folder exist: {folder_path}')
found_folder = found_folder[0] # found_folder will be list with only one element in such case
if index == len(folders_names) - 1: # reached the final folder in the path
# skip get folder children step in such case
return found_folder
# didn't reach the end of the loop, set the current_directory_level_folders to folder children
current_directory_level_folders = self._get_folder_children(user_id, found_folder.get('id', ''))
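# Hypothetical usage sketch (the mailbox and folder path below are made-up examples):
#   client._get_folder_by_path('[email protected]', 'Inbox/Phishing')
# resolves the 'inbox' well-known shortcut first and then walks the child-folder collections
# until it finds a child named 'Phishing', returning that folder's basic information dict.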
def _fetch_last_emails(self, folder_id, last_fetch, exclude_ids):
"""
Fetches emails from the given folder that were modified after a specific datetime (last_fetch).
All fields are fetched for each email using the select=* clause,
for more information see https://docs.microsoft.com/en-us/graph/query-parameters.
An email is excluded from the returned results if its id is present in exclude_ids.
Number of fetched emails is limited by _emails_fetch_limit parameter.
The filtering and ordering is done based on modified time.
:type folder_id: ``str``
:param folder_id: Folder id
:type last_fetch: ``dict``
:param last_fetch: Previous fetch data
:type exclude_ids: ``list``
:param exclude_ids: List of previous fetch email ids to exclude in current run
:return: Fetched emails and exclude ids list that contains the new ids of fetched emails
:rtype: ``list`` and ``list``
"""
target_modified_time = add_second_to_str_date(last_fetch) # workaround to Graph API bug
suffix_endpoint = f"/users/{self._mailbox_to_fetch}/mailFolders/{folder_id}/messages"
params = {
"$filter": f"receivedDateTime gt {target_modified_time}",
"$orderby": "receivedDateTime asc",
"$select": "*",
"$top": self._emails_fetch_limit
}
fetched_emails = self.ms_client.http_request(
'GET', suffix_endpoint, params=params
).get('value', [])[:self._emails_fetch_limit]
if exclude_ids: # removing emails in order to prevent duplicate incidents
fetched_emails = [email for email in fetched_emails if email.get('id') not in exclude_ids]
fetched_emails_ids = [email.get('id') for email in fetched_emails]
return fetched_emails, fetched_emails_ids
@staticmethod
def _get_next_run_time(fetched_emails, start_time):
"""
Returns the received time of the last email if one exists, else the utc time that was passed as start_time.
The elements in fetched emails are ordered by modified time in ascending order,
meaning the last element has the latest received time.
:type fetched_emails: ``list``
:param fetched_emails: List of fetched emails
:type start_time: ``str``
:param start_time: utc string of format Y-m-dTH:M:SZ
:return: Returns str date of format Y-m-dTH:M:SZ
:rtype: `str`
"""
next_run_time = fetched_emails[-1].get('receivedDateTime') if fetched_emails else start_time
return next_run_time
@staticmethod
def _get_recipient_address(email_address):
"""
Receives a dict of the form "emailAddress":{"name":"_", "address":"_"} and returns the address
:type email_address: ``dict``
:param email_address: Recipient address
:return: The address of recipient
:rtype: ``str``
"""
return email_address.get('emailAddress', {}).get('address', '')
@staticmethod
def _parse_email_as_labels(parsed_email):
"""
Parses the email as incident labels.
:type parsed_email: ``dict``
:param parsed_email: The parsed email from which to create the incident labels.
:return: Incident labels
:rtype: ``list``
"""
labels = []
for (key, value) in parsed_email.items():
if key == 'Headers':
headers_labels = [
{'type': 'Email/Header/{}'.format(header.get('name', '')), 'value': header.get('value', '')}
for header in value]
labels.extend(headers_labels)
elif key in ['To', 'Cc', 'Bcc']:
recipients_labels = [{'type': f'Email/{key}', 'value': recipient} for recipient in value]
labels.extend(recipients_labels)
else:
labels.append({'type': f'Email/{key}', 'value': f'{value}'})
return labels
@staticmethod
def _parse_item_as_dict(email):
"""
Parses basic data of email.
Additional info https://docs.microsoft.com/en-us/graph/api/resources/message?view=graph-rest-1.0
:type email: ``dict``
:param email: Email to parse
:return: Parsed email
:rtype: ``dict``
"""
parsed_email = {EMAIL_DATA_MAPPING[k]: v for (k, v) in email.items() if k in EMAIL_DATA_MAPPING}
parsed_email['Headers'] = email.get('internetMessageHeaders', [])
email_body = email.get('body', {}) or email.get('uniqueBody', {})
parsed_email['Body'] = email_body.get('content', '')
parsed_email['BodyType'] = email_body.get('contentType', '')
parsed_email['Sender'] = MsGraphClient._get_recipient_address(email.get('sender', {}))
parsed_email['From'] = MsGraphClient._get_recipient_address(email.get('from', {}))
parsed_email['To'] = list(map(MsGraphClient._get_recipient_address, email.get('toRecipients', [])))
parsed_email['Cc'] = list(map(MsGraphClient._get_recipient_address, email.get('ccRecipients', [])))
parsed_email['Bcc'] = list(map(MsGraphClient._get_recipient_address, email.get('bccRecipients', [])))
return parsed_email
@staticmethod
def _build_recipient_input(recipients):
"""
Builds legal recipients list.
:type recipients: ``list``
:param recipients: List of recipients
:return: List of email addresses recipients
:rtype: ``list``
"""
return [{'emailAddress': {'address': r}} for r in recipients] if recipients else []
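# Example sketch: _build_recipient_input(['[email protected]']) would return
#   [{'emailAddress': {'address': '[email protected]'}}]
# while an empty or None recipients list yields [].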
@staticmethod
def _build_body_input(body, body_type):
"""
Builds message body input.
:type body: ``str``
:param body: The body of the message
:type body_type: ``str``
:param body_type: The body type of the message, html or text.
:return: The message body
:rtype ``dict``
"""
return {
"content": body,
"contentType": body_type
}
@staticmethod
def _build_flag_input(flag):
"""
Builds flag status of the message.
:type flag: ``str``
:param flag: The flag of the message
:return: The flag status of the message
:rtype ``dict``
"""
return {'flagStatus': flag}
@staticmethod
def _build_headers_input(internet_message_headers):
"""
Builds valid headers input.
:type internet_message_headers: ``list``
:param internet_message_headers: List of headers to build.
:return: List of transformed headers
:rtype: ``list``
"""
return [{'name': kv[0], 'value': kv[1]} for kv in (h.split(':') for h in internet_message_headers)]
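# Example sketch: ['X-Custom-Header:some-value'] would be transformed into
#   [{'name': 'X-Custom-Header', 'value': 'some-value'}]
# note that the split(':') assumes the header value itself contains no colon.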
@classmethod
def _build_attachments_input(cls, ids, attach_names=None, is_inline=False):
"""
Builds valid attachment input of the message. Is used for both in-line and regular attachments.
:type ids: ``list``
:param ids: List of uploaded to War Room files ids
:type attach_names: ``list``
:param attach_names: List of attachment name, not required.
:type is_inline: ``bool``
:param is_inline: Indicates whether the attachment is inline or not
:return: List of valid attachments of message
:rtype: ``list``
"""
provided_names = bool(attach_names)
if provided_names and len(ids) != len(attach_names):
raise Exception("Invalid input, attach_ids and attach_names lists should be the same length.")
file_attachments_result = []
# in case no attach names were provided, ids are zipped together and the attach_name value is ignored
attachments = zip(ids, attach_names) if provided_names else zip(ids, ids)
for attach_id, attach_name in attachments:
b64_encoded_data, file_size, uploaded_file_name = read_file_and_encode64(attach_id)
attachment = {
'@odata.type': cls.FILE_ATTACHMENT,
'contentBytes': b64_encoded_data.decode('utf-8'),
'isInline': is_inline,
'name': attach_name if provided_names else uploaded_file_name,
'size': file_size,
'contentId': attach_id,
}
file_attachments_result.append(attachment)
return file_attachments_result
@staticmethod
def _build_file_attachments_input(attach_ids, attach_names, attach_cids, manual_attachments):
"""
Builds both inline and regular attachments.
:type attach_ids: ``list``
:param attach_ids: List of uploaded to War Room regular attachments to send
:type attach_names: ``list``
:param attach_names: List of regular attachments names to send
:type attach_cids: ``list``
:param attach_cids: List of uploaded to War Room inline attachments to send
:type manual_attachments: ``list``
:param manual_attachments: List of manual attachments reports to send
:return: List of both inline and regular attachments of the message
:rtype: ``list``
"""
regular_attachments = MsGraphClient._build_attachments_input(ids=attach_ids, attach_names=attach_names)
inline_attachments = MsGraphClient._build_attachments_input(ids=attach_cids, is_inline=True)
# collecting manual attachments info
manual_att_ids = [os.path.basename(att['RealFileName']) for att in manual_attachments if 'RealFileName' in att]
manual_att_names = [att['FileName'] for att in manual_attachments if 'FileName' in att]
manual_report_attachments = MsGraphClient._build_attachments_input(ids=manual_att_ids,
attach_names=manual_att_names)
return regular_attachments + inline_attachments + manual_report_attachments
@staticmethod
def _build_message(to_recipients, cc_recipients, bcc_recipients, subject, body, body_type, flag, importance,
internet_message_headers, attach_ids, attach_names, attach_cids, manual_attachments):
"""
Builds valid message dict.
For more information https://docs.microsoft.com/en-us/graph/api/resources/message?view=graph-rest-1.0
"""
message = {
'toRecipients': MsGraphClient._build_recipient_input(to_recipients),
'ccRecipients': MsGraphClient._build_recipient_input(cc_recipients),
'bccRecipients': MsGraphClient._build_recipient_input(bcc_recipients),
'subject': subject,
'body': MsGraphClient._build_body_input(body=body, body_type=body_type),
'bodyPreview': body[:255],
'importance': importance,
'flag': MsGraphClient._build_flag_input(flag),
'attachments': MsGraphClient._build_file_attachments_input(attach_ids, attach_names, attach_cids,
manual_attachments)
}
if internet_message_headers:
message['internetMessageHeaders'] = MsGraphClient._build_headers_input(internet_message_headers)
return message
@staticmethod
def _build_reply(to_recipients, comment):
"""
Builds the reply message that includes recipients to reply and reply message.
:type to_recipients: ``list``
:param to_recipients: The recipients list to reply
:type comment: ``str``
:param comment: The message to reply.
:return: Returns legal reply message.
:rtype: ``dict``
"""
return {
'message': {
'toRecipients': MsGraphClient._build_recipient_input(to_recipients)
},
'comment': comment
}
def _get_attachment_mime(self, message_id, attachment_id):
"""
Gets attachment mime.
:type message_id: ``str``
:param message_id: Message id that contains the attachment
:type attachment_id: ``str``
:param attachment_id: Attachment id to get MIME
:return: The MIME of the attachment
:rtype: ``str``
"""
suffix_endpoint = f'users/{self._mailbox_to_fetch}/messages/{message_id}/attachments/{attachment_id}/$value'
mime_content = self.ms_client.http_request('GET', suffix_endpoint, resp_type='text')
return mime_content
def _get_email_attachments(self, message_id):
"""
Get email attachments and upload to War Room.
:type message_id: ``str``
:param message_id: The email id to get attachments
:return: List of uploaded to War Room data, uploaded file path and name
:rtype: ``list``
"""
attachment_results = [] # type: ignore
suffix_endpoint = f'users/{self._mailbox_to_fetch}/messages/{message_id}/attachments'
attachments = self.ms_client.http_request('Get', suffix_endpoint).get('value', [])
for attachment in attachments:
attachment_type = attachment.get('@odata.type', '')
attachment_name = attachment.get('name', 'untitled_attachment')
if attachment_type == self.FILE_ATTACHMENT:
try:
attachment_content = base64.b64decode(attachment.get('contentBytes', ''))
except Exception as e: # skip the uploading file step
demisto.info(f"MS-Graph-Listener: failed in decoding base64 file attachment with error {str(e)}")
continue
elif attachment_type == self.ITEM_ATTACHMENT:
attachment_id = attachment.get('id', '')
attachment_content = self._get_attachment_mime(message_id, attachment_id)
attachment_name = f'{attachment_name}.eml'
# upload the item/file attachment to War Room
upload_file(attachment_name, attachment_content, attachment_results)
return attachment_results
def _parse_email_as_incident(self, email):
"""
Parses fetched emails as incidents.
:type email: ``dict``
:param email: Fetched email to parse
:return: Parsed email
:rtype: ``dict``
"""
parsed_email = MsGraphClient._parse_item_as_dict(email)
if email.get('hasAttachments', False): # handling attachments of fetched email
parsed_email['Attachments'] = self._get_email_attachments(message_id=email.get('id', ''))
parsed_email['Mailbox'] = self._mailbox_to_fetch
incident = {
'name': parsed_email['Subject'],
'details': email.get('bodyPreview', '') or parsed_email['Body'],
'labels': MsGraphClient._parse_email_as_labels(parsed_email),
'occurred': parsed_email['ModifiedTime'],
'attachment': parsed_email.get('Attachments', []),
'rawJSON': json.dumps(parsed_email)
}
return incident
@logger
def fetch_incidents(self, last_run):
"""
Fetches emails from office 365 mailbox and creates incidents of parsed emails.
:type last_run: ``dict``
:param last_run:
Previous fetch run data that holds the fetch time in utc Y-m-dTH:M:SZ format,
ids of fetched emails, id and path of folder to fetch incidents from
:return: Next run data and parsed fetched incidents
:rtype: ``dict`` and ``list``
"""
last_fetch = last_run.get('LAST_RUN_TIME')
exclude_ids = last_run.get('LAST_RUN_IDS', [])
last_run_folder_path = last_run.get('LAST_RUN_FOLDER_PATH')
folder_path_changed = (last_run_folder_path != self._folder_to_fetch)
if folder_path_changed:
# detected folder path change, get new folder id
folder_id = self._get_folder_by_path(self._mailbox_to_fetch, self._folder_to_fetch).get('id')
demisto.info("MS-Graph-Listener: detected file path change, ignored last run.")
else:
# LAST_RUN_FOLDER_ID is stored in order to avoid calling _get_folder_by_path method in each fetch
folder_id = last_run.get('LAST_RUN_FOLDER_ID')
if not last_fetch or folder_path_changed: # initialized fetch
last_fetch, _ = parse_date_range(self._first_fetch_interval, date_format=DATE_FORMAT, utc=True)
demisto.info(f"MS-Graph-Listener: initialize fetch and pull emails from date :{last_fetch}")
fetched_emails, fetched_emails_ids = self._fetch_last_emails(folder_id=folder_id, last_fetch=last_fetch,
exclude_ids=exclude_ids)
incidents = list(map(self._parse_email_as_incident, fetched_emails))
next_run_time = MsGraphClient._get_next_run_time(fetched_emails, last_fetch)
next_run = {
'LAST_RUN_TIME': next_run_time,
'LAST_RUN_IDS': fetched_emails_ids,
'LAST_RUN_FOLDER_ID': folder_id,
'LAST_RUN_FOLDER_PATH': self._folder_to_fetch
}
demisto.info(f"MS-Graph-Listener: fetched {len(incidents)} incidents")
return next_run, incidents
def create_draft(self, **kwargs):
"""
Creates draft message in user's mailbox, in draft folder.
"""
suffix_endpoint = f'/users/{self._mailbox_to_fetch}/messages'
draft = MsGraphClient._build_message(**kwargs)
created_draft = self.ms_client.http_request('POST', suffix_endpoint, json_data=draft)
parsed_draft = MsGraphClient._parse_item_as_dict(created_draft)
human_readable = tableToMarkdown(f'Created draft with id: {parsed_draft.get("ID", "")}', parsed_draft)
ec = {self.CONTEXT_DRAFT_PATH: parsed_draft}
return human_readable, ec, created_draft
def send_email(self, **kwargs):
"""
Sends email from user's mailbox, the sent message will appear in Sent Items folder
"""
suffix_endpoint = f'/users/{self._mailbox_to_fetch}/sendMail'
message_content = MsGraphClient._build_message(**kwargs)
self.ms_client.http_request('POST', suffix_endpoint, json_data={'message': message_content},
resp_type="text")
message_content.pop('attachments', None)
message_content.pop('internet_message_headers', None)
human_readable = tableToMarkdown('Email was sent successfully.', message_content)
ec = {self.CONTEXT_SENT_EMAIL_PATH: message_content}
return human_readable, ec
def reply_to(self, to_recipients, comment, message_id):
"""
Sends reply message to recipients.
:type to_recipients: ``list``
:param to_recipients: List of recipients to reply.
:type comment: ``str``
:param comment: The comment to send as a reply
:type message_id: ``str``
:param message_id: The message id to reply.
:return: String representation of markdown message regarding successful message submission.
:rtype: ``str``
"""
suffix_endpoint = f'/users/{self._mailbox_to_fetch}/messages/{message_id}/reply'
reply = MsGraphClient._build_reply(to_recipients, comment)
self.ms_client.http_request('POST', suffix_endpoint, json_data=reply, resp_type="text")
return f'### Replied to: {", ".join(to_recipients)} with comment: {comment}'
def reply_mail(self, args):
email_to = argToList(args.get('to'))
email_from = args.get('from', self._mailbox_to_fetch)
message_id = args.get('inReplyTo')
email_body = args.get('body', "")
email_subject = args.get('subject', "")
email_subject = f'Re: {email_subject}'
attach_ids = argToList(args.get('attachIDs'))
email_cc = argToList(args.get('cc'))
email_bcc = argToList(args.get('bcc'))
html_body = args.get('htmlBody')
attach_names = argToList(args.get('attachNames'))
attach_cids = argToList(args.get('attachCIDs'))
message_body = html_body or email_body
suffix_endpoint = f'/users/{email_from}/messages/{message_id}/reply'
reply = self.build_message_to_reply(email_to, email_cc, email_bcc, email_subject, message_body,
attach_ids,
attach_names, attach_cids)
self.ms_client.http_request('POST', suffix_endpoint, json_data={'message': reply, 'comment': message_body},
resp_type="text")
return prepare_outputs_for_reply_mail_command(reply, email_to, message_id)
def send_draft(self, draft_id):
"""
Send draft message.
:type draft_id: ``str``
:param draft_id: Draft id to send.
:return: String representation of markdown message regarding successful message submission.
:rtype: ``str``
"""
suffix_endpoint = f'/users/{self._mailbox_to_fetch}/messages/{draft_id}/send'
self.ms_client.http_request('POST', suffix_endpoint, resp_type="text")
return f'### Draft with: {draft_id} id was sent successfully.'
@staticmethod
def build_message_to_reply(to_recipients, cc_recipients, bcc_recipients, subject, email_body, attach_ids,
attach_names, attach_cids):
"""
Builds a valid reply message dict.
For more information https://docs.microsoft.com/en-us/graph/api/resources/message?view=graph-rest-1.0
"""
return {
'toRecipients': MsGraphClient._build_recipient_input(to_recipients),
'ccRecipients': MsGraphClient._build_recipient_input(cc_recipients),
'bccRecipients': MsGraphClient._build_recipient_input(bcc_recipients),
'subject': subject,
'bodyPreview': email_body[:255],
'attachments': MsGraphClient._build_file_attachments_input(attach_ids, attach_names, attach_cids, [])
}
def test_connection(self):
"""
Basic connection test instead of test-module.
:return: Returns markdown string representation of success or Exception in case of login failure.
:rtype: ``str`` or Exception
"""
suffix_endpoint = f'users/{self._mailbox_to_fetch}'
user_response = self.ms_client.http_request('GET', suffix_endpoint)
if user_response.get('mail') != '' and user_response.get('id') != '':
return_outputs('```✅ Success!```')
else:
raise Exception("Failed validating the user.")
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
params = demisto.params()
self_deployed = params.get('self_deployed', False)
# params related to common instance configuration
base_url = 'https://graph.microsoft.com/v1.0/'
use_ssl = not params.get('insecure', False)
proxy = params.get('proxy', False)
ok_codes = (200, 201, 202)
refresh_token = params.get('refresh_token', '')
auth_and_token_url = params.get('auth_id', '')
enc_key = params.get('enc_key', '')
app_name = 'ms-graph-mail-listener'
# params related to mailbox to fetch incidents
mailbox_to_fetch = params.get('mailbox_to_fetch', '')
folder_to_fetch = params.get('folder_to_fetch', 'Inbox')
first_fetch_interval = params.get('first_fetch', '15 minutes')
emails_fetch_limit = int(params.get('fetch_limit', '50'))
# params related to self deployed
tenant_id = refresh_token if self_deployed else ''
# params related to oproxy
# In case the script is running for the first time, refresh token is retrieved from integration parameters,
# in other case it's retrieved from integration context.
refresh_token = get_integration_context().get('current_refresh_token') or refresh_token
client = MsGraphClient(self_deployed, tenant_id, auth_and_token_url, enc_key, app_name, base_url, use_ssl, proxy,
ok_codes, refresh_token, mailbox_to_fetch, folder_to_fetch, first_fetch_interval,
emails_fetch_limit, auth_code=params.get('auth_code', ''),
redirect_uri=params.get('redirect_uri', ''))
try:
command = demisto.command()
args = prepare_args(command, demisto.args())
LOG(f'Command being called is {command}')
if command == 'test-module':
# cannot use test module due to the lack of ability to set refresh token to integration context
raise Exception("Please use !msgraph-mail-test instead")
if command == 'msgraph-mail-test':
client.test_connection()
if command == 'fetch-incidents':
next_run, incidents = client.fetch_incidents(demisto.getLastRun())
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif command == 'msgraph-mail-create-draft':
human_readable, ec, raw_response = client.create_draft(**args)
return_outputs(human_readable, ec, raw_response)
elif command == 'msgraph-mail-reply-to':
human_readable = client.reply_to(**args) # pylint: disable=E1123
return_outputs(human_readable)
elif command == 'msgraph-mail-send-draft':
human_readable = client.send_draft(**args) # pylint: disable=E1123
return_outputs(human_readable)
elif command == 'send-mail':
human_readable, ec = client.send_email(**args)
return_outputs(human_readable, ec)
elif command == 'reply-mail':
return_results(client.reply_mail(args))
except Exception as e:
return_error(str(e))
from MicrosoftApiModule import * # noqa: E402
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
the-stack_106_28140 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/folstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
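# Example sketch of the xgettext "po" format this function consumes and the value it returns:
#   parse_po('msgid "Hello"\nmsgstr ""\n')
# would return [(['"Hello"'], ['""'])], i.e. each tuple keeps the raw quoted source lines.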
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *fol_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("fol-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_106_28141 | from pyspider.libs.base_handler import *
class Handler(BaseHandler):
crawl_config = {
}
@every(minutes=24 * 60)
def on_start(self):
self.crawl('http://scrapy.org/', callback=self.index_page)
@config(age=10 * 24 * 60 * 60)
def index_page(self, response):
for each in response.doc('a[href^="http"]').items():
self.crawl(each.attr.href, callback=self.detail_page)
def detail_page(self, response):
return {
"url": response.url,
"title": response.doc('title').text(),
}
|
the-stack_106_28144 | from thrift.protocol import TCompactProtocol
from thrift.transport import THttpClient
from ttypes import LoginRequest
import json, requests, LineService
nama = 'Aditmadzs'
Headers = {
'User-Agent': "Line/2.1.5",
'X-Line-Application': "CHROMEOS\t2.1.5\t"+nama+"\t11.2.5",
"x-lal": "ja-US_US",
}
def qrLogin():
Headers.update({'x-lpqs' : '/api/v4/TalkService.do'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4/TalkService.do')
transport.setCustomHeaders(Headers)
protocol = TCompactProtocol.TCompactProtocol(transport)
client = LineService.Client(protocol)
qr = client.getAuthQrcode(keepLoggedIn=1, systemName=nama)
link = "line://au/q/" + qr.verifier
print(link)
Headers.update({"x-lpqs" : '/api/v4/TalkService.do', 'X-Line-Access': qr.verifier})
json.loads(requests.session().get('https://gd2.line.naver.jp/Q', headers=Headers).text)
Headers.update({'x-lpqs' : '/api/v4p/rs'})
transport = THttpClient.THttpClient('https://gd2.line.naver.jp/api/v4p/rs')
transport.setCustomHeaders(Headers)
protocol = TCompactProtocol.TCompactProtocol(transport)
client = LineService.Client(protocol)
req = LoginRequest()
req.type = 1
req.verifier = qr.verifier
req.e2eeVersion = 1
res = client.loginZ(req)
print('\n')
print(res.authToken)
qrLogin()
|