# -*- coding: utf-8 -*-
import os
import random
import functools
import six
import numpy as np
import torch
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from aw_nas import utils
from aw_nas.final.base import FinalTrainer
from aw_nas.final.bnn_model import BNNGenotypeModel
from aw_nas.utils.common_utils import nullcontext
from aw_nas.utils.exception import expect
from aw_nas.utils import DataParallel
from aw_nas.utils import DistributedDataParallel
from aw_nas.utils.torch_utils import calib_bn, GroupSampler, DistributedGroupSampler
from aw_nas.utils.parallel_utils import get_dist_info
try:
from torch.nn import SyncBatchNorm
convert_sync_bn = SyncBatchNorm.convert_sync_batchnorm
except ImportError:
utils.getLogger("cnn_trainer").warn(
"Import convert_sync_bn failed! SyncBatchNorm might not work!")
convert_sync_bn = lambda m: m
def _warmup_update_lr(optimizer, epoch, init_lr, warmup_epochs, warmup_ratio=0.0):
"""
    Linearly warm up the optimizer learning rate from `warmup_ratio` to `init_lr`.
"""
lr = (init_lr - warmup_ratio) * epoch / warmup_epochs + warmup_ratio
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
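# Hedged sketch (not part of the original file): illustrates the linear warmup
# computed by `_warmup_update_lr`. The toy parameter and hyper-parameters below
# are illustrative only.
def _example_warmup_schedule():
    toy_param = nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([toy_param], lr=0.025)
    # epochs 1..5 ramp the lr linearly from 0.005 up to init_lr=0.025
    return [_warmup_update_lr(optimizer, epoch, init_lr=0.025, warmup_epochs=5)
            for epoch in range(1, 6)]  # -> [0.005, 0.010, 0.015, 0.020, 0.025]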
def worker_init_fn(worker_id, num_workers, rank, seed):
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
class CNNFinalTrainer(FinalTrainer): #pylint: disable=too-many-instance-attributes
NAME = "cnn_trainer"
def __init__(self, model, dataset, device, gpus, objective,#pylint: disable=dangerous-default-value
multiprocess=False,
epochs=600, batch_size=96,
optimizer_type="SGD", optimizer_kwargs=None,
learning_rate=0.025, momentum=0.9,
warmup_epochs=0,
optimizer_scheduler={
"type": "CosineAnnealingLR",
"T_max": 600,
"eta_min": 0.001
},
weight_decay=3e-4, no_bias_decay=False,
grad_clip=5.0,
auxiliary_head=False, auxiliary_weight=0.4,
add_regularization=False,
save_as_state_dict=False,
workers_per_queue=2,
eval_no_grad=True,
eval_every=1,
eval_batch_size=1,
calib_bn_setup=False, # for OFA final model
seed=None,
schedule_cfg=None):
super(CNNFinalTrainer, self).__init__(schedule_cfg)
self.model = model
self.parallel_model = None
self.dataset = dataset
self.device = device
self.gpus = gpus
self.multiprocess = multiprocess
self.objective = objective
self._perf_func = self.objective.get_perfs
self._perf_names = self.objective.perf_names()
self._obj_loss = self.objective.get_loss
self.epochs = epochs
self.warmup_epochs = warmup_epochs
self.optimizer_type = optimizer_type
self.optimizer_kwargs = optimizer_kwargs
self.learning_rate = learning_rate
self.grad_clip = grad_clip
self.auxiliary_head = auxiliary_head
self.auxiliary_weight = auxiliary_weight
self.add_regularization = add_regularization
self.save_as_state_dict = save_as_state_dict
self.eval_no_grad = eval_no_grad
self.eval_every = eval_every
self.calib_bn_setup = calib_bn_setup
# for optimizer
self.weight_decay = weight_decay
self.no_bias_decay = no_bias_decay
self.learning_rate = learning_rate
self.momentum = momentum
self.optimizer_scheduler_cfg = optimizer_scheduler
self._criterion = nn.CrossEntropyLoss().to(self.device)
_splits = self.dataset.splits()
train_kwargs = getattr(_splits["train"], "kwargs", {})
test_kwargs = getattr(_splits["test"], "kwargs", train_kwargs)
"""
GroupSampler is needed when `keep_ratio` in dataset is set True.
It makes two group of images: aspect ratio > 1 , and aspect ratio < 1.
`shuffle` is invalid when using GroupSampler because it cannot
guarantee the original order of images.
"""
group = train_kwargs.pop("group_sample", False)
test_kwargs["shuffle"] = False
if self.multiprocess:
sampler = DistributedGroupSampler(_splits["train"], None,
batch_size) if group \
else DistributedSampler(_splits["train"], shuffle=True)
test_kwargs["sampler"] = DistributedSampler(_splits["test"],
shuffle=False)
else:
sampler = GroupSampler(_splits["train"], None, batch_size) if group \
else None
if sampler is None:
train_kwargs["shuffle"] = True
else:
train_kwargs.pop("shuffle", None)
train_kwargs["sampler"] = sampler
rank, world_size = get_dist_info()
init_fn = functools.partial(worker_init_fn, num_workers=workers_per_queue, rank=rank,
seed=seed) if seed is not None else None
self.train_queue = torch.utils.data.DataLoader(
_splits["train"], batch_size=batch_size, pin_memory=False,
num_workers=workers_per_queue,
worker_init_fn=init_fn,
**train_kwargs)
self.valid_queue = torch.utils.data.DataLoader(
_splits["test"], batch_size=eval_batch_size, pin_memory=False,
num_workers=workers_per_queue, **test_kwargs)
if self.calib_bn_setup:
self.model = calib_bn(self.model, self.train_queue)
        # the optimizer and scheduler are initialized in the `trainer.setup` call
self.optimizer = None
self.scheduler = None
# states of the trainer
self.last_epoch = 0
self.epoch = 0
self.save_every = None
self.report_every = None
self.train_dir = None
self._is_setup = False
def setup(self, load=None, load_state_dict=None,
save_every=None, train_dir=None, report_every=50):
expect(not (load is not None and load_state_dict is not None),
"`load` and `load_state_dict` cannot be passed simultaneously.")
if load is not None:
self.load(load)
else:
assert self.model is not None
if load_state_dict is not None:
self._load_state_dict(load_state_dict)
self.logger.info("param size = {} M".format( \
utils.count_parameters(
self.model,
count_binary=isinstance(self.model, BNNGenotypeModel))/1.e6))
if self.model is not None:
self._parallelize()
self.optimizer = self._init_optimizer()
self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)
self.save_every = save_every
self.train_dir = train_dir
self.report_every = report_every
expect(self.save_every is None or self.train_dir is not None,
"when `save_every` is not None, make sure `train_dir` is not None")
self._is_setup = True
def save(self, path):
        rank = os.environ.get("LOCAL_RANK")
if rank is not None and rank != '0':
return
path = utils.makedir(path)
if self.save_as_state_dict:
torch.save(self.model.state_dict(), os.path.join(path, "model_state.pt"))
else:
            # save the model directly instead of the state_dict,
            # so that it can be loaded and run directly, without specifying the configuration
torch.save(self.model, os.path.join(path, "model.pt"))
torch.save({
"epoch": self.epoch,
"optimizer":self.optimizer.state_dict()
}, os.path.join(path, "optimizer.pt"))
if self.scheduler is not None:
torch.save(self.scheduler.state_dict(), os.path.join(path, "scheduler.pt"))
self.logger.info("Saved checkpoint to %s", path)
def load(self, path):
# load the model
m_path = os.path.join(path, "model.pt") if os.path.isdir(path) else path
if not os.path.exists(m_path):
m_path = os.path.join(path, "model_state.pt")
self._load_state_dict(m_path)
else:
self.model = torch.load(m_path, map_location=torch.device("cpu"))
self.model.to(self.device)
self._parallelize()
log_strs = ["model from {}".format(m_path)]
        # init the optimizer/scheduler
self.optimizer = self._init_optimizer()
self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)
o_path = os.path.join(path, "optimizer.pt") if os.path.isdir(path) else None
if o_path and os.path.exists(o_path):
checkpoint = torch.load(o_path, map_location=torch.device("cpu"))
self.optimizer.load_state_dict(checkpoint["optimizer"])
log_strs.append("optimizer from {}".format(o_path))
self.last_epoch = checkpoint["epoch"]
        # load the scheduler
if self.scheduler is not None:
s_path = os.path.join(path, "scheduler.pt") if os.path.isdir(path) else None
if s_path and os.path.exists(s_path):
self.scheduler.load_state_dict(torch.load(s_path, map_location=torch.device("cpu")))
log_strs.append("scheduler from {}".format(s_path))
self.logger.info("param size = %f M",
utils.count_parameters(self.model) / 1.e6)
self.logger.info("Loaded checkpoint from %s: %s", path, ", ".join(log_strs))
self.logger.info("Last epoch: %d", self.last_epoch)
def train(self):
if len(self.gpus) >= 2:
self._forward_once_for_flops(self.model)
# save the model.log
if self.train_dir is not None:
with open(os.path.join(self.train_dir, "model.log"),"w") as f:
f.write(str(self.model))
for epoch in range(self.last_epoch+1, self.epochs+1):
self.epoch = epoch
self.on_epoch_start(epoch)
if epoch < self.warmup_epochs:
_warmup_update_lr(self.optimizer, epoch, self.learning_rate, self.warmup_epochs)
else:
if self.scheduler is not None and epoch != 1:
self.scheduler.step()
self.logger.info("epoch %d lr %e", epoch, self.optimizer.param_groups[0]["lr"])
train_acc, train_obj = self.train_epoch(self.train_queue, self.parallel_model,
self._criterion, self.optimizer,
self.device, epoch)
self.logger.info("train_acc %f ; train_obj %f", train_acc, train_obj)
if self.save_every and epoch % self.save_every == 0:
path = os.path.join(self.train_dir, str(epoch))
self.save(path)
if epoch % self.eval_every == 0:
valid_acc, valid_obj, valid_perfs = self.infer_epoch(self.valid_queue,
self.parallel_model,
self._criterion, self.device)
self.logger.info("valid_acc %f ; valid_obj %f ; valid performances: %s",
valid_acc, valid_obj,
"; ".join(
["{}: {:.3f}".format(n, v) for n, v in valid_perfs.items()]))
self.on_epoch_end(epoch)
self.save(os.path.join(self.train_dir, "final"))
def evaluate_split(self, split):
if len(self.gpus) >= 2:
self._forward_once_for_flops(self.model)
assert split in {"train", "test"}
if split == "test":
queue = self.valid_queue
else:
queue = self.train_queue
acc, obj, perfs = self.infer_epoch(queue, self.parallel_model,
self._criterion, self.device)
self.logger.info("acc %f ; obj %f ; performance: %s", acc, obj,
"; ".join(
["{}: {:.3f}".format(n, v) for n, v in perfs.items()]))
return acc, obj
@classmethod
def supported_data_types(cls):
return ["image"]
def _load_state_dict(self, path):
# load state dict
checkpoint = torch.load(path, map_location=torch.device("cpu"))
extra_keys = set(checkpoint.keys()).difference(set(self.model.state_dict().keys()))
if extra_keys:
self.logger.error("%d extra keys in checkpoint! "
"Make sure the genotype match", len(extra_keys))
missing_keys = {key for key in set(self.model.state_dict().keys())\
.difference(checkpoint.keys()) \
if "auxiliary" not in key}
if missing_keys:
self.logger.error(("{} missing keys will not be loaded! Check your genotype, "
"This should be due to you're using the state dict dumped by"
" `awnas eval-arch --save-state-dict` in an old version, "
"and your genotype actually skip some "
"cells, which might means, many parameters of your "
"sub-network is not actually active, "
"and this genotype might not be so effective.")
.format(len(missing_keys)))
self.logger.error(str(missing_keys))
self.logger.info(self.model.load_state_dict(checkpoint, strict=False))
def _parallelize(self):
if self.multiprocess:
self.model = convert_sync_bn(self.model).to(self.device)
self.parallel_model = DistributedDataParallel(
self.model, self.gpus, broadcast_buffers=False, find_unused_parameters=True)
elif len(self.gpus) >= 2:
self.parallel_model = DataParallel(self.model, self.gpus).to(self.device)
else:
self.parallel_model = self.model
def _init_optimizer(self):
group_weight = []
group_bias = []
for name, param in self.model.named_parameters():
if "bias" in name:
group_bias.append(param)
else:
group_weight.append(param)
assert len(list(self.model.parameters())) == len(group_weight) + len(group_bias)
optim_cls = getattr(torch.optim, self.optimizer_type)
if self.optimizer_type == "Adam":
optim_kwargs = {
"lr": self.learning_rate,
"weight_decay": self.weight_decay
}
else:
optim_kwargs = {
"lr": self.learning_rate,
"momentum": self.momentum,
"weight_decay": self.weight_decay
}
optim_kwargs.update(self.optimizer_kwargs or {})
optimizer = optim_cls(
[{"params": group_weight},
{"params": group_bias,
"weight_decay": 0 if self.no_bias_decay else self.weight_decay}],
**optim_kwargs)
return optimizer
@staticmethod
def _init_scheduler(optimizer, cfg):
if cfg:
cfg = {k:v for k, v in six.iteritems(cfg)}
sch_cls = utils.get_scheduler_cls(cfg.pop("type"))
return sch_cls(optimizer, **cfg)
return None
def train_epoch(self, train_queue, model, criterion, optimizer, device, epoch):
expect(self._is_setup, "trainer.setup should be called first")
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
model.train()
for step, (inputs, target) in enumerate(train_queue):
inputs = inputs.to(device)
target = target.to(device)
optimizer.zero_grad()
            if self.auxiliary_head:  # assume the model returns two logits in train mode
logits, logits_aux = model(inputs)
loss = self._obj_loss(inputs, logits, target, model,
add_evaluator_regularization=self.add_regularization)
loss_aux = criterion(logits_aux, target)
loss += self.auxiliary_weight * loss_aux
else:
logits = model(inputs)
loss = self._obj_loss(inputs, logits, target, model,
add_evaluator_regularization=self.add_regularization)
#torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)
loss.backward()
if isinstance(self.grad_clip, (int, float)) and self.grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = inputs.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
del loss
if step % self.report_every == 0:
self.logger.info("train %03d %.3f; %.2f%%; %.2f%%",
step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def infer_epoch(self, valid_queue, model, criterion, device):
expect(self._is_setup, "trainer.setup should be called first")
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
objective_perfs = utils.OrderedStats()
all_perfs = []
model.eval()
context = torch.no_grad if self.eval_no_grad else nullcontext
with context():
for step, (inputs, target) in enumerate(valid_queue):
inputs = inputs.to(device)
target = target.to(device)
logits = model(inputs)
loss = criterion(logits, target)
perfs = self._perf_func(inputs, logits, target, model)
all_perfs.append(perfs)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = inputs.size(0)
# objective_perfs.update(dict(zip(self._perf_names, perfs)), n=n)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
del loss
if step % self.report_every == 0:
all_perfs_by_name = list(zip(*all_perfs))
                    # support using the objective aggregate fn, for stats other than the mean
                    # e.g., adversarial distance median; detection mAP (see det_trainer.py)
obj_perfs = {
k: self.objective.aggregate_fn(k, False)(v)
for k, v in zip(self._perf_names, all_perfs_by_name)
}
self.logger.info("valid %03d %e %f %f %s", step, objs.avg, top1.avg, top5.avg,
"; ".join(["{}: {:.3f}".format(perf_n, v) \
# for perf_n, v in objective_perfs.avgs().items()]))
for perf_n, v in obj_perfs.items()]))
all_perfs_by_name = list(zip(*all_perfs))
obj_perfs = {
k: self.objective.aggregate_fn(k, False)(v)
for k, v in zip(self._perf_names, all_perfs_by_name)
}
return top1.avg, objs.avg, obj_perfs
def on_epoch_start(self, epoch):
super(CNNFinalTrainer, self).on_epoch_start(epoch)
self.model.on_epoch_start(epoch)
self.objective.on_epoch_start(epoch)
def on_epoch_end(self, epoch):
super(CNNFinalTrainer, self).on_epoch_end(epoch)
self.model.on_epoch_end(epoch)
self.objective.on_epoch_end(epoch)
def _forward_once_for_flops(self, model):
# forward the model once to get the flops calculated
self.logger.info("Training parallel: Forward one batch for the flops information")
inputs, _ = next(iter(self.train_queue))
model(inputs.to(self.device))
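# Hedged usage sketch (not part of the original file). The `model`, `dataset`,
# and `objective` arguments are hypothetical placeholders; in aw_nas they are
# normally built from a YAML configuration by the `awnas` CLI rather than by hand.
# `model` is assumed to already live on `device`, since the single-GPU path in
# `_parallelize` does not move it.
def _example_final_training(model, dataset, objective, train_dir="/tmp/awnas_final"):
    os.makedirs(train_dir, exist_ok=True)
    trainer = CNNFinalTrainer(
        model=model, dataset=dataset, device="cuda", gpus=[0],
        objective=objective, epochs=1, batch_size=32)
    # `setup` initializes the optimizer/scheduler and (optionally) loads checkpoints
    trainer.setup(train_dir=train_dir, report_every=10)
    trainer.train()
    return trainer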
# Source: Harald-R/aw_nas, path aw_nas/final/cnn_trainer.py (MIT License, Python)
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import dataclasses
import enum
import json
import logging
import os
import subprocess
import tempfile
import traceback
from pathlib import Path
from typing import (
TypeVar,
Type,
Union,
Optional,
AsyncIterator,
Set,
List,
Sequence,
Dict,
Callable,
)
import dataclasses_json
from libcst.metadata import CodeRange
from .. import (
log,
json_rpc,
error,
version,
command_arguments,
configuration as configuration_module,
statistics_logger,
timer,
)
from ..coverage_collector import coverage_collector_for_module, CoveredAndUncoveredLines
from . import (
backend_arguments,
commands,
language_server_protocol as lsp,
server_connection,
async_server_connection as connection,
start,
incremental,
location_lookup,
query,
server_event,
statistics,
)
LOG: logging.Logger = logging.getLogger(__name__)
COMMAND_NAME = "persistent"
CONSECUTIVE_START_ATTEMPT_THRESHOLD: int = 5
class LSPEvent(enum.Enum):
INITIALIZED = "initialized"
NOT_INITIALIZED = "not initialized"
CONNECTED = "connected"
NOT_CONNECTED = "not connected"
NOT_CONFIGURED = "not configured"
DISCONNECTED = "disconnected"
SUSPENDED = "suspended"
STOPPED = "stopped"
COVERED = "covered"
def _log_lsp_event(
remote_logging: Optional[backend_arguments.RemoteLogging],
event: LSPEvent,
integers: Optional[Dict[str, int]] = None,
normals: Optional[Dict[str, Optional[str]]] = None,
) -> None:
if remote_logging is not None:
logger = remote_logging.logger
if logger is not None:
log_identifier = remote_logging.identifier
statistics_logger.log(
category=statistics_logger.LoggerCategory.LSP_EVENTS,
logger=logger,
integers=integers,
normals={
**(normals or {}),
"event": event.value,
"pyre client version": version.__version__,
**(
{"identifier": log_identifier}
if log_identifier is not None
else {}
),
},
)
@dataclasses.dataclass(frozen=True)
class PyreServerStartOptions:
binary: str
server_identifier: str
start_arguments: start.Arguments
ide_features: Optional[configuration_module.IdeFeatures]
strict_default: bool
excludes: Sequence[str]
@staticmethod
def read_from(
command_argument: command_arguments.CommandArguments, base_directory: Path
) -> "PyreServerStartOptions":
configuration = configuration_module.create_configuration(
command_argument, base_directory
)
binary_location = configuration.get_binary_respecting_override()
if binary_location is None:
raise configuration_module.InvalidConfiguration(
"Cannot locate a Pyre binary to run."
)
start_arguments = start.create_server_arguments(
configuration,
command_arguments.StartArguments(
changed_files_path=command_argument.changed_files_path,
debug=command_argument.debug,
enable_memory_profiling=command_argument.enable_memory_profiling,
enable_profiling=command_argument.enable_profiling,
load_initial_state_from=command_argument.load_initial_state_from,
log_identifier=command_argument.log_identifier,
logging_sections=command_argument.logging_sections,
no_saved_state=command_argument.no_saved_state,
no_watchman=False,
noninteractive=command_argument.noninteractive,
save_initial_state_to=command_argument.save_initial_state_to,
saved_state_project=command_argument.saved_state_project,
sequential=command_argument.sequential,
show_error_traces=command_argument.show_error_traces,
store_type_check_resolution=False,
terminal=False,
wait_on_initialization=True,
),
)
if start_arguments.watchman_root is None:
raise commands.ClientException(
"Cannot locate a `watchman` root. Pyre's server will not function "
"properly."
)
return PyreServerStartOptions(
binary=binary_location,
server_identifier=start.get_server_identifier(configuration),
start_arguments=start_arguments,
ide_features=configuration.ide_features,
strict_default=configuration.strict,
excludes=configuration.excludes,
)
PyreServerStartOptionsReader = Callable[[], PyreServerStartOptions]
def read_server_start_options(
server_start_options_reader: PyreServerStartOptionsReader,
remote_logging: Optional[backend_arguments.RemoteLogging],
) -> "PyreServerStartOptions":
try:
LOG.info("Reading Pyre server configurations...")
return server_start_options_reader()
except Exception:
_log_lsp_event(
remote_logging=remote_logging,
event=LSPEvent.NOT_CONFIGURED,
normals={
"exception": traceback.format_exc(),
},
)
raise
def process_initialize_request(
parameters: lsp.InitializeParameters,
ide_features: Optional[configuration_module.IdeFeatures] = None,
) -> lsp.InitializeResult:
LOG.info(
f"Received initialization request from {parameters.client_info} "
f" (pid = {parameters.process_id})"
)
server_info = lsp.Info(name="pyre", version=version.__version__)
server_capabilities = lsp.ServerCapabilities(
text_document_sync=lsp.TextDocumentSyncOptions(
open_close=True,
change=lsp.TextDocumentSyncKind.NONE,
save=lsp.SaveOptions(include_text=False),
),
**(
{"hover_provider": ide_features.is_hover_enabled()}
if ide_features is not None
else {}
),
)
return lsp.InitializeResult(
capabilities=server_capabilities, server_info=server_info
)
@dataclasses.dataclass(frozen=True)
class InitializationSuccess:
client_capabilities: lsp.ClientCapabilities
client_info: Optional[lsp.Info] = None
initialization_options: Optional[lsp.InitializationOptions] = None
@dataclasses.dataclass(frozen=True)
class InitializationFailure:
exception: Optional[json_rpc.JSONRPCException] = None
@dataclasses.dataclass(frozen=True)
class InitializationExit:
pass
async def try_initialize(
input_channel: connection.TextReader,
output_channel: connection.TextWriter,
server_start_options_reader: PyreServerStartOptionsReader,
) -> Union[InitializationSuccess, InitializationFailure, InitializationExit]:
"""
Read an LSP message from the input channel and try to initialize an LSP
server. Also write to the output channel with proper response if the input
message is a request instead of a notification.
The function can return one of three possibilities:
- If the initialization succeeds, return `InitializationSuccess`.
- If the initialization fails, return `InitializationFailure`. There could
be many reasons for the failure: The incoming LSP message may not be an
      initialization request. The incoming LSP request may be malformed. Or the
client may not complete the handshake by sending back an `initialized` request.
- If an exit notification is received, return `InitializationExit`. The LSP
spec allows exiting a server without a preceding initialize request.
"""
request = None
try:
request = await lsp.read_json_rpc(input_channel)
LOG.debug(f"Received pre-initialization LSP request: {request}")
request_id = request.id
if request_id is None:
return (
InitializationExit()
if request.method == "exit"
else InitializationFailure()
)
if request.method != "initialize":
raise lsp.ServerNotInitializedError("An initialize request is needed.")
request_parameters = request.parameters
if request_parameters is None:
raise lsp.ServerNotInitializedError(
"Missing parameters for initialize request."
)
initialize_parameters = lsp.InitializeParameters.from_json_rpc_parameters(
request_parameters
)
try:
server_start_options = read_server_start_options(
server_start_options_reader, remote_logging=None
)
except configuration_module.InvalidConfiguration as e:
raise lsp.ServerNotInitializedError(str(e))
result = process_initialize_request(
initialize_parameters, server_start_options.ide_features
)
await lsp.write_json_rpc(
output_channel,
# pyre-fixme[16]: Pyre doesn't understand `dataclasses_json`
json_rpc.SuccessResponse(id=request_id, result=result.to_dict()),
)
initialized_notification = await lsp.read_json_rpc(input_channel)
if initialized_notification.method == "shutdown":
await _wait_for_exit(input_channel, output_channel)
return InitializationExit()
elif initialized_notification.method != "initialized":
actual_message = json.dumps(initialized_notification.json())
raise lsp.ServerNotInitializedError(
"Failed to receive an `initialized` request from client. "
+ f"Got {log.truncate(actual_message, 100)}"
)
return InitializationSuccess(
client_capabilities=initialize_parameters.capabilities,
client_info=initialize_parameters.client_info,
initialization_options=initialize_parameters.initialization_options,
)
except json_rpc.JSONRPCException as json_rpc_error:
await lsp.write_json_rpc(
output_channel,
json_rpc.ErrorResponse(
id=request.id if request is not None else None,
code=json_rpc_error.error_code(),
message=str(json_rpc_error),
data={"retry": False},
),
)
return InitializationFailure(exception=json_rpc_error)
@connection.asynccontextmanager
async def _read_lsp_request(
input_channel: connection.TextReader, output_channel: connection.TextWriter
) -> AsyncIterator[json_rpc.Request]:
message = None
try:
message = await lsp.read_json_rpc(input_channel)
yield message
except json_rpc.JSONRPCException as json_rpc_error:
await lsp.write_json_rpc(
output_channel,
json_rpc.ErrorResponse(
# pyre-ignore[16] - refinement doesn't work here for some reason
id=message.id if message is not None else None,
code=json_rpc_error.error_code(),
message=str(json_rpc_error),
),
)
async def _wait_for_exit(
input_channel: connection.TextReader, output_channel: connection.TextWriter
) -> None:
"""
Wait for an LSP "exit" request from the `input_channel`. This is mostly useful
when the LSP server has received a "shutdown" request, in which case the LSP
specification dictates that only "exit" can be sent from the client side.
If a non-exit LSP request is received, drop it and keep waiting on another
"exit" request.
"""
while True:
async with _read_lsp_request(input_channel, output_channel) as request:
if request.method == "exit":
return
else:
raise json_rpc.InvalidRequestError(
f"Only exit requests are accepted after shutdown. Got {request}."
)
async def _publish_diagnostics(
output_channel: connection.TextWriter,
path: Path,
diagnostics: Sequence[lsp.Diagnostic],
) -> None:
LOG.debug(f"Publish diagnostics for {path}: {diagnostics}")
await lsp.write_json_rpc(
output_channel,
json_rpc.Request(
method="textDocument/publishDiagnostics",
parameters=json_rpc.ByNameParameters(
{
"uri": lsp.DocumentUri.from_file_path(path).unparse(),
"diagnostics": [
# pyre-fixme[16]: Pyre doesn't understand `dataclasses_json`
diagnostic.to_dict()
for diagnostic in diagnostics
],
}
),
),
)
@connection.asynccontextmanager
async def _read_server_response(
server_input_channel: connection.TextReader,
) -> AsyncIterator[str]:
try:
raw_response = await server_input_channel.read_until(separator="\n")
yield raw_response
except incremental.InvalidServerResponse as error:
LOG.error(f"Pyre server returns invalid response: {error}")
TypeInfo = str
LocationTypeLookup = location_lookup.LocationLookup[TypeInfo]
@dataclasses.dataclass(frozen=True)
class TypeCoverageQuery:
id: Union[int, str, None]
path: Path
@dataclasses.dataclass(frozen=True)
class TypesQuery:
path: Path
@dataclasses.dataclass
class PyreQueryState:
# Shared mutable state.
path_to_location_type_lookup: Dict[Path, LocationTypeLookup] = dataclasses.field(
default_factory=dict
)
# Queue of queries.
queries: "asyncio.Queue[Union[TypeCoverageQuery, TypesQuery]]" = dataclasses.field(
default_factory=asyncio.Queue
)
def hover_response_for_position(
self, path: Path, lsp_position: lsp.LspPosition
) -> lsp.HoverResponse:
pyre_position = lsp_position.to_pyre_position()
LOG.info(f"Looking up type for path {path} and position {pyre_position}...")
location_type_lookup = self.path_to_location_type_lookup.get(path)
if location_type_lookup is None:
LOG.info(f"Did not find any type info for path {path}.")
return lsp.HoverResponse.empty()
type_info = location_type_lookup[pyre_position]
if type_info is None:
LOG.info(f"Did not find a type for position {pyre_position}.")
return lsp.HoverResponse.empty()
return lsp.HoverResponse(contents=f"```{type_info}```")
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class LineColumn:
line: int
column: int
def to_position(self) -> lsp.Position:
return lsp.Position(line=self.line, character=self.column)
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class LocationInfo:
start: LineColumn
stop: LineColumn
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class LocationAnnotation:
location: LocationInfo
annotation: str
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class PathTypeInfo:
path: str
types: List[LocationAnnotation]
def get_location_type_lookup(self) -> LocationTypeLookup:
return LocationTypeLookup(
[
(
location_annotation.location.start.to_position(),
location_annotation.location.stop.to_position(),
location_annotation.annotation,
)
for location_annotation in self.types
]
)
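# Hedged sketch (not part of the original file): shows how a single entry of a
# `types(...)` query response might be decoded into a `LocationTypeLookup`.
# The payload literal below is illustrative, not captured Pyre output.
def _example_path_type_info() -> LocationTypeLookup:
    # pyre-ignore[16]: Pyre doesn't understand `dataclasses_json`
    info = PathTypeInfo.from_dict(
        {
            "path": "example.py",
            "types": [
                {
                    "location": {
                        "start": {"line": 1, "column": 0},
                        "stop": {"line": 1, "column": 1},
                    },
                    "annotation": "int",
                }
            ],
        }
    )
    return info.get_location_type_lookup()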
async def _send_query_request(
output_channel: connection.TextWriter, query_text: str
) -> None:
query_message = json.dumps(["Query", query_text])
LOG.debug(f"Sending `{log.truncate(query_message, 400)}`")
await output_channel.write(f"{query_message}\n")
async def _receive_query_response(
input_channel: connection.TextReader,
) -> Optional[query.Response]:
async with _read_server_response(input_channel) as raw_response:
LOG.debug(f"Received `{log.truncate(raw_response, 400)}`")
try:
return query.parse_query_response(raw_response)
except query.InvalidQueryResponse as exception:
LOG.info(
f"Failed to parse json {raw_response} due to exception: {exception}"
)
return None
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class QueryTypesResponse:
response: List[PathTypeInfo]
@dataclasses_json.dataclass_json(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)
@dataclasses.dataclass(frozen=True)
class QueryModulesOfPathResponse:
response: List[str]
_T = TypeVar("_T")
def _interpret_response(
response: query.Response, response_type: Type[_T]
) -> Optional[_T]:
try:
# pyre-ignore[16]: Pyre doesn't understand dataclasses_json
return response_type.from_dict(response.payload)
except (
KeyError,
ValueError,
dataclasses_json.mm.ValidationError,
) as exception:
LOG.info(
f"When interpretting {response.payload} as {response_type.__name__} "
f"got: {type(exception).__name__}({exception})"
)
return None
@dataclasses.dataclass
class ServerState:
# Immutable States
client_capabilities: lsp.ClientCapabilities = lsp.ClientCapabilities()
# Mutable States
consecutive_start_failure: int = 0
is_user_notified_on_buck_failure: bool = False
opened_documents: Set[Path] = dataclasses.field(default_factory=set)
diagnostics: Dict[Path, List[lsp.Diagnostic]] = dataclasses.field(
default_factory=dict
)
last_diagnostic_update_timer: Optional[timer.Timer] = None
query_state: PyreQueryState = dataclasses.field(default_factory=PyreQueryState)
class PyreServer:
# I/O Channels
input_channel: connection.TextReader
output_channel: connection.TextWriter
    # `pyre_manager` is responsible for handling all interactions with the
    # background Pyre server.
pyre_manager: connection.BackgroundTaskManager
pyre_query_manager: connection.BackgroundTaskManager
# NOTE: `state` is mutable and can be changed on `pyre_manager` side.
state: ServerState
def __init__(
self,
input_channel: connection.TextReader,
output_channel: connection.TextWriter,
state: ServerState,
pyre_manager: connection.BackgroundTaskManager,
pyre_query_manager: connection.BackgroundTaskManager,
) -> None:
self.input_channel = input_channel
self.output_channel = output_channel
self.state = state
self.pyre_manager = pyre_manager
self.pyre_query_manager = pyre_query_manager
async def wait_for_exit(self) -> int:
await _wait_for_exit(self.input_channel, self.output_channel)
return 0
async def _try_restart_pyre_server(self) -> None:
if self.state.consecutive_start_failure < CONSECUTIVE_START_ATTEMPT_THRESHOLD:
await self.pyre_manager.ensure_task_running()
else:
LOG.info(
"Not restarting Pyre since failed consecutive start attempt limit"
" has been reached."
)
async def process_open_request(
self, parameters: lsp.DidOpenTextDocumentParameters
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
self.state.opened_documents.add(document_path)
self.state.query_state.queries.put_nowait(TypesQuery(document_path))
LOG.info(f"File opened: {document_path}")
# Attempt to trigger a background Pyre server start on each file open
if not self.pyre_manager.is_task_running():
await self._try_restart_pyre_server()
async def process_close_request(
self, parameters: lsp.DidCloseTextDocumentParameters
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
try:
self.state.opened_documents.remove(document_path)
self.state.query_state.path_to_location_type_lookup.pop(document_path, None)
LOG.info(f"File closed: {document_path}")
except KeyError:
LOG.warning(f"Trying to close an un-opened file: {document_path}")
async def process_did_save_request(
self, parameters: lsp.DidSaveTextDocumentParameters
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
if document_path not in self.state.opened_documents:
return
self.state.query_state.queries.put_nowait(TypesQuery(document_path))
# Attempt to trigger a background Pyre server start on each file save
if not self.pyre_manager.is_task_running():
await self._try_restart_pyre_server()
async def process_hover_request(
self,
parameters: lsp.HoverTextDocumentParameters,
request_id: Union[int, str, None],
) -> None:
"""Always respond to a hover request even for non-tracked paths.
Otherwise, VS Code hover will wait for Pyre until it times out, meaning
that messages from other hover providers will be delayed."""
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
if document_path not in self.state.opened_documents:
response = lsp.HoverResponse.empty()
else:
self.state.query_state.queries.put_nowait(TypesQuery(document_path))
response = self.state.query_state.hover_response_for_position(
Path(document_path), parameters.position
)
await lsp.write_json_rpc(
self.output_channel,
json_rpc.SuccessResponse(
id=request_id,
# pyre-ignore[16]: Pyre does not understand
# `dataclasses_json`.
result=response.to_dict(),
),
)
async def process_type_coverage_request(
self,
parameters: lsp.TypeCoverageTextDocumentParameters,
request_id: Union[int, str, None],
) -> None:
document_path = parameters.text_document.document_uri().to_file_path()
if document_path is None:
raise json_rpc.InvalidRequestError(
f"Document URI is not a file: {parameters.text_document.uri}"
)
await self.state.query_state.queries.put(
TypeCoverageQuery(id=request_id, path=document_path)
)
async def _run(self) -> int:
while True:
async with _read_lsp_request(
self.input_channel, self.output_channel
) as request:
LOG.debug(f"Received LSP request: {log.truncate(str(request), 400)}")
if request.method == "exit":
return commands.ExitCode.FAILURE
elif request.method == "shutdown":
await lsp.write_json_rpc(
self.output_channel,
json_rpc.SuccessResponse(id=request.id, result=None),
)
return await self.wait_for_exit()
elif request.method == "textDocument/didOpen":
parameters = request.parameters
if parameters is None:
raise json_rpc.InvalidRequestError(
"Missing parameters for didOpen method"
)
await self.process_open_request(
lsp.DidOpenTextDocumentParameters.from_json_rpc_parameters(
parameters
)
)
elif request.method == "textDocument/didClose":
parameters = request.parameters
if parameters is None:
raise json_rpc.InvalidRequestError(
"Missing parameters for didClose method"
)
await self.process_close_request(
lsp.DidCloseTextDocumentParameters.from_json_rpc_parameters(
parameters
)
)
elif request.method == "textDocument/didSave":
parameters = request.parameters
if parameters is None:
raise json_rpc.InvalidRequestError(
"Missing parameters for didSave method"
)
await self.process_did_save_request(
lsp.DidSaveTextDocumentParameters.from_json_rpc_parameters(
parameters
)
)
elif request.method == "textDocument/hover":
parameters = request.parameters
if parameters is None:
raise json_rpc.InvalidRequestError(
"Missing parameters for hover method"
)
await self.process_hover_request(
lsp.HoverTextDocumentParameters.from_json_rpc_parameters(
parameters
),
request.id,
)
elif request.method == "textDocument/typeCoverage":
parameters = request.parameters
if parameters is None:
raise json_rpc.InvalidRequestError(
"Missing parameters for typeCoverage method"
)
await self.process_type_coverage_request(
lsp.TypeCoverageTextDocumentParameters.from_json_rpc_parameters(
parameters
),
request.id,
)
elif request.id is not None:
raise lsp.RequestCancelledError("Request not supported yet")
async def run(self) -> int:
try:
await self.pyre_manager.ensure_task_running()
await self.pyre_query_manager.ensure_task_running()
return await self._run()
finally:
await self.pyre_manager.ensure_task_stop()
await self.pyre_query_manager.ensure_task_stop()
@dataclasses.dataclass(frozen=True)
class StartSuccess:
pass
@dataclasses.dataclass(frozen=True)
class BuckStartFailure:
message: str
@dataclasses.dataclass(frozen=True)
class OtherStartFailure:
message: str
detail: str
async def _start_pyre_server(
binary_location: str, pyre_arguments: start.Arguments
) -> Union[StartSuccess, BuckStartFailure, OtherStartFailure]:
try:
with backend_arguments.temporary_argument_file(
pyre_arguments
) as argument_file_path:
server_environment = {
**os.environ,
                # This is to make sure that the backend server shares the socket root
# directory with the client.
# TODO(T77556312): It might be cleaner to turn this into a
# configuration option instead.
"TMPDIR": tempfile.gettempdir(),
}
with start.background_server_log_file(
Path(pyre_arguments.base_arguments.log_path)
) as server_stderr:
server_process = await asyncio.create_subprocess_exec(
binary_location,
"newserver",
str(argument_file_path),
stdout=subprocess.PIPE,
stderr=server_stderr,
env=server_environment,
start_new_session=True,
)
server_stdout = server_process.stdout
if server_stdout is None:
raise RuntimeError(
"asyncio.create_subprocess_exec failed to set up a pipe for "
"server stdout"
)
await server_event.Waiter(wait_on_initialization=True).async_wait_on(
connection.TextReader(connection.StreamBytesReader(server_stdout))
)
return StartSuccess()
except server_event.ServerStartException as error:
message = str(error)
LOG.error(message)
if error.kind == server_event.ErrorKind.BUCK_USER:
return BuckStartFailure(message)
else:
            # We know where the exception comes from. Let's keep the error details
# succinct.
return OtherStartFailure(message=message, detail=message)
except Exception as error:
# These exceptions are unexpected. Let's keep verbose stack traces to
# help with post-mortem analyses.
message = str(error)
detail = traceback.format_exc()
LOG.error(f"{detail}")
return OtherStartFailure(message=message, detail=detail)
@dataclasses.dataclass(frozen=True)
class TypeErrorSubscription:
errors: List[error.Error] = dataclasses.field(default_factory=list)
def _parse_type_error_subscription(response: object) -> TypeErrorSubscription:
return TypeErrorSubscription(
errors=incremental.parse_type_error_response_json(["TypeErrors", response])
)
@dataclasses.dataclass(frozen=True)
class StatusUpdateSubscription:
kind: str
def _parse_status_update_subscription(response: object) -> StatusUpdateSubscription:
if not isinstance(response, list) or len(response) == 0:
raise incremental.InvalidServerResponse(
f"Status update subscription must be a nonempty list. Got {response}"
)
kind = response[0]
if not isinstance(kind, str):
raise incremental.InvalidServerResponse(
f"Response kind of a status update must be a string. Got {response}"
)
return StatusUpdateSubscription(kind=kind)
SubscriptionBody = Union[TypeErrorSubscription, StatusUpdateSubscription]
@dataclasses.dataclass(frozen=True)
class SubscriptionResponse:
name: str
body: SubscriptionBody
def parse_subscription_response(response: str) -> SubscriptionResponse:
try:
response_json = json.loads(response)
# The response JSON is expected to have the following forms:
# `{"name": "foo", "body": ["TypeErrors", [error_json, ...]]}`
# `{"name": "foo", "body": ["StatusUpdate", ["message_kind", ...]]}`
if isinstance(response_json, dict):
name = response_json.get("name", None)
body = response_json.get("body", None)
if (
name is not None
and body is not None
and isinstance(body, list)
and len(body) > 1
):
tag = body[0]
if tag == "TypeErrors":
return SubscriptionResponse(
name=name, body=_parse_type_error_subscription(body[1])
)
elif tag == "StatusUpdate":
return SubscriptionResponse(
name=name, body=_parse_status_update_subscription(body[1])
)
raise incremental.InvalidServerResponse(
f"Unexpected JSON subscription from server: {response_json}"
)
except json.JSONDecodeError as decode_error:
message = f"Cannot parse subscription as JSON: {decode_error}"
raise incremental.InvalidServerResponse(message) from decode_error
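# Hedged sketch (not part of the original file): demonstrates the "StatusUpdate"
# payload shape accepted by `parse_subscription_response`. The JSON literal is an
# illustrative example, not captured server output.
def _example_parse_status_update() -> SubscriptionResponse:
    raw = '{"name": "persistent_123", "body": ["StatusUpdate", ["Rebuilding"]]}'
    response = parse_subscription_response(raw)
    assert isinstance(response.body, StatusUpdateSubscription)
    assert response.body.kind == "Rebuilding"
    return response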
def type_error_to_diagnostic(type_error: error.Error) -> lsp.Diagnostic:
return lsp.Diagnostic(
range=lsp.Range(
start=lsp.Position(line=type_error.line - 1, character=type_error.column),
end=lsp.Position(
line=type_error.stop_line - 1, character=type_error.stop_column
),
),
message=type_error.description,
severity=lsp.DiagnosticSeverity.ERROR,
code=None,
source="Pyre",
)
def type_errors_to_diagnostics(
type_errors: Sequence[error.Error],
) -> Dict[Path, List[lsp.Diagnostic]]:
result: Dict[Path, List[lsp.Diagnostic]] = {}
for type_error in type_errors:
result.setdefault(type_error.path, []).append(
type_error_to_diagnostic(type_error)
)
return result
def uncovered_range_to_diagnostic(uncovered_range: CodeRange) -> lsp.Diagnostic:
return lsp.Diagnostic(
range=lsp.Range(
start=lsp.Position(
line=uncovered_range.start.line - 1,
character=uncovered_range.start.column,
),
end=lsp.Position(
line=uncovered_range.end.line - 1, character=uncovered_range.end.column
),
),
message=(
"This function is not type checked. "
"Consider adding parameter or return type annotations."
),
)
def to_coverage_result(
covered_and_uncovered_lines: CoveredAndUncoveredLines,
uncovered_ranges: List[CodeRange],
) -> lsp.TypeCoverageResult:
num_covered = len(covered_and_uncovered_lines.covered_lines)
num_uncovered = len(covered_and_uncovered_lines.uncovered_lines)
num_total = num_covered + num_uncovered
if num_total == 0:
return lsp.TypeCoverageResult(
covered_percent=100.0, uncovered_ranges=[], default_message=""
)
else:
return lsp.TypeCoverageResult(
covered_percent=100.0 * num_covered / num_total,
uncovered_ranges=[
uncovered_range_to_diagnostic(uncovered_range)
for uncovered_range in uncovered_ranges
],
default_message="Consider adding type annotations.",
)
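# Hedged sketch (not part of the original file): the covered-percent arithmetic in
# `to_coverage_result`. It assumes `CoveredAndUncoveredLines` accepts its two fields
# as keyword arguments (it is treated as a simple record elsewhere in this module).
def _example_coverage_percent() -> lsp.TypeCoverageResult:
    lines = CoveredAndUncoveredLines(covered_lines={1, 2, 3}, uncovered_lines={4})
    # 3 covered out of 4 total lines -> 75% covered, no uncovered ranges reported
    return to_coverage_result(lines, uncovered_ranges=[])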
def file_not_typechecked_coverage_result() -> lsp.TypeCoverageResult:
return lsp.TypeCoverageResult(
covered_percent=0.0,
uncovered_ranges=[
lsp.Diagnostic(
range=lsp.Range(
start=lsp.Position(
line=0,
character=0,
),
end=lsp.Position(line=1, character=0),
),
message="This file is not type checked by Pyre.",
)
],
default_message="",
)
def path_to_coverage_result(path: Path, strict_default: bool) -> lsp.TypeCoverageResult:
module = statistics.parse_path_to_module(path)
if module is None:
raise lsp.RequestCancelledError(
f"Unable to compute coverage information for {path}"
)
coverage_collector = coverage_collector_for_module(
str(path), module, strict_default
)
covered_and_uncovered_lines = coverage_collector.covered_and_uncovered_lines()
uncovered_ranges = [f.code_range for f in coverage_collector.uncovered_functions()]
return to_coverage_result(covered_and_uncovered_lines, uncovered_ranges)
class PyreQueryHandler(connection.BackgroundTask):
def __init__(
self,
state: PyreQueryState,
server_start_options_reader: PyreServerStartOptionsReader,
client_output_channel: connection.TextWriter,
) -> None:
self.state = state
self.server_start_options_reader = server_start_options_reader
self.client_output_channel = client_output_channel
async def _query(
self, query_text: str, socket_path: Path
) -> Optional[query.Response]:
LOG.info(f"Querying for `{query_text}`")
try:
async with connection.connect_in_text_mode(socket_path) as (
input_channel,
output_channel,
):
await _send_query_request(output_channel, query_text)
return await _receive_query_response(input_channel)
except connection.ConnectionFailure:
LOG.error(
"Could not establish connection with an existing Pyre server "
f"at {socket_path}."
)
return None
async def _query_and_interpret_response(
self, query_text: str, socket_path: Path, response_type: Type[_T]
) -> Optional[_T]:
query_response = await self._query(query_text, socket_path)
if query_response is None:
return None
else:
return _interpret_response(query_response, response_type)
async def _query_types(
self, paths: List[Path], socket_path: Path
) -> Optional[Dict[Path, LocationTypeLookup]]:
path_string = ", ".join(f"'{path}'" for path in paths)
query_text = f"types({path_string})"
query_types_response = await self._query_and_interpret_response(
query_text, socket_path, QueryTypesResponse
)
if query_types_response is None:
return None
return {
Path(path_type_info.path): path_type_info.get_location_type_lookup()
for path_type_info in query_types_response.response
}
async def _update_types_for_paths(
self,
paths: List[Path],
socket_path: Path,
) -> None:
new_path_to_location_type_dict = await self._query_types(paths, socket_path)
if new_path_to_location_type_dict is None:
return
for path, location_type_lookup in new_path_to_location_type_dict.items():
self.state.path_to_location_type_lookup[path] = location_type_lookup
async def _query_modules_of_path(
self,
path: Path,
socket_path: Path,
) -> Optional[QueryModulesOfPathResponse]:
return await self._query_and_interpret_response(
f"modules_of_path('{path}')", socket_path, QueryModulesOfPathResponse
)
async def _query_is_typechecked(
self,
path: Path,
socket_path: Path,
) -> Optional[bool]:
response = await self._query_modules_of_path(path, socket_path)
if response is None:
return None
else:
return len(response.response) > 0
async def _query_type_coverage(
self,
path: Path,
strict_default: bool,
socket_path: Path,
) -> Optional[lsp.TypeCoverageResult]:
is_typechecked = await self._query_is_typechecked(path, socket_path)
if is_typechecked is None:
return None
elif is_typechecked:
return path_to_coverage_result(path, strict_default)
else:
return file_not_typechecked_coverage_result()
async def _handle_type_coverage_query(
self,
query: TypeCoverageQuery,
strict_default: bool,
socket_path: Path,
) -> None:
type_coverage_result = await self._query_type_coverage(
query.path,
strict_default,
socket_path,
)
if type_coverage_result is not None:
await lsp.write_json_rpc(
self.client_output_channel,
json_rpc.SuccessResponse(
id=query.id,
# pyre-ignore[16]: Pyre does not understand
# `dataclasses_json`.
result=type_coverage_result.to_dict(),
),
)
async def _run(self, server_start_options: "PyreServerStartOptions") -> None:
start_arguments = server_start_options.start_arguments
local_root = start_arguments.base_arguments.relative_local_root
socket_path = server_connection.get_default_socket_path(
project_root=Path(start_arguments.base_arguments.global_root),
relative_local_root=Path(local_root) if local_root else None,
)
strict_default = server_start_options.strict_default
type_queries_enabled = (
server_start_options.ide_features is not None
and server_start_options.ide_features.is_hover_enabled()
)
while True:
query = await self.state.queries.get()
if isinstance(query, TypesQuery):
if type_queries_enabled:
await self._update_types_for_paths([query.path], socket_path)
elif isinstance(query, TypeCoverageQuery):
await self._handle_type_coverage_query(
query, strict_default, socket_path
)
def read_server_start_options(self) -> "PyreServerStartOptions":
try:
LOG.info("Reading Pyre server configurations...")
return self.server_start_options_reader()
except Exception:
LOG.error("Pyre query handler failed to read server configuration")
raise
async def run(self) -> None:
# Re-read server start options on every run, to make sure the server
# start options are always up-to-date.
server_start_options = self.read_server_start_options()
try:
LOG.info(
"Running Pyre query manager using"
f" configuration: {server_start_options}"
)
await self._run(server_start_options)
except Exception:
LOG.error("Failed to run the Pyre query handler")
raise
def _client_has_status_bar_support(
client_capabilities: lsp.ClientCapabilities,
) -> bool:
window_capabilities = client_capabilities.window
if window_capabilities is not None:
return window_capabilities.status is not None
else:
return False
async def _write_status(
output_channel: connection.TextWriter,
message: str,
short_message: Optional[str] = None,
level: lsp.MessageType = lsp.MessageType.INFO,
) -> None:
await lsp.write_json_rpc(
output_channel,
json_rpc.Request(
id=0, # the value doesn't matter but the existence does
method="window/showStatus",
parameters=json_rpc.ByNameParameters(
{
"type": int(level),
"message": message,
**(
{} if short_message is None else {"shortMessage": short_message}
),
}
),
),
)
async def _write_notification(
output_channel: connection.TextWriter,
message: str,
short_message: Optional[str] = None,
level: lsp.MessageType = lsp.MessageType.INFO,
) -> None:
await lsp.write_json_rpc(
output_channel,
json_rpc.Request(
method="window/showMessage",
parameters=json_rpc.ByNameParameters(
{
"type": int(level),
"message": (
message
if short_message is None
else f"{short_message}: {message}"
),
}
),
),
)
class PyreServerHandler(connection.BackgroundTask):
server_start_options_reader: PyreServerStartOptionsReader
remote_logging: Optional[backend_arguments.RemoteLogging]
client_output_channel: connection.TextWriter
server_state: ServerState
def __init__(
self,
server_start_options_reader: PyreServerStartOptionsReader,
client_output_channel: connection.TextWriter,
server_state: ServerState,
remote_logging: Optional[backend_arguments.RemoteLogging] = None,
) -> None:
self.server_start_options_reader = server_start_options_reader
self.remote_logging = remote_logging
self.client_output_channel = client_output_channel
self.server_state = server_state
async def show_notification_message_to_client(
self,
message: str,
level: lsp.MessageType = lsp.MessageType.INFO,
) -> None:
await _write_notification(self.client_output_channel, message, level=level)
async def show_status_message_to_client(
self,
message: str,
short_message: Optional[str] = None,
level: lsp.MessageType = lsp.MessageType.INFO,
fallback_to_notification: bool = False,
) -> None:
if _client_has_status_bar_support(self.server_state.client_capabilities):
await _write_status(
self.client_output_channel, message, short_message, level
)
elif fallback_to_notification:
await _write_notification(
self.client_output_channel, message, short_message, level
)
async def log_and_show_status_message_to_client(
self,
message: str,
short_message: Optional[str] = None,
level: lsp.MessageType = lsp.MessageType.INFO,
fallback_to_notification: bool = False,
) -> None:
log_message = (
message if short_message is None else f"[{short_message}] {message}"
)
if level == lsp.MessageType.ERROR:
LOG.error(log_message)
elif level == lsp.MessageType.WARNING:
LOG.warning(log_message)
elif level == lsp.MessageType.INFO:
LOG.info(log_message)
else:
LOG.debug(log_message)
await self.show_status_message_to_client(
message, short_message, level, fallback_to_notification
)
def update_type_errors(self, type_errors: Sequence[error.Error]) -> None:
LOG.info(
"Refereshing type errors received from Pyre server. "
f"Total number of type errors is {len(type_errors)}."
)
incremental.log_error_statistics(
remote_logging=self.remote_logging,
type_errors=type_errors,
command_name=COMMAND_NAME,
)
self.server_state.diagnostics = type_errors_to_diagnostics(type_errors)
async def clear_type_errors_for_client(self) -> None:
for path in self.server_state.diagnostics:
await _publish_diagnostics(self.client_output_channel, path, [])
last_update_timer = self.server_state.last_diagnostic_update_timer
if last_update_timer is not None:
_log_lsp_event(
self.remote_logging,
LSPEvent.COVERED,
integers={"duration": int(last_update_timer.stop_in_millisecond())},
)
# Reset the timestamp to avoid duplicate counting
last_update_timer.reset()
async def show_type_errors_to_client(self) -> None:
for path, diagnostics in self.server_state.diagnostics.items():
await _publish_diagnostics(self.client_output_channel, path, diagnostics)
last_update_timer = self.server_state.last_diagnostic_update_timer
if last_update_timer is not None:
last_update_timer.reset()
async def handle_type_error_subscription(
self, type_error_subscription: TypeErrorSubscription
) -> None:
await self.clear_type_errors_for_client()
self.update_type_errors(type_error_subscription.errors)
await self.show_type_errors_to_client()
await self.log_and_show_status_message_to_client(
"Pyre has completed an incremental check and is currently "
"watching on futher source changes.",
short_message="Pyre Ready",
level=lsp.MessageType.INFO,
)
async def handle_status_update_subscription(
self, status_update_subscription: StatusUpdateSubscription
) -> None:
await self.clear_type_errors_for_client()
if status_update_subscription.kind == "Rebuilding":
await self.log_and_show_status_message_to_client(
"Pyre is busy rebuilding the project for type checking...",
short_message="Pyre (waiting for Buck)",
level=lsp.MessageType.WARNING,
)
elif status_update_subscription.kind == "Rechecking":
await self.log_and_show_status_message_to_client(
"Pyre is busy re-type-checking the project...",
short_message="Pyre (checking)",
level=lsp.MessageType.WARNING,
)
async def _handle_subscription_body(
self, subscription_body: SubscriptionBody
) -> None:
if isinstance(subscription_body, TypeErrorSubscription):
await self.handle_type_error_subscription(subscription_body)
elif isinstance(subscription_body, StatusUpdateSubscription):
await self.handle_status_update_subscription(subscription_body)
async def _subscribe_to_type_error(
self,
server_input_channel: connection.TextReader,
server_output_channel: connection.TextWriter,
) -> None:
subscription_name = f"persistent_{os.getpid()}"
await server_output_channel.write(
f'["SubscribeToTypeErrors", "{subscription_name}"]\n'
)
async with _read_server_response(server_input_channel) as first_response:
initial_type_errors = incremental.parse_type_error_response(first_response)
self.update_type_errors(initial_type_errors)
await self.show_type_errors_to_client()
while True:
async with _read_server_response(
server_input_channel
) as raw_subscription_response:
subscription_response = parse_subscription_response(
raw_subscription_response
)
if subscription_name == subscription_response.name:
await self._handle_subscription_body(subscription_response.body)
async def subscribe_to_type_error(
self,
server_input_channel: connection.TextReader,
server_output_channel: connection.TextWriter,
) -> None:
try:
await self._subscribe_to_type_error(
server_input_channel, server_output_channel
)
finally:
await self.show_status_message_to_client(
"Lost connection to the background Pyre Server. "
"This usually happens when Pyre detect changes in project which "
"it was not able to handle incrementally. "
"A new Pyre server will be started next time you open or save "
"a .py file",
short_message="Pyre Stopped",
level=lsp.MessageType.ERROR,
fallback_to_notification=True,
)
await self.clear_type_errors_for_client()
self.server_state.diagnostics = {}
@staticmethod
def _auxiliary_logging_info(
server_start_options: PyreServerStartOptions,
) -> Dict[str, Optional[str]]:
relative_local_root = (
server_start_options.start_arguments.base_arguments.relative_local_root
)
return {
"binary": server_start_options.binary,
"log_path": server_start_options.start_arguments.base_arguments.log_path,
"global_root": (
server_start_options.start_arguments.base_arguments.global_root
),
**(
{}
if relative_local_root is None
else {"local_root": relative_local_root}
),
}
async def _run(self, server_start_options: PyreServerStartOptions) -> None:
server_identifier = server_start_options.server_identifier
start_arguments = server_start_options.start_arguments
local_root = start_arguments.base_arguments.relative_local_root
socket_path = server_connection.get_default_socket_path(
project_root=Path(start_arguments.base_arguments.global_root),
relative_local_root=Path(local_root) if local_root else None,
)
connection_timer = timer.Timer()
try:
async with connection.connect_in_text_mode(socket_path) as (
input_channel,
output_channel,
):
await self.log_and_show_status_message_to_client(
"Established connection with existing Pyre server at "
f"`{server_identifier}`.",
short_message="Pyre Ready",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
self.server_state.consecutive_start_failure = 0
self.server_state.is_user_notified_on_buck_failure = False
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
"connected_to": "already_running_server",
**self._auxiliary_logging_info(server_start_options),
},
)
await self.subscribe_to_type_error(input_channel, output_channel)
return
except connection.ConnectionFailure:
pass
await self.log_and_show_status_message_to_client(
f"Starting a new Pyre server at `{server_identifier}` in "
"the background.",
short_message="Starting Pyre...",
level=lsp.MessageType.WARNING,
fallback_to_notification=True,
)
start_status = await _start_pyre_server(
server_start_options.binary, start_arguments
)
if isinstance(start_status, StartSuccess):
await self.log_and_show_status_message_to_client(
f"Pyre server at `{server_identifier}` has been initialized.",
short_message="Pyre Ready",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
async with connection.connect_in_text_mode(socket_path) as (
input_channel,
output_channel,
):
self.server_state.consecutive_start_failure = 0
self.server_state.is_user_notified_on_buck_failure = False
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
"connected_to": "newly_started_server",
**self._auxiliary_logging_info(server_start_options),
},
)
await self.subscribe_to_type_error(input_channel, output_channel)
elif isinstance(start_status, BuckStartFailure):
# Buck start failures are intentionally not counted towards
# `consecutive_start_failure` -- they happen far too often in practice
# so we do not want them to trigger suspensions.
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.NOT_CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_start_options),
"exception": str(start_status.message),
},
)
if not self.server_state.is_user_notified_on_buck_failure:
await self.show_notification_message_to_client(
f"Cannot start a new Pyre server at `{server_identifier}` "
"due to Buck failure. If you added or changed a target, "
"make sure the target file is parsable and the owning "
"targets are buildable by Buck. If you removed a target, "
"makre sure that target is not explicitly referenced from the "
"Pyre configuration file of the containing project.",
level=lsp.MessageType.ERROR,
)
self.server_state.is_user_notified_on_buck_failure = True
await self.show_status_message_to_client(
f"Cannot start a new Pyre server at `{server_identifier}`. "
f"{start_status.message}",
short_message="Pyre Stopped",
level=lsp.MessageType.INFO,
fallback_to_notification=False,
)
elif isinstance(start_status, OtherStartFailure):
self.server_state.consecutive_start_failure += 1
if (
self.server_state.consecutive_start_failure
< CONSECUTIVE_START_ATTEMPT_THRESHOLD
):
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.NOT_CONNECTED,
integers={"duration": int(connection_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_start_options),
"exception": str(start_status.detail),
},
)
await self.show_status_message_to_client(
f"Cannot start a new Pyre server at `{server_identifier}`. "
f"{start_status.message}",
short_message="Pyre Stopped",
level=lsp.MessageType.INFO,
fallback_to_notification=True,
)
else:
await self.show_status_message_to_client(
f"Pyre server restart at `{server_identifier}` has been "
"failing repeatedly. Disabling The Pyre plugin for now.",
short_message="Pyre Disabled",
level=lsp.MessageType.ERROR,
fallback_to_notification=True,
)
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.SUSPENDED,
normals=self._auxiliary_logging_info(server_start_options),
)
else:
raise RuntimeError("Impossible type for `start_status`")
async def run(self) -> None:
# Re-read server start options on every run, to make sure the server
# start options are always up-to-date.
server_start_options = read_server_start_options(
self.server_start_options_reader, self.remote_logging
)
session_timer = timer.Timer()
try:
LOG.info(f"Starting Pyre server from configuration: {server_start_options}")
await self._run(server_start_options)
except Exception:
_log_lsp_event(
remote_logging=self.remote_logging,
event=LSPEvent.DISCONNECTED,
integers={"duration": int(session_timer.stop_in_millisecond())},
normals={
**self._auxiliary_logging_info(server_start_options),
"exception": traceback.format_exc(),
},
)
raise
async def run_persistent(
server_start_options_reader: PyreServerStartOptionsReader,
remote_logging: Optional[backend_arguments.RemoteLogging],
) -> int:
stdin, stdout = await connection.create_async_stdin_stdout()
while True:
initialize_result = await try_initialize(
stdin, stdout, server_start_options_reader
)
if isinstance(initialize_result, InitializationExit):
LOG.info("Received exit request before initialization.")
return 0
elif isinstance(initialize_result, InitializationSuccess):
LOG.info("Initialization successful.")
client_info = initialize_result.client_info
_log_lsp_event(
remote_logging=remote_logging,
event=LSPEvent.INITIALIZED,
normals=(
{}
if client_info is None
else {
"lsp client name": client_info.name,
"lsp client version": client_info.version,
}
),
)
client_capabilities = initialize_result.client_capabilities
LOG.debug(f"Client capabilities: {client_capabilities}")
initial_server_state = ServerState(client_capabilities=client_capabilities)
pyre_query_handler = PyreQueryHandler(
state=initial_server_state.query_state,
server_start_options_reader=server_start_options_reader,
client_output_channel=stdout,
)
server = PyreServer(
input_channel=stdin,
output_channel=stdout,
state=initial_server_state,
pyre_manager=connection.BackgroundTaskManager(
PyreServerHandler(
server_start_options_reader=server_start_options_reader,
remote_logging=remote_logging,
client_output_channel=stdout,
server_state=initial_server_state,
)
),
pyre_query_manager=connection.BackgroundTaskManager(pyre_query_handler),
)
return await server.run()
elif isinstance(initialize_result, InitializationFailure):
exception = initialize_result.exception
message = (
str(exception) if exception is not None else "ignoring notification"
)
LOG.info(f"Initialization failed: {message}")
_log_lsp_event(
remote_logging=remote_logging,
event=LSPEvent.NOT_INITIALIZED,
normals=(
{
"exception": message,
}
),
)
            # Loop until we get either InitializationExit or InitializationSuccess
else:
raise RuntimeError("Cannot determine the type of initialize_result")
def run(
command_argument: command_arguments.CommandArguments,
base_directory: Path,
remote_logging: Optional[backend_arguments.RemoteLogging],
) -> int:
def read_server_start_options() -> PyreServerStartOptions:
return PyreServerStartOptions.read_from(command_argument, base_directory)
command_timer = timer.Timer()
error_message: Optional[str] = None
try:
return asyncio.get_event_loop().run_until_complete(
run_persistent(
read_server_start_options,
remote_logging,
)
)
except Exception as error:
error_message = str(error)
return 1
finally:
_log_lsp_event(
remote_logging,
LSPEvent.STOPPED,
integers={"duration": int(command_timer.stop_in_millisecond())},
normals={
**({"exception": error_message} if error_message is not None else {})
},
)
| 37.465507 | 88 | 0.62575 | ["MIT"] | dmitryvinn/pyre-check-1 | client/commands/persistent.py | 64,628 | Python |
# -*- coding: utf-8 -*-
'''
==============
scrim.commands
==============
Implements functionality available across multiple shell scripting languages.
'''
from __future__ import absolute_import
import abc
from collections import namedtuple
from fstrings import f
import ntpath
import posixpath
ABC = abc.ABCMeta('ABC', (object,), {})
Command = namedtuple('Command', 'name args kwargs')
RawCommand = namedtuple('RawCommand', 'command required_shell')
class CommandExecutor(object):
'''Forward commands to the specified ShellCommands implementation.
    RawCommands are returned as-is if the shell matches the RawCommand's
    required_shell. You shouldn't need to use this class directly.
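    A minimal usage sketch (illustrative only; the shell argument must match a
    ShellCommands.shell value defined below, and the outputs noted in the
    comments follow from those implementations):
        executor = CommandExecutor()
        executor(Command('cd', ('/tmp',), {}), 'bash')           # 'cd /tmp'
        executor(Command('set_env', ('FOO', '1'), {}), 'bash')   # 'export FOO=1'
        executor(RawCommand('setlocal', 'cmd.exe'), 'cmd.exe')   # 'setlocal'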
'''
def __call__(self, command, shell):
if isinstance(command, Command):
method = getattr(SHELL_COMMANDS[shell], command.name)
return method(*command.args, **command.kwargs)
elif isinstance(command, RawCommand):
if shell == command.required_shell:
return command.command
else:
raise TypeError(f(
'command must be Command or RawCommand not {}', type(command)
))
class ShellCommands(ABC):
'''Defines the interface for all ShellCommand implementations. These are
the common commands we want to define for all shells.
Sometimes commands are available across multiple shells like: cd. For
    clarity we define these commands for each implementation anyway instead of
    using inheritance.
    We only cover commands with functionality present in all the shells we
support. Special cases can be handled using :meth:`Scrim.raw`.
When choosing a method name for a command, generally use the most well
known and widely used command. This will frequently come from bash. For
example given the choice between `cat` in bash, `type` in batch, and
`Get-Content` in powershell, we choose `cat` as the method name.
In special cases we define methods that cover similar functionality for
multiple shells like: `set_env` and `unset_env`. No shell has `set_env`
    or `unset_env` commands, but powershell does have special syntax for
setting and unsetting environment variables and bash has `export`.
Therefore, we define `set_env` and `unset_env` for all implementations,
even when batch has no direct analogy. This provides users with a
memorable set of commands that cover all shells.
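    For example, set_env('FOO', '1') renders per the implementations below as
    export FOO=1 in bash, $env:FOO=1 in powershell.exe, and set "FOO=1" in
    cmd.exe (where set_env is simply an alias for set, since batch has no
    environment-only form).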
Attributes:
shell: Should match the SCRIM_SHELL value set in one of the scrim
scripts. For example:
- scrim.bat defines SCRIM_SHELL as cmd.exe
- scrim.ps1 defines SCRIM_SHELL as powershell.exe
- scrim.sh defines SCRIM_SHELL as bash
'''
@abc.abstractproperty
def shell(self):
raise NotImplementedError
@abc.abstractmethod
def execute(self, expression):
raise NotImplementedError
@abc.abstractmethod
def echo(self, message):
raise NotImplementedError
@abc.abstractmethod
def set(self, var, value):
raise NotImplementedError
@abc.abstractmethod
def unset(self, var):
raise NotImplementedError
@abc.abstractmethod
def set_env(self, var, value):
raise NotImplementedError
@abc.abstractmethod
def unset_env(self, var):
raise NotImplementedError
@abc.abstractmethod
def cd(self, path):
raise NotImplementedError
@abc.abstractmethod
def pushd(self, path):
raise NotImplementedError
@abc.abstractmethod
def popd(self):
raise NotImplementedError
@abc.abstractmethod
def cat(self):
raise NotImplementedError
class BatchCommands(ShellCommands):
shell = 'cmd.exe'
def execute(self, expression):
return f('call {expression}')
def echo(self, message):
return f('echo {message}')
def set(self, var, value):
return f('set "{var}={value}"')
def unset(self, var):
return f('set "{var}="')
set_env = set
unset_env = unset
def cd(self, path):
path = ntpath.normpath(path)
return f('cd {path}')
def pushd(self, path):
path = ntpath.normpath(path)
return f('pushd {path}')
def popd(self):
return 'popd'
def cat(self, path):
path = ntpath.normpath(path)
return f('type {path}')
class PowershellCommands(ShellCommands):
shell = 'powershell.exe'
def execute(self, expression):
return f('Invoke-Expression {expression}')
def echo(self, message):
return f('Write-Host {message}')
def set(self, var, value):
return f('${var}={value}')
    def unset(self, var):
return f('Remove-Variable {var}')
def set_env(self, var, value):
return f('$env:{var}={value}')
    def unset_env(self, var):
return f('Remove-Item Env:{var}')
def cd(self, path):
path = ntpath.normpath(path)
return f('cd {path}')
def pushd(self, path):
path = ntpath.normpath(path)
return f('Push-Location -Path "{path}"')
def popd(self):
return 'Pop-Location'
def cat(self, path):
path = ntpath.normpath(path)
return f('Get-Content {path}')
class BashCommands(ShellCommands):
shell = 'bash'
def execute(self, expression):
return f('$({expression})')
def echo(self, message):
return f('echo {message}')
def set(self, var, value):
return f('{var}={value}')
def unset(self, var):
return f('unset {var}')
def set_env(self, var, value):
return f('export {var}={value}')
def unset_env(self, var):
return f('unset {var}')
def cd(self, path):
path = posixpath.normpath(path)
return f('cd {path}')
def pushd(self, path):
path = posixpath.normpath(path)
return f('pushd {path}')
def popd(self):
return 'popd'
def cat(self, path):
path = posixpath.normpath(path)
return f('cat {path}')
SHELL_COMMANDS = dict((c.shell, c()) for c in ShellCommands.__subclasses__())
| 26.547414 | 79 | 0.643124 | ["MIT"] | danbradham/scrim | scrim/commands.py | 6,159 | Python |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class IsisTrillPseudoNode(Base):
"""TRILL Pseudo Node Configuration
The IsisTrillPseudoNode class encapsulates a list of isisTrillPseudoNode resources that are managed by the system.
A list of resources can be retrieved from the server using the IsisTrillPseudoNode.find() method.
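    Example (illustrative sketch; assumes `isis_trill_pseudo_node` is an
    IsisTrillPseudoNode instance already obtained from its parent NGPF object,
    which is not shown here):
        matching = isis_trill_pseudo_node.find(Name='^my_pseudo_node$')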
"""
__slots__ = ()
_SDM_NAME = 'isisTrillPseudoNode'
_SDM_ATT_MAP = {
'Active': 'active',
'BroadcastRootPriority': 'broadcastRootPriority',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'Nickname': 'nickname',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IsisTrillPseudoNode, self).__init__(parent, list_op)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def BroadcastRootPriority(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Broadcast Root Priority
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BroadcastRootPriority']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def Nickname(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Nickname
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Nickname']))
def update(self, Name=None):
# type: (str) -> IsisTrillPseudoNode
"""Updates isisTrillPseudoNode resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> IsisTrillPseudoNode
"""Adds a new isisTrillPseudoNode resource on the json, only valid with config assistant
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved isisTrillPseudoNode resources using find and the newly added isisTrillPseudoNode resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
# type: (int, str, str) -> IsisTrillPseudoNode
"""Finds and retrieves isisTrillPseudoNode resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve isisTrillPseudoNode resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all isisTrillPseudoNode resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching isisTrillPseudoNode resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of isisTrillPseudoNode data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the isisTrillPseudoNode resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Abort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
abort(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=list, async_operation=bool)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=string, async_operation=bool)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Active=None, BroadcastRootPriority=None, Nickname=None):
"""Base class infrastructure that gets a list of isisTrillPseudoNode device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- BroadcastRootPriority (str): optional regex of broadcastRootPriority
- Nickname (str): optional regex of nickname
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| 43.201342 | 193 | 0.6524 | ["MIT"] | OpenIxia/ixnetwork_restpy | uhd_restpy/testplatform/sessions/ixnetwork/topology/isistrillpseudonode_173e4463dccc2001457569c77f3570e0.py | 12,874 | Python |
from .customer_service import *
from .transaction_service import *
from .webhook_service import *
| 24.5 | 34 | 0.816327 | ["MIT"] | Nyior/django-rest-paystack | paystack/services/__init__.py | 98 | Python |
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
import saspy as sp2
class SASdata:
"""
**Overview**
The SASdata object is a reference to a SAS Data Set or View. It is used to access data that exists in the SAS session.
You create a SASdata object by using the sasdata() method of the SASsession object.
Parms for the sasdata() method of the SASsession object are:
:param table: [Required] the name of the SAS Data Set or View
:param libref: [Defaults to WORK] the libref for the SAS Data Set or View.
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
    :param dsopts: a dictionary containing any of the following SAS data set options (where, drop, keep, obs, firstobs, format):
        - where is a string
        - keep are strings or list of strings.
        - drop are strings or list of strings.
        - obs is a number - either string or int
        - firstobs is a number - either string or int
- format is a string or dictionary { var: format }
.. code-block:: python
{'where' : 'msrp < 20000 and make = "Ford"',
'keep' : 'msrp enginesize Cylinders Horsepower Weight',
'drop' : ['msrp', 'enginesize', 'Cylinders', 'Horsepower', 'Weight'],
'obs' : 10,
'firstobs' : '12'
'format' : {'money': 'dollar10', 'time': 'tod5.'}
}
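    A minimal creation sketch (illustrative; assumes a SASsession object named
    `sas` already exists):
    .. code-block:: python
        cars = sas.sasdata('cars', libref='sashelp', results='PANDAS')
        cars.head()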
"""
def __init__(self, sassession, libref, table, results='', dsopts: dict=None):
self.sas = sassession
self.logger = logging.getLogger(__name__)
if results == '':
results = sassession.results
failed = 0
if results.upper() == "HTML":
if self.sas.sascfg.display.lower() == 'jupyter':
try:
from IPython.display import HTML
except:
failed = 1
if failed and not self.sas.batch:
self.HTML = 0
else:
self.HTML = 1
else:
self.HTML = 1
else:
self.HTML = 0
if len(libref):
self.libref = libref
else:
if self.sas.exist(table, libref='user'):
self.libref = 'USER'
else:
self.libref = 'WORK'
# hack till the bug gets fixed
if self.sas.sascfg.mode == 'HTTP':
self.libref = 'WORK'
self.table = table
self.dsopts = dsopts if dsopts is not None else {}
self.results = results
self.tabulate = sp2.Tabulate(sassession, self)
def __getitem__(self, key):
print(key)
print(type(key))
def __repr__(self):
"""
display info about this object ...
:return: output
"""
x = "Libref = %s\n" % self.libref
x += "Table = %s\n" % self.table
x += "Dsopts = %s\n" % str(self.dsopts)
x += "Results = %s\n" % self.results
return(x)
def set_results(self, results: str):
"""
This method set the results attribute for the SASdata object; it stays in effect till changed
results - set the default result type for this SASdata object. 'Pandas' or 'HTML' or 'TEXT'.
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
:return: None
"""
if results.upper() == "HTML":
self.HTML = 1
else:
self.HTML = 0
self.results = results
def _is_valid(self):
if self.sas.exist(self.table, self.libref):
return None
else:
msg = "The SAS Data Set that this SASdata object refers to, " + self.libref + '.' + self.table + ", does not exist in this SAS session at this time."
ll = {'LOG': msg, 'LST': msg}
return ll
def _checkLogForError(self, log):
lines = re.split(r'[\n]\s*', log)
for line in lines:
if line[self.sas.logoffset:].startswith('ERROR'):
return (False, line)
return (True, '')
def _returnPD(self, code, tablename, **kwargs):
"""
private function to take a sas code normally to create a table, generate pandas data frame and cleanup.
:param code: string of SAS code
:param tablename: the name of the SAS Data Set
:param kwargs:
:return: Pandas Data Frame
"""
if self.sas.sascfg.pandas:
raise type(self.sas.sascfg.pandas)(self.sas.sascfg.pandas.msg)
libref = kwargs.get('libref','work')
ll = self.sas._io.submit(code)
check, errorMsg = self._checkLogForError(ll['LOG'])
if not check:
raise ValueError("Internal code execution failed: " + errorMsg)
if isinstance(tablename, str):
df = self.sas.sasdata2dataframe(tablename, libref)
self.sas._io.submit("proc delete data=%s.%s; run;" % (libref, tablename))
elif isinstance(tablename, list):
df = dict()
for t in tablename:
# strip leading '_' from names and capitalize for dictionary labels
if self.sas.exist(t, libref):
df[t.replace('_', '').capitalize()] = self.sas.sasdata2dataframe(t, libref)
self.sas._io.submit("proc delete data=%s.%s; run;" % (libref, t))
else:
raise SyntaxError("The tablename must be a string or list %s was submitted" % str(type(tablename)))
return df
def _dsopts(self):
"""
        This method builds out the data set options clause for this SASdata object: '(where=, keep=, obs=, ...)'
"""
return self.sas._dsopts(self.dsopts)
def where(self, where: str) -> 'SASdata':
"""
This method returns a clone of the SASdata object, with the where attribute set. The original SASdata object is not affected.
:param where: the where clause to apply
:return: SAS data object
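        :Example: (illustrative; assumes wkcars = sas.sasdata('cars') as in the other examples in this class)
        #. cheap_fords = wkcars.where('msrp < 20000 and make = "Ford"')
        #. cheap_fords.head()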
"""
sd = SASdata(self.sas, self.libref, self.table, dsopts=dict(self.dsopts))
sd.HTML = self.HTML
sd.dsopts['where'] = where
return sd
def head(self, obs=5):
"""
display the first n rows of a table
:param obs: the number of rows of the table that you want to display. The default is 5
:return:
"""
topts = dict(self.dsopts)
topts['obs'] = obs
code = "proc print data=" + self.libref + '.' + self.table + self.sas._dsopts(topts) + ";run;"
if self.sas.nosub:
print(code)
return
if self.results.upper() == 'PANDAS':
code = "data _head ; set %s.%s %s; run;" % (self.libref, self.table, self.sas._dsopts(topts))
return self._returnPD(code, '_head')
else:
ll = self._is_valid()
if self.HTML:
if not ll:
ll = self.sas._io.submit(code)
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def tail(self, obs=5):
"""
display the last n rows of a table
:param obs: the number of rows of the table that you want to display. The default is 5
:return:
"""
code = "proc sql;select count(*) format best32. into :lastobs from "
code += self.libref + '.' + self.table + self._dsopts()
code += ";%put lastobs=&lastobs lastobsend=;\nquit;"
nosub = self.sas.nosub
self.sas.nosub = False
le = self._is_valid()
if not le:
ll = self.sas.submit(code, "text")
lastobs = ll['LOG'].rpartition("lastobs=")
lastobs = lastobs[2].partition(" lastobsend=")
lastobs = int(lastobs[0])
else:
lastobs = obs
firstobs = lastobs - (obs - 1)
if firstobs < 1:
firstobs = 1
topts = dict(self.dsopts)
topts['obs'] = lastobs
topts['firstobs'] = firstobs
code = "proc print data=" + self.libref + '.'
code += self.table + self.sas._dsopts(topts) + ";run;"
self.sas.nosub = nosub
if self.sas.nosub:
print(code)
return
if self.results.upper() == 'PANDAS':
code = "data _tail ; set %s.%s %s; run;" % (self.libref, self.table, self.sas._dsopts(topts))
return self._returnPD(code, '_tail')
else:
if self.HTML:
if not le:
ll = self.sas._io.submit(code)
else:
ll = le
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not le:
ll = self.sas._io.submit(code, "text")
else:
ll = le
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def obs(self):
"""
return the number of observations for your SASdata object
"""
code = "proc sql;select count(*) format best32. into :lastobs from "
code += self.libref + '.' + self.table + self._dsopts()
code += ";%put lastobs=&lastobs lastobsend=;\nquit;"
if self.sas.nosub:
print(code)
return
le = self._is_valid()
if not le:
ll = self.sas.submit(code, "text")
lastobs = ll['LOG'].rpartition("lastobs=")
lastobs = lastobs[2].partition(" lastobsend=")
lastobs = int(lastobs[0])
else:
print("The SASdata object is not valid. The table doesn't exist in this SAS session at this time.")
lastobs = None
return lastobs
def partition(self, var: str = '', fraction: float = .7, seed: int = 9878, kfold: int = 1,
out: 'SASdata' = None, singleOut: bool = True) -> object:
"""
Partition a sas data object using SRS sampling or if a variable is specified then
stratifying with respect to that variable
:param var: variable(s) for stratification. If multiple then space delimited list
:param fraction: fraction to split
:param seed: random seed
:param kfold: number of k folds
:param out: the SAS data object
        :param singleOut: boolean to return a single table or separate tables
:return: Tuples or SAS data object
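        :Example: (illustrative sketch; assumes wkcars = sas.sasdata('cars') and that the hpsample procedure is available in the SAS session)
        #. train, score = wkcars.partition(fraction=.7, singleOut=False)
        #. folds = wkcars.partition(var='type', kfold=5, singleOut=False)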
"""
# loop through for k folds cross-validation
i = 1
# initialize code string so that loops work
code = ''
# Make sure kfold was an integer
try:
k = int(kfold)
except ValueError:
print("Kfold must be an integer")
if out is None:
out_table = self.table
out_libref = self.libref
elif not isinstance(out, str):
out_table = out.table
out_libref = out.libref
else:
try:
out_table = out.split('.')[1]
out_libref = out.split('.')[0]
except IndexError:
out_table = out
out_libref = 'work'
while i <= k:
# get the list of variables
if k == 1:
code += "proc hpsample data=%s.%s %s out=%s.%s %s samppct=%s seed=%s Partition;\n" % (
self.libref, self.table, self._dsopts(), out_libref, out_table, self._dsopts(), fraction * 100,
seed)
else:
seed += 1
code += "proc hpsample data=%s.%s %s out=%s.%s %s samppct=%s seed=%s partition PARTINDNAME=_cvfold%s;\n" % (
self.libref, self.table, self._dsopts(), out_libref, out_table, self._dsopts(), fraction * 100,
seed, i)
# Get variable info for stratified sampling
if len(var) > 0:
if i == 1:
num_string = """
data _null_; file LOG;
d = open('{0}.{1}');
nvars = attrn(d, 'NVARS');
put 'VARLIST=';
do i = 1 to nvars;
vart = vartype(d, i);
var = varname(d, i);
if vart eq 'N' then
put %upcase('var=') var %upcase('varEND=');
end;
put 'VARLISTEND=';
run;
"""
# ignore teach_me_SAS mode to run contents
nosub = self.sas.nosub
self.sas.nosub = False
ll = self.sas.submit(num_string.format(self.libref, self.table + self._dsopts()))
self.sas.nosub = nosub
numlist = []
log = ll['LOG'].rpartition('VARLISTEND=')[0].rpartition('VARLIST=')
for vari in range(log[2].count('VAR=')):
log = log[2].partition('VAR=')[2].partition(' VAREND=')
numlist.append(log[0].strip())
# check if var is in numlist
if isinstance(var, str):
tlist = var.split()
elif isinstance(var, list):
tlist = var
else:
raise SyntaxError("var must be a string or list you submitted: %s" % str(type(var)))
if set(numlist).isdisjoint(tlist):
if isinstance(var, str):
code += "class _character_;\ntarget %s;\nvar _numeric_;\n" % var
else:
code += "class _character_;\ntarget %s;\nvar _numeric_;\n" % " ".join(var)
else:
varlist = [x for x in numlist if x not in tlist]
varlist.extend(["_cvfold%s" % j for j in range(1, i) if k > 1 and i > 1])
code += "class %s _character_;\ntarget %s;\nvar %s;\n" % (var, var, " ".join(varlist))
else:
code += "class _character_;\nvar _numeric_;\n"
code += "run;\n"
i += 1
        # split_code is used when singleOut is False; it generates the SAS code needed to break up the k-fold partition set.
split_code = ''
if not singleOut:
split_code += 'DATA '
for j in range(1, k + 1):
split_code += "\t%s.%s%s_train(drop=_Partind_ _cvfold:)\n" % (out_libref, out_table, j)
split_code += "\t%s.%s%s_score(drop=_Partind_ _cvfold:)\n" % (out_libref, out_table, j)
split_code += ';\n \tset %s.%s;\n' % (out_libref, out_table)
for z in range(1, k + 1):
split_code += "\tif _cvfold%s = 1 or _partind_ = 1 then output %s.%s%s_train;\n" % (z, out_libref, out_table, z)
split_code += "\telse output %s.%s%s_score;\n" % (out_libref, out_table, z)
split_code += 'run;'
runcode = True
if self.sas.nosub:
print(code + '\n\n' + split_code)
runcode = False
ll = self._is_valid()
if ll:
runcode = False
if runcode:
ll = self.sas.submit(code + split_code, "text")
elog = []
for line in ll['LOG'].splitlines():
if line[self.sas.logoffset:].startswith('ERROR'):
elog.append(line)
if len(elog):
raise RuntimeError("\n".join(elog))
if not singleOut:
outTableList = []
if k == 1:
return (self.sas.sasdata(out_table + str(k) + "_train", out_libref, dsopts=self._dsopts()),
self.sas.sasdata(out_table + str(k) + "_score", out_libref, dsopts=self._dsopts()))
for j in range(1, k + 1):
outTableList.append((self.sas.sasdata(out_table + str(j) + "_train", out_libref, dsopts=self._dsopts()),
self.sas.sasdata(out_table + str(j) + "_score", out_libref, dsopts=self._dsopts())))
return outTableList
if out:
if not isinstance(out, str):
return out
else:
return self.sas.sasdata(out_table, out_libref, self.results)
else:
return self
def contents(self):
"""
        display metadata about the table: size, number of rows, columns and their data types ...
:return: output
"""
code = "proc contents data=" + self.libref + '.' + self.table + self._dsopts() + ";run;"
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if self.results.upper() == 'PANDAS':
code = "proc contents data=%s.%s %s ;" % (self.libref, self.table, self._dsopts())
code += "ods output Attributes=work._attributes;"
code += "ods output EngineHost=work._EngineHost;"
code += "ods output Variables=work._Variables;"
code += "ods output Sortedby=work._Sortedby;"
code += "run;"
return self._returnPD(code, ['_attributes', '_EngineHost', '_Variables', '_Sortedby'])
else:
if self.HTML:
if not ll:
ll = self.sas._io.submit(code)
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def columnInfo(self):
"""
        display metadata about the table: size, number of rows, columns and their data types
"""
code = "proc contents data=" + self.libref + '.' + self.table + ' ' + self._dsopts() + ";ods select Variables;run;"
if self.sas.nosub:
print(code)
return
if self.results.upper() == 'PANDAS':
code = "proc contents data=%s.%s %s ;ods output Variables=work._variables ;run;" % (self.libref, self.table, self._dsopts())
df = self._returnPD(code, '_variables')
df['Type'] = df['Type'].str.rstrip()
return df
else:
ll = self._is_valid()
if self.HTML:
if not ll:
ll = self.sas._io.submit(code)
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def info(self):
"""
Display the column info on a SAS data object
:return: Pandas data frame
"""
if self.results.casefold() != 'pandas':
print("The info method only works with Pandas results")
return None
info_code = """
data work._statsInfo ;
do rows=0 by 1 while( not last ) ;
set {0}.{1}{2} end=last;
array chrs _character_ ;
array nums _numeric_ ;
array ccounts(999) _temporary_ ;
array ncounts(999) _temporary_ ;
do over chrs;
ccounts(_i_) + missing(chrs) ;
end;
do over nums;
ncounts(_i_) + missing(nums);
end;
end ;
length Variable $32 type $8. ;
Do over chrs;
Type = 'char';
Variable = vname(chrs) ;
N = rows;
Nmiss = ccounts(_i_) ;
Output ;
end ;
Do over nums;
Type = 'numeric';
Variable = vname(nums) ;
N = rows;
Nmiss = ncounts(_i_) ;
if variable ^= 'rows' then output;
end ;
stop;
keep Variable N NMISS Type ;
run;
"""
if self.sas.nosub:
print(info_code.format(self.libref, self.table, self._dsopts()))
return None
df = self._returnPD(info_code.format(self.libref, self.table, self._dsopts()), '_statsInfo')
df = df.iloc[:, :]
df.index.name = None
df.name = None
return df
def describe(self):
"""
display descriptive statistics for the table; summary statistics.
:return:
"""
return self.means()
def means(self):
"""
display descriptive statistics for the table; summary statistics. This is an alias for 'describe'
:return:
"""
dsopts = self._dsopts().partition(';\n\tformat')
code = "proc means data=" + self.libref + '.' + self.table + dsopts[0] + " stackodsoutput n nmiss median mean std min p25 p50 p75 max;"
code += dsopts[1]+dsopts[2]+"run;"
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if self.results.upper() == 'PANDAS':
code = "proc means data=%s.%s %s stackodsoutput n nmiss median mean std min p25 p50 p75 max; %s ods output Summary=work._summary; run;" % (
self.libref, self.table, dsopts[0], dsopts[1]+dsopts[2])
return self._returnPD(code, '_summary')
else:
if self.HTML:
if not ll:
ll = self.sas._io.submit(code)
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def impute(self, vars: dict, replace: bool = False, prefix: str = 'imp_', out: 'SASdata' = None) -> 'SASdata':
"""
Imputes missing values for a SASdata object.
        :param vars: a dictionary in the form {'impute type': ['var1', 'var2']}, where the impute type is a summary
                     statistic such as mean, min, max, midrange, or random; the optional key 'value' takes a list of
                     (variable, constant) tuples for constant imputation
        :param replace: if True, write the imputed values to new columns named prefix+variable and leave the original
                        columns unchanged; if False (the default), fill missing values in place
        :param prefix: prefix used for the new column names when replace=True (default 'imp_')
        :param out: optional output table, either a SASdata object or a 'libref.table' string; defaults to overwriting
                    the input table in place
        :return: a SASdata object for the imputed table
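        :Example: (illustrative sketch; assumes wkcars = sas.sasdata('cars') with a numeric column Cylinders and a character column Make)
        #. imputed = wkcars.impute({'mean': ['Cylinders'], 'value': [('Make', 'Unknown')]})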
"""
outstr = ''
if out:
if isinstance(out, str):
fn = out.partition('.')
if fn[1] == '.':
out_libref = fn[0]
out_table = fn[2]
else:
out_libref = ''
out_table = fn[0]
else:
out_libref = out.libref
out_table = out.table
outstr = "out=%s.%s" % (out_libref, out_table)
else:
out_table = self.table
out_libref = self.libref
# get list of variables and types
varcode = "data _null_; d = open('" + self.libref + "." + self.table + "');\n"
varcode += "nvars = attrn(d, 'NVARS');\n"
varcode += "put 'VARNUMS=' nvars 'VARNUMS_END=';\n"
varcode += "put 'VARLIST=';\n"
varcode += "do i = 1 to nvars; var = varname(d, i); put %upcase('var=') var %upcase('varEND='); end;\n"
varcode += "put 'TYPELIST=';\n"
varcode += "do i = 1 to nvars; var = vartype(d, i); put %upcase('type=') var %upcase('typeEND='); end;\n"
varcode += "put 'END_ALL_VARS_AND_TYPES=';\n"
varcode += "run;"
ll = self.sas._io.submit(varcode, "text")
l2 = ll['LOG'].rpartition("VARNUMS=")[2].partition("VARNUMS_END=")
nvars = int(float(l2[0].strip()))
varlist = []
log = ll['LOG'].rpartition('TYPELIST=')[0].rpartition('VARLIST=')
for vari in range(log[2].count('VAR=')):
log = log[2].partition('VAR=')[2].partition('VAREND=')
varlist.append(log[0].strip().upper())
typelist = []
log = ll['LOG'].rpartition('END_ALL_VARS_AND_TYPES=')[0].rpartition('TYPELIST=')
for typei in range(log[2].count('VAR=')):
log = log[2].partition('TYPE=')[2].partition('TYPEEND=')
typelist.append(log[0].strip().upper())
varListType = dict(zip(varlist, typelist))
# process vars dictionary to generate code
## setup default statements
sql = "proc sql;\n select\n"
sqlsel = ' %s(%s),\n'
sqlinto = ' into\n'
if len(out_libref)>0 :
ds1 = "data " + out_libref + "." + out_table + "; set " + self.libref + "." + self.table + self._dsopts() + ";\n"
else:
ds1 = "data " + out_table + "; set " + self.libref + "." + self.table + self._dsopts() + ";\n"
dsmiss = 'if missing({0}) then {1} = {2};\n'
if replace:
dsmiss = prefix+'{1} = {0}; if missing({0}) then %s{1} = {2};\n' % prefix
modesql = ''
modeq = "proc sql outobs=1;\n select %s, count(*) as freq into :imp_mode_%s, :imp_mode_freq\n"
modeq += " from %s where %s is not null group by %s order by freq desc, %s;\nquit;\n"
# pop the values key because it needs special treatment
        constantValues = vars.pop('value', None)
        if constantValues is not None:
            if not all(isinstance(x, tuple) for x in constantValues):
                raise SyntaxError("The elements in the 'value' key must be tuples")
            for t in constantValues:
                if varListType.get(t[0].upper()) == "N":
                    ds1 += dsmiss.format(t[0], t[0], t[1])
                else:
                    ds1 += dsmiss.format(t[0], t[0], '"' + str(t[1]) + '"')
for key, values in vars.items():
if key.lower() in ['midrange', 'random']:
for v in values:
sql += sqlsel % ('max', v)
sql += sqlsel % ('min', v)
sqlinto += ' :imp_max_' + v + ',\n'
sqlinto += ' :imp_min_' + v + ',\n'
if key.lower() == 'midrange':
ds1 += dsmiss.format(v, v, '(&imp_min_' + v + '.' + ' + ' + '&imp_max_' + v + '.' + ') / 2')
elif key.lower() == 'random':
# random * (max - min) + min
ds1 += dsmiss.format(v, v, '(&imp_max_' + v + '.' + ' - ' + '&imp_min_' + v + '.' + ') * ranuni(0)' + '+ &imp_min_' + v + '.')
else:
raise SyntaxError("This should not happen!!!!")
else:
for v in values:
sql += sqlsel % (key, v)
sqlinto += ' :imp_' + v + ',\n'
                    if key.lower() == 'mode':
modesql += modeq % (v, v, self.libref + "." + self.table + self._dsopts() , v, v, v)
if varListType.get(v.upper()) == "N":
ds1 += dsmiss.format(v, v, '&imp_' + v + '.')
else:
ds1 += dsmiss.format(v, v, '"&imp_' + v + '."')
if len(sql) > 20:
sql = sql.rstrip(', \n') + '\n' + sqlinto.rstrip(', \n') + '\n from ' + self.libref + '.' + self.table + self._dsopts() + ';\nquit;\n'
else:
sql = ''
ds1 += 'run;\n'
if self.sas.nosub:
print(modesql + sql + ds1)
return None
ll = self.sas.submit(modesql + sql + ds1)
return self.sas.sasdata(out_table, libref=out_libref, results=self.results, dsopts=self._dsopts())
def sort(self, by: str, out: object = '', **kwargs) -> 'SASdata':
"""
Sort the SAS Data Set
:param by: REQUIRED variable to sort by (BY <DESCENDING> variable-1 <<DESCENDING> variable-2 ...>;)
        :param out: OPTIONAL takes either a string 'libref.table' or 'table' (which will go to WORK or USER
                    if assigned) or a SASdata object. If out= is not specified, the table is sorted in place (if allowed).
:param kwargs:
:return: SASdata object if out= not specified, or a new SASdata object for out= when specified
:Example:
#. wkcars.sort('type')
#. wkcars2 = sas.sasdata('cars2')
#. wkcars.sort('cylinders', wkcars2)
#. cars2=cars.sort('DESCENDING origin', out='foobar')
#. cars.sort('type').head()
#. stat_results = stat.reg(model='horsepower = Cylinders EngineSize', by='type', data=wkcars.sort('type'))
#. stat_results2 = stat.reg(model='horsepower = Cylinders EngineSize', by='type', data=wkcars.sort('type','work.cars'))
"""
outstr = ''
options = ''
if out:
if isinstance(out, str):
fn = out.partition('.')
if fn[1] == '.':
libref = fn[0]
table = fn[2]
outstr = "out=%s.%s" % (libref, table)
else:
libref = ''
table = fn[0]
outstr = "out=" + table
else:
libref = out.libref
table = out.table
outstr = "out=%s.%s" % (out.libref, out.table)
if 'options' in kwargs:
options = kwargs['options']
code = "proc sort data=%s.%s%s %s %s ;\n" % (self.libref, self.table, self._dsopts(), outstr, options)
code += "by %s;" % by
code += "run\n;"
runcode = True
if self.sas.nosub:
print(code)
runcode = False
ll = self._is_valid()
if ll:
runcode = False
if runcode:
ll = self.sas.submit(code, "text")
elog = []
for line in ll['LOG'].splitlines():
if line[self.sas.logoffset:].startswith('ERROR'):
elog.append(line)
if len(elog):
raise RuntimeError("\n".join(elog))
if out:
if not isinstance(out, str):
return out
else:
return self.sas.sasdata(table, libref, self.results)
else:
return self
def add_vars(self, vars: dict, out: object = None, **kwargs) -> 'SASLOG':
"""
        Copy the table to itself, or to the 'out=' table, and add any variables you want
        :param vars: REQUIRED dictionary of variable names (keys) and assignment statements (values).
                     To maintain variable order use collections.OrderedDict. Assignment statements must be valid
SAS assignment expressions.
:param out: OPTIONAL takes a SASdata Object you create ahead of time. If not specified, replaces the existing table
and the current SAS data object still refers to the replacement table.
:param kwargs:
:return: SAS Log showing what happened
:Example:
#. cars = sas.sasdata('cars', 'sashelp')
#. wkcars = sas.sasdata('cars')
#. cars.add_vars({'PW_ratio': 'weight / horsepower', 'Overhang' : 'length - wheelbase'}, wkcars)
#. wkcars.head()
"""
if out is not None:
if not isinstance(out, SASdata):
print("out= needs to be a SASdata object")
return None
else:
outtab = out.libref + '.' + out.table + out._dsopts()
else:
outtab = self.libref + '.' + self.table + self._dsopts()
code = "data "+outtab+"; set " + self.libref + '.' + self.table + self._dsopts() + ";\n"
for key in vars.keys():
code += key+" = "+vars[key]+";\n"
code += "; run;"
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LOG'])
else:
return ll
def assessModel(self, target: str, prediction: str, nominal: bool = True, event: str = '', **kwargs):
"""
This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
        Not all datasets can be assessed. This is designed for scored data that includes target and prediction columns.
        TODO: add code example of build, score, and then assess
        :param target: string that represents the target variable in the data
        :param prediction: string that represents the numeric prediction column in the data. For nominal targets this should be a probability between (0,1).
:param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
:param event: string which indicates which value of the nominal target variable is the event vs non-event
:param kwargs:
:return: SAS result object
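        :Example: (illustrative sketch; assumes `scored` refers to a table that already holds the nominal target y and a scored probability column P_y1 produced by an earlier modeling step, which is not shown here)
        #. scored = sas.sasdata('scored_table')
        #. res = scored.assessModel(target='y', prediction='P_y1', nominal=True, event='1')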
"""
# submit autocall macro
self.sas.submit("%aamodel;")
objtype = "datastep"
objname = '{s:{c}^{n}}'.format(s=self.table[:3], n=3,
c='_') + self.sas._objcnt() # translate to a libname so needs to be less than 8
code = "%macro proccall(d);\n"
# build parameters
score_table = str(self.libref + '.' + self.table)
binstats = str(objname + '.' + "ASSESSMENTSTATISTICS")
out = str(objname + '.' + "ASSESSMENTBINSTATISTICS")
level = 'interval'
# var = 'P_' + target
if nominal:
level = 'class'
            # if the user didn't specify the event for a nominal target, give them the possible choices
try:
if len(event) < 1:
raise Exception(event)
except Exception:
print("No event was specified for a nominal target. Here are possible options:\n")
event_code = "proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);" % (
self.libref, self.table, self._dsopts())
event_code += "\nclass %s ; \nrun;" % target
event_code += "data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . and CRAW eq '') and lowcase(name)=lowcase('%s');" % target
ec = self.sas._io.submit(event_code)
            print(ec['LST'])
# TODO: Finish output of the list of nominals variables
if nominal:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out, event)
else:
code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);" \
% (score_table, self._dsopts(), target, prediction, level, binstats, out)
rename_char = """
data {0};
set {0};
if level in ("INTERVAL", "INT") then do;
rename _sse_ = SumSquaredError
                   _div_ = Divisor
_ASE_ = AverageSquaredError
_RASE_ = RootAverageSquaredError
_MEANP_ = MeanPredictionValue
_STDP_ = StandardDeviationPrediction
_CVP_ = CoefficientVariationPrediction;
end;
else do;
rename CR = MaxClassificationRate
KSCut = KSCutOff
CRDEPTH = MaxClassificationDepth
MDepth = MedianClassificationDepth
MCut = MedianEventDetectionCutOff
CCut = ClassificationCutOff
_misc_ = MisClassificationRate;
end;
run;
"""
code += rename_char.format(binstats)
if nominal:
# TODO: add graphics code here to return to the SAS results object
graphics ="""
ODS PROCLABEL='ERRORPLOT' ;
proc sgplot data={0};
title "Error and Correct rate by Depth";
series x=depth y=correct_rate;
series x=depth y=error_rate;
yaxis label="Percentage" grid;
run;
/* roc chart */
ODS PROCLABEL='ROCPLOT' ;
proc sgplot data={0};
title "ROC Curve";
series x=one_minus_specificity y=sensitivity;
yaxis grid;
run;
/* Lift and Cumulative Lift */
ODS PROCLABEL='LIFTPLOT' ;
proc sgplot data={0};
Title "Lift and Cumulative Lift";
series x=depth y=c_lift;
series x=depth y=lift;
yaxis grid;
run;
"""
code += graphics.format(out)
code += "run; quit; %mend;\n"
code += "%%mangobj(%s,%s,%s);" % (objname, objtype, self.table)
if self.sas.nosub:
print(code)
return
ll = self.sas.submit(code, 'text')
obj1 = sp2.SASProcCommons._objectmethods(self, objname)
return sp2.SASresults(obj1, self.sas, objname, self.sas.nosub, ll['LOG'])
def to_csv(self, file: str, opts: dict = None) -> str:
"""
This method will export a SAS Data Set to a file in CSV format.
:param file: the OS filesystem path of the file to be created (exported from this SAS Data Set)
:return:
"""
opts = opts if opts is not None else {}
ll = self._is_valid()
if ll:
if not self.sas.batch:
print(ll['LOG'])
else:
return ll
else:
return self.sas.write_csv(file, self.table, self.libref, self.dsopts, opts)
def score(self, file: str = '', code: str = '', out: 'SASdata' = None) -> 'SASdata':
"""
This method is meant to update a SAS Data object with a model score file.
:param file: a file reference to the SAS score code
:param code: a string of the valid SAS score code
:param out: Where to the write the file. Defaults to update in place
:return: The Scored SAS Data object.
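        :Example: (illustrative sketch; the score code string and table names are placeholders, not real model output)
        #. scored = sas.sasdata('cars_scored')
        #. wkcars.score(code='P_MSRP = 4000 + 200*Horsepower;', out=scored)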
"""
if out is not None:
outTable = out.table
outLibref = out.libref
else:
outTable = self.table
outLibref = self.libref
codestr = code
code = "data %s.%s%s;" % (outLibref, outTable, self._dsopts())
code += "set %s.%s%s;" % (self.libref, self.table, self._dsopts())
if len(file)>0:
code += '%%include "%s";' % file
else:
code += "%s;" %codestr
code += "run;"
if self.sas.nosub:
print(code)
return None
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
def to_frame(self, **kwargs) -> 'pd.DataFrame':
"""
Export this SAS Data Set to a Pandas Data Frame
:param kwargs:
:return: Pandas data frame
:rtype: 'pd.DataFrame'
"""
return self.to_df(**kwargs)
def to_df(self, method: str = 'MEMORY', **kwargs) -> 'pd.DataFrame':
"""
Export this SAS Data Set to a Pandas Data Frame
:param method: defaults to MEMORY; the original method. CSV is the other choice which uses an intermediary csv file; faster for large data
:param kwargs:
:return: Pandas data frame
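        :Example: (illustrative; assumes wkcars = sas.sasdata('cars') and that pandas is installed)
        #. df = wkcars.to_df()
        #. df = wkcars.to_df(method='CSV')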
"""
ll = self._is_valid()
if ll:
print(ll['LOG'])
return None
else:
if self.sas.sascfg.pandas:
raise type(self.sas.sascfg.pandas)(self.sas.sascfg.pandas.msg)
return self.sas.sasdata2dataframe(self.table, self.libref, self.dsopts, method, **kwargs)
def to_df_CSV(self, tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
"""
Export this SAS Data Set to a Pandas Data Frame via CSV file
:param tempfile: [optional] an OS path for a file to use for the local CSV file; default it a temporary file that's cleaned up
:param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
:param kwargs:
:return: Pandas data frame
:rtype: 'pd.DataFrame'
"""
return self.to_df(method='CSV', tempfile=tempfile, tempkeep=tempkeep, **kwargs)
def to_json(self, pretty: bool = False, sastag: bool = False, **kwargs) -> str:
"""
Export this SAS Data Set to a JSON Object
PROC JSON documentation: http://go.documentation.sas.com/?docsetId=proc&docsetVersion=9.4&docsetTarget=p06hstivs0b3hsn1cb4zclxukkut.htm&locale=en
:param pretty: boolean False return JSON on one line True returns formatted JSON
:param sastag: include SAS meta tags
:param kwargs:
:return: JSON str
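        :Example: (illustrative; assumes wkcars = sas.sasdata('cars'))
        #. js = wkcars.to_json(pretty=True)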
"""
code = "filename file1 temp;\n"
code += "proc json out=file1"
if pretty:
code += " pretty "
if not sastag:
code += " nosastags "
code +=";\n export %s.%s %s;\n run;" % (self.libref, self.table, self._dsopts())
if self.sas.nosub:
print(code)
return None
ll = self._is_valid()
runcode = True
if ll:
runcode = False
if runcode:
ll = self.sas.submit(code, "text")
elog = []
fpath=''
for line in ll['LOG'].splitlines():
if line[self.sas.logoffset:].startswith('JSONFilePath:'):
fpath = line[14:]
if line[self.sas.logoffset:].startswith('ERROR'):
elog.append(line)
if len(elog):
raise RuntimeError("\n".join(elog))
if len(fpath):
with open(fpath, 'r') as myfile:
json_str = myfile.read()
return json_str
def heatmap(self, x: str, y: str, options: str = '', title: str = '',
label: str = '') -> object:
"""
Documentation link: http://support.sas.com/documentation/cdl/en/grstatproc/67909/HTML/default/viewer.htm#n0w12m4cn1j5c6n12ak64u1rys4w.htm
:param x: x variable
:param y: y variable
:param options: display options (string)
:param title: graph title
        :param label: LegendLABEL= value for sgplot
:return:
"""
code = "proc sgplot data=%s.%s %s;" % (self.libref, self.table, self._dsopts())
if len(options):
code += "\n\theatmap x='%s'n y='%s'n / %s;" % (x, y, options)
else:
code += "\n\theatmap x='%s'n y='%s'n;" % (x, y)
if len(label) > 0:
code += " LegendLABEL='" + label + "'"
code += ";\n"
if len(title) > 0:
code += "\ttitle '%s';\n" % title
code += "run;\ntitle;"
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
def hist(self, var: str, title: str = '',
label: str = '') -> object:
"""
This method requires a numeric column (use the contents method to see column types) and generates a histogram.
:param var: the NUMERIC variable (column) you want to plot
:param title: an optional Title for the chart
:param label: LegendLABEL= value for sgplot
:return:
"""
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts()
code += ";\n\thistogram '" + var + "'n / scale=count"
if len(label) > 0:
code += " LegendLABEL='" + label + "'"
code += ";\n"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
code += "\tdensity '" + var + "'n;\nrun;\n" + "title;"
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
def top(self, var: str, n: int = 10, order: str = 'freq', title: str = '') -> object:
"""
        Return the most commonly occurring items (levels)
:param var: the CHAR variable (column) you want to count
:param n: the top N to be displayed (defaults to 10)
        :param order: defaults to most common; use order='data' to get them in alphabetic order
:param title: an optional Title for the chart
:return: Data Table
"""
code = "proc freq data=%s.%s %s order=%s noprint;" % (self.libref, self.table, self._dsopts(), order)
code += "\n\ttables '%s'n / out=tmpFreqOut;" % var
code += "\nrun;"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
code += "proc print data=tmpFreqOut(obs=%s); \nrun;" % n
code += 'title;'
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
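        # with Pandas results, rerun PROC FREQ quietly and return a DataFrame
        # instead of rendering the PROC PRINT output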
if self.results.upper() == 'PANDAS':
code = "proc freq data=%s.%s%s order=%s noprint;" % (self.libref, self.table, self._dsopts(), order)
code += "\n\ttables '%s'n / out=tmpFreqOut;" % var
code += "\nrun;"
code += "\ndata tmpFreqOut; set tmpFreqOut(obs=%s); run;" % n
return self._returnPD(code, 'tmpFreqOut')
else:
if self.HTML:
if not ll:
ll = self.sas._io.submit(code)
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
else:
if not ll:
ll = self.sas._io.submit(code, "text")
if not self.sas.batch:
print(ll['LST'])
else:
return ll
def bar(self, var: str, title: str = '', label: str = '') -> object:
"""
This method requires a character column (use the contents method to see column types)
and generates a bar chart.
:param var: the CHAR variable (column) you want to plot
:param title: an optional title for the chart
:param label: LegendLABEL= value for sgplot
:return: graphic plot
"""
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts()
code += ";\n\tvbar '" + var + "'n"
if len(label) > 0:
code += " / LegendLABEL='" + label + "'"
code += ";\n"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
code += 'run;\ntitle;'
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
def series(self, x: str, y: list, title: str = '') -> object:
"""
This method plots a series of x,y coordinates. You can provide a list of y columns for multiple line plots.
:param x: the x axis variable; generally a time or continuous variable.
:param y: the y axis variable(s), you can specify a single column or a list of columns
:param title: an optional Title for the chart
:return: graph object
"""
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts() + ";\n"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
if isinstance(y, list):
num = len(y)
else:
num = 1
y = [y]
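        # emit one SERIES statement per y column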
for i in range(num):
code += "\tseries x='" + x + "'n y='" + str(y[i]) + "'n;\n"
code += 'run;\n' + 'title;'
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
def scatter(self, x: str, y: list, title: str = '') -> object:
"""
        This method plots a scatter of x,y coordinates. You can provide a list of y columns for multiple scatter plots.
:param x: the x axis variable; generally a time or continuous variable.
:param y: the y axis variable(s), you can specify a single column or a list of columns
:param title: an optional Title for the chart
:return: graph object
"""
code = "proc sgplot data=" + self.libref + '.' + self.table + self._dsopts() + ";\n"
if len(title) > 0:
code += '\ttitle "' + title + '";\n'
if isinstance(y, list):
num = len(y)
else:
num = 1
y = [y]
for i in range(num):
code += "\tscatter x='" + x + "'n y='" + y[i] + "'n;\n"
code += 'run;\n' + 'title;'
if self.sas.nosub:
print(code)
return
ll = self._is_valid()
if not ll:
html = self.HTML
self.HTML = 1
ll = self.sas._io.submit(code)
self.HTML = html
if not self.sas.batch:
self.sas.DISPLAY(self.sas.HTML(ll['LST']))
else:
return ll
| 38.832838 | 161 | 0.499244 | [
"Apache-2.0"
] | kjnh10/saspy | saspy/sasdata.py | 52,269 | Python |
import re
from model.contact import Contact
def test_firstname_on_home_page(app, check_ui):
contact_from_home_page = app.contact.get_contact_list()[0]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_home_page.firstname == merge_firstname_like_on_home_page(contact_from_edit_page)
if check_ui:
assert sorted(contact_from_home_page.firstname, key=Contact.id_or_max) == sorted(merge_firstname_like_on_home_page(contact_from_edit_page), key=Contact.id_or_max)
def test_firstname_on_contact_view_page(app):
contact_from_view_page = app.contact.get_contact_from_view_page(0)
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
assert contact_from_view_page.firstname == contact_from_edit_page.firstname
def clear(s):
    return re.sub(r"[\n\s+$]", "", s)
def merge_firstname_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", (map(lambda x: clear(x), [contact.firstname])))) | 45.318182 | 170 | 0.792377 | [
"Apache-2.0"
] | 200312/python_training | test/test_name.py | 997 | Python |
from flask import jsonify
from flask_restful import Resource
class SKU(Resource):
def get(self):
return jsonify(
{}
)
| 13.909091 | 34 | 0.601307 | [
"MIT"
] | VadymHutei/ukubuka-back | app/resources/sku.py | 153 | Python |
# Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
# For example, this binary tree is symmetric:
#     1
#    / \
#   2   2
#  / \ / \
# 3  4 4  3
# But the following is not:
#     1
#    / \
#   2   2
#    \   \
#    3    3
class Solution(object):
def help(self, el, r):
if el == None and r == None:
return True
if el and r and el.val == r.val:
return self.help(el.right, r.left) and self.help(el.left, r.right)
return False
def isSymmetric(self, root):
if root:
return self.help(root.left, root.right)
return True | 21.793103 | 96 | 0.550633 | [
"MIT"
] | SakiFu/leetcode | python/symmetric_tree.py | 632 | Python |
"""SqlAlchemy models."""
import datetime
from blog.extensions import db
from blog.category.models import Category
TITLE_LEN = 255
URL_LEN = 255
POST_STATUSES = {
0: 'Draft',
1: 'Page',
2: 'Archive',
3: 'Special',
4: 'Published',
}
class Post(db.Model):
"""orm model for blog post."""
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key=True)
pagetitle = db.Column(db.String(TITLE_LEN), default='')
alias = db.Column(db.String(TITLE_LEN), unique=True, nullable=False)
content = db.Column(db.Text)
createdon = db.Column(db.DateTime, default=datetime.datetime.now)
publishedon = db.Column(db.DateTime, default=datetime.datetime.now)
status = db.Column(db.Integer, default=0)
bg = db.Column(db.String(URL_LEN), default='')
category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))
category = db.relationship(Category, backref="Post")
tags = db.relationship("Tag", secondary="posts_tags")
def __str__(self):
return f'{self.pagetitle}'
| 28.162162 | 72 | 0.675624 | [
"MIT"
] | gunlinux/gunlinux.org | blog/post/models.py | 1,042 | Python |
# -*- coding: utf-8 -*-
"""
Module entry point.
------------------------------------------------------------------------------
This file is part of grepros - grep for ROS bag files and live topics.
Released under the BSD License.
@author Erki Suurjaak
@created 24.10.2021
@modified 02.11.2021
------------------------------------------------------------------------------
"""
from . import main
if "__main__" == __name__:
main.run()
| 25.111111 | 78 | 0.429204 | [
"BSD-3-Clause"
] | suurjaak/grepros | src/grepros/__main__.py | 452 | Python |
"""
This module contains functions to:
- solve a single equation for a single variable, in any domain either real or complex.
- solve a system of linear equations with N variables and M equations.
- solve a system of Non Linear Equations with N variables and M equations
"""
from __future__ import print_function, division
from sympy.core.sympify import sympify
from sympy.core import S, Pow, Dummy, pi, Expr, Wild, Mul, Equality
from sympy.core.numbers import I, Number, Rational, oo
from sympy.core.function import (Lambda, expand, expand_complex)
from sympy.core.relational import Eq
from sympy.simplify.simplify import simplify, fraction, trigsimp
from sympy.core.symbol import Symbol
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, acsc, asec, arg,
piecewise_fold)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.sets import (FiniteSet, EmptySet, imageset, Interval, Intersection,
Union, ConditionSet, ImageSet, Complement)
from sympy.matrices import Matrix
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf)
from sympy.solvers.solvers import checksol, denoms, unrad, _simple_dens
from sympy.solvers.polysys import solve_poly_system
from sympy.solvers.inequalities import solve_univariate_inequality
from sympy.utilities import filldedent
from sympy.calculus.util import periodicity
from sympy.core.compatibility import ordered, default_sort_key
def _invert(f_x, y, x, domain=S.Complexes):
"""
Reduce the complex valued equation ``f(x) = y`` to a set of equations
``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is
a simpler function than ``f(x)``. The return value is a tuple ``(g(x),
set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is
the set of function ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
The ``set_h`` contains the functions along with the information
about their domain in which they are valid, through set
operations. For instance, if ``y = Abs(x) - n``, is inverted
in the real domain, then, the ``set_h`` doesn't simply return
`{-n, n}`, as the nature of `n` is unknown; rather it will return:
`Intersection([0, oo) {n}) U Intersection((-oo, 0], {-n})`
By default, the complex domain is used but note that inverting even
seemingly simple functions like ``exp(x)`` can give very different
result in the complex domain than are obtained in the real domain.
(In the case of ``exp(x)``, the inversion via ``log`` is multi-valued
in the complex domain, having infinitely many branches.)
If you are working with real values only (or you are not sure which
function to use) you should probably use set the domain to
``S.Reals`` (or use `invert\_real` which does that automatically).
Examples
========
>>> from sympy.solvers.solveset import invert_complex, invert_real
>>> from sympy.abc import x, y
>>> from sympy import exp, log
When does exp(x) == y?
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers()))
>>> invert_real(exp(x), y, x)
(x, Intersection((-oo, oo), {log(y)}))
When does exp(x) == 1?
>>> invert_complex(exp(x), 1, x)
(x, ImageSet(Lambda(_n, 2*_n*I*pi), Integers()))
>>> invert_real(exp(x), 1, x)
(x, {0})
See Also
========
invert_real, invert_complex
"""
x = sympify(x)
if not x.is_Symbol:
raise ValueError("x must be a symbol")
f_x = sympify(f_x)
if not f_x.has(x):
raise ValueError("Inverse of constant function doesn't exist")
y = sympify(y)
if y.has(x):
raise ValueError("y should be independent of x ")
if domain.is_subset(S.Reals):
x, s = _invert_real(f_x, FiniteSet(y), x)
else:
x, s = _invert_complex(f_x, FiniteSet(y), x)
return x, s.intersection(domain) if isinstance(s, FiniteSet) else s
invert_complex = _invert
def invert_real(f_x, y, x, domain=S.Reals):
"""
Inverts a real-valued function. Same as _invert, but sets
the domain to ``S.Reals`` before inverting.
"""
return _invert(f_x, y, x, domain)
def _invert_real(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n', real=True)
if hasattr(f, 'inverse') and not isinstance(f, (
TrigonometricFunction,
HyperbolicFunction,
)):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys),
symbol)
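    # |g(x)| = y splits into g(x) = y on [0, oo) and g(x) = -y on (-oo, 0]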
if isinstance(f, Abs):
pos = Interval(0, S.Infinity)
neg = Interval(S.NegativeInfinity, 0)
return _invert_real(f.args[0],
Union(imageset(Lambda(n, n), g_ys).intersect(pos),
imageset(Lambda(n, -n), g_ys).intersect(neg)), symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
res = imageset(Lambda(n, real_root(n, expo)), g_ys)
if expo.is_rational:
numer, denom = expo.as_numer_denom()
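                # base**(p/q) = y: a numerator of +/-1 or an odd numerator
                # keeps only the real root; an even numerator also admits
                # the negated root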
if numer == S.One or numer == - S.One:
return _invert_real(base, res, symbol)
else:
if numer % 2 == 0:
n = Dummy('n')
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
else:
return _invert_real(base, res, symbol)
else:
if not base.is_positive:
raise ValueError("x**w where w is irrational is not "
"defined for negative x")
return _invert_real(base, res, symbol)
if not base_has_sym:
return _invert_real(expo,
imageset(Lambda(n, log(n)/log(base)), g_ys), symbol)
if isinstance(f, TrigonometricFunction):
if isinstance(g_ys, FiniteSet):
def inv(trig):
if isinstance(f, (sin, csc)):
F = asin if isinstance(f, sin) else acsc
return (lambda a: n*pi + (-1)**n*F(a),)
if isinstance(f, (cos, sec)):
F = acos if isinstance(f, cos) else asec
return (
lambda a: 2*n*pi + F(a),
lambda a: 2*n*pi - F(a),)
if isinstance(f, (tan, cot)):
return (lambda a: n*pi + f.inverse()(a),)
n = Dummy('n', integer=True)
invs = S.EmptySet
for L in inv(f):
invs += Union(*[imageset(Lambda(n, L(g)), S.Integers) for g in g_ys])
return _invert_real(f.args[0], invs, symbol)
return (f, g_ys)
def _invert_complex(f, g_ys, symbol):
"""Helper function for _invert."""
if f == symbol:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g is not S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g is not S.One:
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if hasattr(f, 'inverse') and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp):
if isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.args[0], exp_invs, symbol)
return (f, g_ys)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
else:
return all([_domain_check(g, symbol, p)
for g in f.args])
def _is_finite_with_finite_vars(f, domain=S.Complexes):
"""
Return True if the given expression is finite. For symbols that
don't assign a value for `complex` and/or `real`, the domain will
be used to assign a value; symbols that don't assign a value
for `finite` will be made finite. All other assumptions are
left unmodified.
"""
def assumptions(s):
A = s.assumptions0
if A.get('finite', None) is None:
A['finite'] = True
A.setdefault('complex', True)
A.setdefault('real', domain.is_subset(S.Reals))
return A
reps = {s: Dummy(**assumptions(s)) for s in f.free_symbols}
return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def _solve_as_rational(f, symbol, domain):
""" solve rational functions"""
f = together(f, deep=True)
g, h = fraction(f)
if not h.has(symbol):
return _solve_as_poly(g, symbol, domain)
else:
valid_solns = _solveset(g, symbol, domain)
invalid_solns = _solveset(h, symbol, domain)
return valid_solns - invalid_solns
def _solve_trig(f, symbol, domain):
""" Helper to solve trigonometric equations """
f = trigsimp(f)
f_original = f
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(exp(I*symbol), y), h.subs(exp(I*symbol), y)
if g.has(symbol) or h.has(symbol):
return ConditionSet(symbol, Eq(f, 0), S.Reals)
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, FiniteSet):
result = Union(*[invert_complex(exp(I*symbol), s, symbol)[1]
for s in solns])
return Intersection(result, domain)
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f_original, 0), S.Reals)
def _solve_as_poly(f, symbol, domain=S.Complexes):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), domain)
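        # the equation may be polynomial in a single generator containing the
        # symbol (e.g. exp(x) or sqrt(x)): solve for that generator, then
        # invert generator = root to recover the symbol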
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), domain)
if gen != symbol:
y = Dummy('y')
inverter = invert_real if domain.is_subset(S.Reals) else invert_complex
lhs, rhs_s = inverter(gen, y, symbol)
if lhs == symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
else:
result = ConditionSet(symbol, Eq(f, 0), domain)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
# - sqrt(2)*I/2. We are not expanding for solution with free
# variables because that makes the solution more complicated. For
# example expand_complex(a) returns re(a) + I*im(a)
if all([s.free_symbols == set() and not isinstance(s, RootOf)
for s in result]):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
if isinstance(result, FiniteSet):
result = result.intersection(domain)
return result
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def _has_rational_power(expr, symbol):
"""
Returns (bool, den) where bool is True if the term has a
non-integer rational power and den is the denominator of the
expression's exponent.
Examples
========
>>> from sympy.solvers.solveset import _has_rational_power
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> _has_rational_power(sqrt(x), x)
(True, 2)
>>> _has_rational_power(x**2, x)
(False, 1)
"""
a, p, q = Wild('a'), Wild('p'), Wild('q')
pattern_match = expr.match(a*p**q) or {}
if pattern_match.get(a, S.Zero) is S.Zero:
return (False, S.One)
elif p not in pattern_match.keys():
return (False, S.One)
elif isinstance(pattern_match[q], Rational) \
and pattern_match[p].has(symbol):
if not pattern_match[q].q == S.One:
return (True, pattern_match[q].q)
if not isinstance(pattern_match[a], Pow) \
or isinstance(pattern_match[a], Mul):
return (False, S.One)
else:
return _has_rational_power(pattern_match[a], symbol)
def _solve_radical(f, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
eq, cov = unrad(f)
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, [symbol])])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
if isinstance(result, Complement):
solution_set = result
else:
f_set = [] # solutions for FiniteSet
c_set = [] # solutions for ConditionSet
for s in result:
if checksol(f, symbol, s):
f_set.append(s)
else:
c_set.append(s)
solution_set = FiniteSet(*f_set) + ConditionSet(symbol, Eq(f, 0), FiniteSet(*c_set))
return solution_set
def _solve_abs(f, symbol, domain):
""" Helper function to solve equation involving absolute value function """
if not domain.is_subset(S.Reals):
raise ValueError(filldedent('''
Absolute values cannot be inverted in the
complex domain.'''))
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r) or {}
if not pattern_match.get(p, S.Zero).is_zero:
f_p, f_q, f_r = pattern_match[p], pattern_match[q], pattern_match[r]
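        # split on the sign of the Abs argument: solve p*q + r = 0 where
        # q >= 0 and p*(-q) + r = 0 where q < 0, then take the union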
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False)
q_neg_cond = solve_univariate_inequality(f_q < 0, symbol,
relational=False)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), domain)
def solve_decomposition(f, symbol, domain):
"""
Function to solve equations via the principle of "Decomposition
and Rewriting".
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solve_decomposition as sd
>>> x = Symbol('x')
>>> f1 = exp(2*x) - 3*exp(x) + 2
>>> sd(f1, x, S.Reals)
{0, log(2)}
>>> f2 = sin(x)**2 + 2*sin(x) + 1
>>> pprint(sd(f2, x, S.Reals), use_unicode=False)
3*pi
{2*n*pi + ---- | n in Integers()}
2
>>> f3 = sin(x + 2)
>>> pprint(sd(f3, x, S.Reals), use_unicode=False)
{2*n*pi - 2 | n in Integers()} U {pi*(2*n + 1) - 2 | n in Integers()}
"""
from sympy.solvers.decompogen import decompogen
from sympy.calculus.util import function_range
# decompose the given function
g_s = decompogen(f, symbol)
# `y_s` represents the set of values for which the function `g` is to be
# solved.
# `solutions` represent the solutions of the equations `g = y_s` or
# `g = 0` depending on the type of `y_s`.
# As we are interested in solving the equation: f = 0
y_s = FiniteSet(0)
for g in g_s:
frange = function_range(g, symbol, domain)
y_s = Intersection(frange, y_s)
result = S.EmptySet
if isinstance(y_s, FiniteSet):
for y in y_s:
solutions = solveset(Eq(g, y), symbol, domain)
if not isinstance(solutions, ConditionSet):
result += solutions
else:
if isinstance(y_s, ImageSet):
iter_iset = (y_s,)
elif isinstance(y_s, Union):
iter_iset = y_s.args
for iset in iter_iset:
new_solutions = solveset(Eq(iset.lamda.expr, g), symbol, domain)
dummy_var = tuple(iset.lamda.expr.free_symbols)[0]
base_set = iset.base_set
if isinstance(new_solutions, FiniteSet):
new_exprs = new_solutions
elif isinstance(new_solutions, Intersection):
if isinstance(new_solutions.args[1], FiniteSet):
new_exprs = new_solutions.args[1]
for new_expr in new_exprs:
result += ImageSet(Lambda(dummy_var, new_expr), base_set)
if result is S.EmptySet:
return ConditionSet(symbol, Eq(f, 0), domain)
y_s = result
return y_s
def _solveset(f, symbol, domain, _check=False):
"""Helper for solveset to return a result from an expression
that has already been sympify'ed and is known to contain the
given symbol."""
# _check controls whether the answer is checked or not
from sympy.simplify.simplify import signsimp
orig_f = f
f = together(f)
if f.is_Mul:
_, f = f.as_independent(symbol, as_Add=False)
if f.is_Add:
a, h = f.as_independent(symbol)
m, h = h.as_independent(symbol, as_Add=False)
f = a/m + h # XXX condition `m != 0` should be added to soln
f = piecewise_fold(f)
# assign the solvers to use
solver = lambda f, x, domain=domain: _solveset(f, x, domain)
if domain.is_subset(S.Reals):
inverter_func = invert_real
else:
inverter_func = invert_complex
inverter = lambda f, rhs, symbol: inverter_func(f, rhs, symbol, domain)
result = EmptySet()
if f.expand().is_zero:
return domain
elif not f.has(symbol):
return EmptySet()
elif f.is_Mul and all(_is_finite_with_finite_vars(m, domain)
for m in f.args):
        # The solution of f(x)*g(x) == 0 is the same as
        # Union(f(x) == 0, g(x) == 0) only when both factors are finite:
        # in general g(x) can grow infinitely large at the values where
        # f(x) == 0. To be sure that we are not silently allowing any
        # wrong solutions we use this technique only if both f and g are
        # finite for a finite input.
result = Union(*[solver(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_trig(f, symbol, domain)
elif f.is_Piecewise:
dom = domain
result = EmptySet()
expr_set_pairs = f.as_expr_set_pairs()
for (expr, in_set) in expr_set_pairs:
if in_set.is_Relational:
in_set = in_set.as_set()
if in_set.is_Interval:
dom -= in_set
solns = solver(expr, symbol, in_set)
result += solns
else:
lhs, rhs_s = inverter(f, 0, symbol)
if lhs == symbol:
# do some very minimal simplification since
# repeated inversion may have left the result
# in a state that other solvers (e.g. poly)
# would have simplified; this is done here
# rather than in the inverter since here it
# is only done once whereas there it would
# be repeated for each step of the inversion
if isinstance(rhs_s, FiniteSet):
rhs_s = FiniteSet(*[Mul(*
signsimp(i).as_content_primitive())
for i in rhs_s])
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
for equation in [lhs - rhs for rhs in rhs_s]:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args) or _has_rational_power(
equation, symbol)[0]:
result += _solve_radical(equation,
symbol,
solver)
elif equation.has(Abs):
result += _solve_abs(f, symbol, domain)
else:
result += _solve_as_rational(equation, symbol, domain)
else:
result += solver(equation, symbol)
elif rhs_s is not S.EmptySet:
result = ConditionSet(symbol, Eq(f, 0), domain)
if _check:
if isinstance(result, ConditionSet):
# it wasn't solved or has enumerated all conditions
# -- leave it alone
return result
# whittle away all but the symbol-containing core
# to use this for testing
fx = orig_f.as_independent(symbol, as_Add=True)[1]
fx = fx.as_independent(symbol, as_Add=False)[1]
if isinstance(result, FiniteSet):
# check the result for invalid solutions
result = FiniteSet(*[s for s in result
if isinstance(s, RootOf)
or domain_check(fx, symbol, s)])
return result
def solveset(f, symbol=None, domain=S.Complexes):
"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An `EmptySet` is returned if `f` is False or nonzero.
A `ConditionSet` is returned as unsolved object if algorithms
to evaluate complete solution are not yet implemented.
`solveset` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
Notes
=====
Python interprets 0 and 1 as False and True, respectively, but
in this function they refer to solutions of an expression. So 0 and 1
return the Domain and EmptySet, respectively, while True and False
return the opposite (as they are assumed to be solutions of relational
expressions).
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, sin, Symbol, pprint, S
>>> from sympy.solvers.solveset import solveset, solveset_real
* The default domain is complex. Not specifying a domain will lead
to the solving of the equation in the complex domain (and this
is not affected by the assumptions on the symbol):
>>> x = Symbol('x')
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
>>> x = Symbol('x', real=True)
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
* If you want to use `solveset` to solve the equation in the
real domain, provide a real domain. (Using `solveset\_real`
does this automatically.)
>>> R = S.Reals
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, R)
{0}
>>> solveset_real(exp(x) - 1, x)
{0}
The solution is mostly unaffected by assumptions on the symbol,
but there may be some slight difference:
>>> pprint(solveset(sin(x)/x,x), use_unicode=False)
({2*n*pi | n in Integers()} \ {0}) U ({2*n*pi + pi | n in Integers()} \ {0})
>>> p = Symbol('p', positive=True)
>>> pprint(solveset(sin(p)/p, p), use_unicode=False)
{2*n*pi | n in Integers()} U {2*n*pi + pi | n in Integers()}
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, R)
(0, oo)
"""
f = sympify(f)
if f is S.true:
return domain
if f is S.false:
return S.EmptySet
if not isinstance(f, (Expr, Number)):
raise ValueError("%s is not a valid SymPy expression" % (f))
free_symbols = f.free_symbols
if not free_symbols:
b = Eq(f, 0)
if b is S.true:
return domain
elif b is S.false:
return S.EmptySet
else:
raise NotImplementedError(filldedent('''
relationship between value and 0 is unknown: %s''' % b))
if symbol is None:
if len(free_symbols) == 1:
symbol = free_symbols.pop()
else:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not getattr(symbol, 'is_Symbol', False):
raise ValueError('A Symbol must be given, not type %s: %s' %
(type(symbol), symbol))
if isinstance(f, Eq):
from sympy.core import Add
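        # rewrite Eq(lhs, rhs) as lhs - rhs without evaluation so the
        # structure of the original equation is preserved for the solvers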
f = Add(f.lhs, - f.rhs, evaluate=False)
elif f.is_Relational:
if not domain.is_subset(S.Reals):
raise NotImplementedError(filldedent('''
Inequalities in the complex domain are
not supported. Try the real domain by
setting domain=S.Reals'''))
try:
result = solve_univariate_inequality(
f, symbol, relational=False) - _invalid_solutions(
f, symbol, domain)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
return _solveset(f, symbol, domain, _check=True)
def _invalid_solutions(f, symbol, domain):
bad = S.EmptySet
for d in denoms(f):
bad += _solveset(d, symbol, domain, _check=False)
return bad
def solveset_real(f, symbol):
return solveset(f, symbol, S.Reals)
def solveset_complex(f, symbol):
return solveset(f, symbol, S.Complexes)
def solvify(f, symbol, domain):
"""Solves an equation using solveset and returns the solution in accordance
with the `solve` output API.
Returns
=======
We classify the output based on the type of solution returned by `solveset`.
Solution | Output
----------------------------------------
FiniteSet | list
ImageSet, | list (if `f` is periodic)
Union |
EmptySet | empty list
Others | None
Raises
======
NotImplementedError
A ConditionSet is the input.
Examples
========
>>> from sympy.solvers.solveset import solvify, solveset
>>> from sympy.abc import x
>>> from sympy import S, tan, sin, exp
>>> solvify(x**2 - 9, x, S.Reals)
[-3, 3]
>>> solvify(sin(x) - 1, x, S.Reals)
[pi/2]
>>> solvify(tan(x), x, S.Reals)
[0]
>>> solvify(exp(x) - 1, x, S.Complexes)
>>> solvify(exp(x) - 1, x, S.Reals)
[0]
"""
solution_set = solveset(f, symbol, domain)
result = None
if solution_set is S.EmptySet:
result = []
elif isinstance(solution_set, ConditionSet):
raise NotImplementedError('solveset is unable to solve this equation.')
elif isinstance(solution_set, FiniteSet):
result = list(solution_set)
else:
period = periodicity(f, symbol)
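        # for periodic equations, reduce ImageSet solutions to the
        # representatives lying in one period, the interval [0, period)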
if period is not None:
solutions = S.EmptySet
if isinstance(solution_set, ImageSet):
iter_solutions = (solution_set,)
elif isinstance(solution_set, Union):
if all(isinstance(i, ImageSet) for i in solution_set.args):
iter_solutions = solution_set.args
for solution in iter_solutions:
solutions += solution.intersect(Interval(0, period, False, True))
if isinstance(solutions, FiniteSet):
result = list(solutions)
else:
solution = solution_set.intersect(domain)
if isinstance(solution, FiniteSet):
result += solution
return result
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. The order of symbols in input `symbols` will
determine the order of coefficients in the returned
Matrix.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system would return `A` & `b` as given below:
::
[ 4 2 3 ] [ 1 ]
A = [ 3 1 1 ] b = [-6 ]
[ 2 4 9 ] [ 2 ]
Examples
========
>>> from sympy import linear_eq_to_matrix, symbols
>>> x, y, z = symbols('x, y, z')
>>> eqns = [x + 2*y + 3*z - 1, 3*x + y + z + 6, 2*x + 4*y + 9*z - 2]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 2, 3],
[3, 1, 1],
[2, 4, 9]])
>>> b
Matrix([
[ 1],
[-6],
[ 2]])
>>> eqns = [x + z - 1, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[1],
[0],
[0]])
* Symbolic coefficients are also supported
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> A, B = linear_eq_to_matrix(eqns, x, y)
>>> A
Matrix([
[a, b],
[d, e]])
>>> B
Matrix([
[c],
[f]])
"""
if not symbols:
raise ValueError('Symbols must be given, for which coefficients \
are to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
M = Matrix([symbols])
# initialise Matrix with symbols + 1 columns
M = M.col_insert(len(symbols), Matrix([1]))
row_no = 1
for equation in equations:
f = sympify(equation)
if isinstance(f, Equality):
f = f.lhs - f.rhs
# Extract coeff of symbols
coeff_list = []
for symbol in symbols:
coeff_list.append(f.coeff(symbol))
# append constant term (term free from symbols)
coeff_list.append(-f.as_coeff_add(*symbols)[0])
# insert equations coeff's into rows
M = M.row_insert(row_no, Matrix([coeff_list]))
row_no += 1
# delete the initialised (Ist) trivial row
M.row_del(0)
A, b = M[:, :-1], M[:, -1:]
return A, b
def linsolve(system, *symbols):
r"""
Solve system of N linear equations with M variables, which
means both under - and overdetermined systems are supported.
The possible number of solutions is zero, one or infinite.
Zero solutions throws a ValueError, where as infinite
solutions are represented parametrically in terms of given
symbols. For unique solution a FiniteSet of ordered tuple
is returned.
All Standard input formats are supported:
For the given set of Equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented Matrix Form, `system` given below:
::
[3 2 -1 1]
system = [2 -2 4 -2]
[2 -1 2 0]
* List Of Equations Form
`system = [3x + 2y - z - 1, 2x - 2y + 4z + 2, 2x - y + 2z]`
* Input A & b Matrix Form (from Ax = b) are given as below:
::
[3 2 -1 ] [ 1 ]
A = [2 -2 4 ] b = [ -2 ]
[2 -1 2 ] [ 0 ]
`system = (A, b)`
Symbols to solve for should be given as input in all the
cases either in an iterable or as comma separated arguments.
This is done to maintain consistency in returning solutions
in the form of variable input by the user.
The algorithm used here is Gauss-Jordan elimination, which
    results, after elimination, in a row echelon form matrix.
Returns
=======
A FiniteSet of ordered tuple of values of `symbols` for which
the `system` has solution.
Please note that general FiniteSet is unordered, the solution
returned here is not simply a FiniteSet of solutions, rather
it is a FiniteSet of ordered tuple, i.e. the first & only
argument to FiniteSet is a tuple of solutions, which is ordered,
& hence the returned solution is ordered.
Also note that solution could also have been returned as an
ordered tuple, FiniteSet is just a wrapper `{}` around
the tuple. It has no other significance except for
the fact it is just used to maintain a consistent output
format throughout the solveset.
Returns EmptySet(), if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy import Matrix, S, linsolve, symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
* Parametric Solution: In case the system is under determined, the function
will return parametric solution in terms of the given symbols.
Free symbols in the system are returned as it is. For e.g. in the system
below, `z` is returned as the solution for variable z, which means z is a
free symbol, i.e. it can take arbitrary values.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), [x, y, z])
{(z - 1, -2*z + 2, z)}
* List of Equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + S(1)/2*y - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented Matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{((-b*f + c*e)/(a*e - b*d), (a*f - c*d)/(a*e - b*d))}
* A degenerate system returns solution as set of given
symbols.
>>> system = Matrix(([0,0,0], [0,0,0], [0,0,0]))
>>> linsolve(system, x, y)
{(x, y)}
* For an empty system linsolve returns empty set
>>> linsolve([ ], x)
EmptySet()
"""
if not system:
return S.EmptySet
if not symbols:
raise ValueError('Symbols must be given, for which solution of the '
'system is to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
if not sym:
raise ValueError('Symbols or iterable of symbols must be given as '
'second argument, not type %s: %s' % (type(symbols[0]), symbols[0]))
# 1). Augmented Matrix input Form
if isinstance(system, Matrix):
A, b = system[:, :-1], system[:, -1:]
elif hasattr(system, '__iter__'):
# 2). A & b as input Form
if len(system) == 2 and system[0].is_Matrix:
A, b = system[0], system[1]
# 3). List of equations Form
if not system[0].is_Matrix:
A, b = linear_eq_to_matrix(system, symbols)
else:
raise ValueError("Invalid arguments")
# Solve using Gauss-Jordan elimination
try:
sol, params, free_syms = A.gauss_jordan_solve(b, freevar=True)
except ValueError:
# No solution
return EmptySet()
# Replace free parameters with free symbols
solution = []
if params:
for s in sol:
for k, v in enumerate(params):
s = s.xreplace({v: symbols[free_syms[k]]})
solution.append(simplify(s))
else:
for s in sol:
solution.append(simplify(s))
# Return solutions
solution = FiniteSet(tuple(solution))
return solution
##############################################################################
# ------------------------------nonlinsolve ---------------------------------#
##############################################################################
def _return_conditionset(eqs, symbols):
# return conditionset
condition_set = ConditionSet(
FiniteSet(*symbols),
FiniteSet(*eqs),
S.Complexes)
return condition_set
def substitution(system, symbols, result=[{}], known_symbols=[],
exclude=[], all_symbols=None):
r"""
Solves the `system` using substitution method. It is used in
`nonlinsolve`. This will be called from `nonlinsolve` when any
equation(s) is non polynomial equation.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of symbols to be solved.
The variable(s) for which the system is solved
known_symbols : list of solved symbols
Values are known for these variable(s)
result : An empty list or list of dict
If No symbol values is known then empty list otherwise
symbol as keys and corresponding value in dict.
exclude : Set of expression.
Mostly denominator expression(s) of the equations of the system.
Final solution should not satisfy these expressions.
all_symbols : known_symbols + symbols(unsolved).
Returns
=======
A FiniteSet of ordered tuple of values of `all_symbols` for which the
`system` has solution. Order of values in the tuple is same as symbols
present in the parameter `all_symbols`. If parameter `all_symbols` is None
then same as symbols present in the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> x, y = symbols('x, y', real=True)
>>> from sympy.solvers.solveset import substitution
>>> substitution([x + y], [x], [{y: 1}], [y], set([]), [x, y])
{(-1, 1)}
    * when you want the solution to not satisfy the equation `x + 1 = 0`
>>> substitution([x + y], [x], [{y: 1}], [y], set([x + 1]), [y, x])
EmptySet()
>>> substitution([x + y], [x], [{y: 1}], [y], set([x - 1]), [y, x])
{(1, -1)}
>>> substitution([x + y - 1, y - x**2 + 5], [x, y])
{(-3, 4), (2, -1)}
* Returns both real and complex solution
>>> x, y, z = symbols('x, y, z')
>>> from sympy import exp, sin
>>> substitution([exp(x) - sin(y), y**2 - 4], [x, y])
{(log(sin(2)), 2), (ImageSet(Lambda(_n, I*(2*_n*pi + pi) +
log(sin(2))), Integers()), -2), (ImageSet(Lambda(_n, 2*_n*I*pi +
Mod(log(sin(2)), 2*I*pi)), Integers()), 2)}
>>> eqs = [z**2 + exp(2*x) - sin(y), -3 + exp(-y)]
>>> substitution(eqs, [y, z])
{(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3)))),
(ImageSet(Lambda(_n, 2*_n*I*pi + Mod(-log(3), 2*I*pi)), Integers()),
ImageSet(Lambda(_n, -sqrt(-exp(2*x) + sin(2*_n*I*pi +
Mod(-log(3), 2*I*pi)))), Integers())),
(ImageSet(Lambda(_n, 2*_n*I*pi + Mod(-log(3), 2*I*pi)), Integers()),
ImageSet(Lambda(_n, sqrt(-exp(2*x) + sin(2*_n*I*pi +
Mod(-log(3), 2*I*pi)))), Integers()))}
"""
from sympy import Complement
from sympy.core.compatibility import is_sequence
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if not is_sequence(symbols):
        msg = ('symbols should be given as a sequence, e.g. a list. '
               'Not type %s: %s')
raise TypeError(filldedent(msg % (type(symbols), symbols)))
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
if not sym:
msg = ('Iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
# By default `all_symbols` will be same as `symbols`
if all_symbols is None:
all_symbols = symbols
old_result = result
# storing complements and intersection for particular symbol
complements = {}
intersections = {}
    # when total_solveset_call equals total_conditionset,
    # solveset failed to solve all of the equations.
total_conditionset = -1
total_solveset_call = -1
def _unsolved_syms(eq, sort=False):
"""Returns the unsolved symbol present
in the equation `eq`.
"""
free = eq.free_symbols
unsolved = (free - set(known_symbols)) & set(all_symbols)
if sort:
unsolved = list(unsolved)
unsolved.sort(key=default_sort_key)
return unsolved
# end of _unsolved_syms()
    # sort so that the equation with the fewest potential symbols comes first,
    # i.e. equations with fewer variables appear earlier in the list.
eqs_in_better_order = list(
ordered(system, lambda _: len(_unsolved_syms(_))))
def add_intersection_complement(result, sym_set, **flags):
# If solveset have returned some intersection/complement
# for any symbol. It will be added in final solution.
final_result = []
for res in result:
res_copy = res
for key_res, value_res in res.items():
# Intersection/complement is in Interval or Set.
intersection_true = flags.get('Intersection', True)
complements_true = flags.get('Complement', True)
for key_sym, value_sym in sym_set.items():
if key_sym == key_res:
if intersection_true:
# testcase is not added for this line(intersection)
new_value = \
Intersection(FiniteSet(value_res), value_sym)
if new_value is not S.EmptySet:
res_copy[key_res] = new_value
if complements_true:
new_value = \
Complement(FiniteSet(value_res), value_sym)
if new_value is not S.EmptySet:
res_copy[key_res] = new_value
final_result.append(res_copy)
return final_result
# end of def add_intersection_complement()
def _extract_main_soln(sol, soln_imageset):
"""separate the Complements, Intersections, ImageSet lambda expr
and it's base_set.
"""
# if there is union, then need to check
# Complement, Intersection, Imageset.
# Order should not be changed.
if isinstance(sol, Complement):
# extract solution and complement
complements[sym] = sol.args[1]
sol = sol.args[0]
# complement will be added at the end
# using `add_intersection_complement` method
if isinstance(sol, Intersection):
# Interval/Set will be at 0th index always
if sol.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection `S.Reals`, to confirm that
# soln is in `domain=S.Reals` or not. We don't consider
                # that intersection.
intersections[sym] = sol.args[0]
sol = sol.args[1]
# after intersection and complement Imageset should
# be checked.
if isinstance(sol, ImageSet):
soln_imagest = sol
expr2 = sol.lamda.expr
sol = FiniteSet(expr2)
soln_imageset[expr2] = soln_imagest
# if there is union of Imageset or other in soln.
# no testcase is written for this if block
if isinstance(sol, Union):
sol_args = sol.args
sol = S.EmptySet
            # We need them in sequence, so append FiniteSet elements
# and then imageset or other.
for sol_arg2 in sol_args:
if isinstance(sol_arg2, FiniteSet):
sol += sol_arg2
else:
# ImageSet, Intersection, complement then
# append them directly
sol += FiniteSet(sol_arg2)
if not isinstance(sol, FiniteSet):
sol = FiniteSet(sol)
return sol, soln_imageset
# end of def _extract_main_soln()
# helper function for _append_new_soln
def _check_exclude(rnew, imgset_yes):
rnew_ = rnew
if imgset_yes:
# replace all dummy variables (Imageset lambda variables)
# with zero before `checksol`. Considering fundamental soln
# for `checksol`.
rnew_copy = rnew.copy()
dummy_n = imgset_yes[0]
for key_res, value_res in rnew_copy.items():
rnew_copy[key_res] = value_res.subs(dummy_n, 0)
rnew_ = rnew_copy
# satisfy_exclude == true if it satisfies the expr of `exclude` list.
try:
# something like : `Mod(-log(3), 2*I*pi)` can't be
# simplified right now, so `checksol` returns `TypeError`.
# when this issue is fixed this try block should be
# removed. Mod(-log(3), 2*I*pi) == -log(3)
satisfy_exclude = any(
checksol(d, rnew_) for d in exclude)
except TypeError:
satisfy_exclude = None
return satisfy_exclude
# end of def _check_exclude()
# helper function for _append_new_soln
def _restore_imgset(rnew, original_imageset, newresult):
restore_sym = set(rnew.keys()) & \
set(original_imageset.keys())
for key_sym in restore_sym:
img = original_imageset[key_sym]
rnew[key_sym] = img
if rnew not in newresult:
newresult.append(rnew)
# end of def _restore_imgset()
def _append_eq(eq, result, res, delete_soln, n=None):
u = Dummy('u')
if n:
eq = eq.subs(n, 0)
satisfy = checksol(u, u, eq, minimal=True)
if satisfy is False:
delete_soln = True
res = {}
else:
result.append(res)
return result, res, delete_soln
def _append_new_soln(rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult, eq=None):
"""If `rnew` (A dict <symbol: soln>) contains valid soln
append it to `newresult` list.
`imgset_yes` is (base, dummy_var) if there was imageset in previously
calculated result(otherwise empty tuple). `original_imageset` is dict
of imageset expr and imageset from this result.
`soln_imageset` dict of imageset expr and imageset of new soln.
"""
satisfy_exclude = _check_exclude(rnew, imgset_yes)
delete_soln = False
# soln should not satisfy expr present in `exclude` list.
if not satisfy_exclude:
local_n = None
# if it is imageset
if imgset_yes:
local_n = imgset_yes[0]
base = imgset_yes[1]
if sym and sol:
# when `sym` and `sol` is `None` means no new
# soln. In that case we will append rnew directly after
# substituting original imagesets in rnew values if present
# (second last line of this function using _restore_imgset)
dummy_list = list(sol.atoms(Dummy))
# use one dummy `n` which is in
# previous imageset
local_n_list = [
local_n for i in range(
0, len(dummy_list))]
dummy_zip = zip(dummy_list, local_n_list)
lam = Lambda(local_n, sol.subs(dummy_zip))
rnew[sym] = ImageSet(lam, base)
if eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln, local_n)
elif eq is not None:
newresult, rnew, delete_soln = _append_eq(
eq, newresult, rnew, delete_soln)
elif soln_imageset:
rnew[sym] = soln_imageset[sol]
# restore original imageset
_restore_imgset(rnew, original_imageset, newresult)
else:
newresult.append(rnew)
elif satisfy_exclude:
delete_soln = True
rnew = {}
_restore_imgset(rnew, original_imageset, newresult)
return newresult, delete_soln
# end of def _append_new_soln()
def _new_order_result(result, eq):
        # separate into first and second priority: a `res` that makes `eq`
        # equal to zero should be used first, then the other results
        # (second priority). Otherwise we may miss some soln.
first_priority = []
second_priority = []
for res in result:
if not any(isinstance(val, ImageSet) for val in res.values()):
if eq.subs(res) == 0:
first_priority.append(res)
else:
second_priority.append(res)
if first_priority or second_priority:
return first_priority + second_priority
return result
def _solve_using_known_values(result, solver):
"""Solves the system using already known solution
(result contains the dict <symbol: value>).
solver is `solveset_complex` or `solveset_real`.
"""
# stores imageset <expr: imageset(Lambda(n, expr), base)>.
soln_imageset = {}
total_solvest_call = 0
total_conditionst = 0
        # sort so that the equation with the fewest potential symbols comes
        # first, i.e. equations with fewer variables are handled earlier
for index, eq in enumerate(eqs_in_better_order):
newresult = []
original_imageset = {}
# if imageset expr is used to solve other symbol
imgset_yes = False
result = _new_order_result(result, eq)
for res in result:
got_symbol = set() # symbols solved in one iteration
if soln_imageset:
# find the imageset and use its expr.
for key_res, value_res in res.items():
if isinstance(value_res, ImageSet):
res[key_res] = value_res.lamda.expr
original_imageset[key_res] = value_res
dummy_n = value_res.lamda.expr.atoms(Dummy).pop()
base = value_res.base_set
imgset_yes = (dummy_n, base)
# update eq with everything that is known so far
eq2 = eq.subs(res)
unsolved_syms = _unsolved_syms(eq2, sort=True)
if not unsolved_syms:
if res:
newresult, delete_res = _append_new_soln(
res, None, None, imgset_yes, soln_imageset,
original_imageset, newresult, eq2)
if delete_res:
                        # `delete_res` is True: either substituting `res`
                        # into eq2 did not give zero, or `res` (a soln)
                        # satisfies an expr of the `exclude` list, so
                        # drop it.
result.remove(res)
continue # skip as it's independent of desired symbols
depen = eq2.as_independent(unsolved_syms)[0]
if depen.has(Abs) and solver == solveset_complex:
# Absolute values cannot be inverted in the
# complex domain
continue
soln_imageset = {}
for sym in unsolved_syms:
not_solvable = False
try:
soln = solver(eq2, sym)
total_solvest_call += 1
soln_new = S.EmptySet
if isinstance(soln, Complement):
# separate solution and complement
complements[sym] = soln.args[1]
soln = soln.args[0]
# complement will be added at the end
if isinstance(soln, Intersection):
# Interval will be at 0th index always
if soln.args[0] != Interval(-oo, oo):
# sometimes solveset returns soln
# with intersection S.Reals, to confirm that
# soln is in domain=S.Reals
intersections[sym] = soln.args[0]
soln_new += soln.args[1]
soln = soln_new if soln_new else soln
if index > 0 and solver == solveset_real:
# one symbol's real soln , another symbol may have
# corresponding complex soln.
if not isinstance(soln, (ImageSet, ConditionSet)):
soln += solveset_complex(eq2, sym)
except NotImplementedError:
                        # If solveset is not able to solve equation `eq2`, we
                        # may get a soln next time, using the next equation `eq2`
continue
if isinstance(soln, ConditionSet):
soln = S.EmptySet
# don't do `continue` we may get soln
# in terms of other symbol(s)
not_solvable = True
total_conditionst += 1
if soln is not S.EmptySet:
soln, soln_imageset = _extract_main_soln(
soln, soln_imageset)
for sol in soln:
# sol is not a `Union` since we checked it
# before this loop
sol, soln_imageset = _extract_main_soln(
sol, soln_imageset)
sol = set(sol).pop()
free = sol.free_symbols
if got_symbol and any([
ss in free for ss in got_symbol
]):
# sol depends on previously solved symbols
# then continue
continue
rnew = res.copy()
# put each solution in res and append the new result
# in the new result list (solution for symbol `s`)
# along with old results.
for k, v in res.items():
if isinstance(v, Expr):
# if any unsolved symbol is present
# Then subs known value
rnew[k] = v.subs(sym, sol)
# and add this new solution
if soln_imageset:
# replace all lambda variables with 0.
imgst = soln_imageset[sol]
rnew[sym] = imgst.lamda(
*[0 for i in range(0, len(
imgst.lamda.variables))])
else:
rnew[sym] = sol
newresult, delete_res = _append_new_soln(
rnew, sym, sol, imgset_yes, soln_imageset,
original_imageset, newresult)
if delete_res:
                                # deleting the `res` (a soln) since it satisfies
                                # an eq of the `exclude` list
result.remove(res)
# solution got for sym
if not not_solvable:
got_symbol.add(sym)
# next time use this new soln
if newresult:
result = newresult
return result, total_solvest_call, total_conditionst
    # end def _solve_using_known_values()
new_result_real, solve_call1, cnd_call1 = _solve_using_known_values(
old_result, solveset_real)
new_result_complex, solve_call2, cnd_call2 = _solve_using_known_values(
old_result, solveset_complex)
    # when `total_solveset_call` equals `total_conditionset`, it
    # means solveset failed to solve all the equations;
    # return a ConditionSet in this case
total_conditionset += (cnd_call1 + cnd_call2)
total_solveset_call += (solve_call1 + solve_call2)
if total_conditionset == total_solveset_call and total_solveset_call != -1:
return _return_conditionset(eqs_in_better_order, all_symbols)
# overall result
result = new_result_real + new_result_complex
result_all_variables = []
result_infinite = []
for res in result:
if not res:
# means {None : None}
continue
# If length < len(all_symbols) means infinite soln.
# Some or all the soln is dependent on 1 symbol.
# eg. {x: y+2} then final soln {x: y+2, y: y}
if len(res) < len(all_symbols):
solved_symbols = res.keys()
unsolved = list(filter(
lambda x: x not in solved_symbols, all_symbols))
for unsolved_sym in unsolved:
res[unsolved_sym] = unsolved_sym
result_infinite.append(res)
if res not in result_all_variables:
result_all_variables.append(res)
if result_infinite:
# we have general soln
# eg : [{x: -1, y : 1}, {x : -y , y: y}] then
# return [{x : -y, y : y}]
result_all_variables = result_infinite
if intersections and complements:
# no testcase is added for this block
result_all_variables = add_intersection_complement(
result_all_variables, intersections,
Intersection=True, Complement=True)
elif intersections:
result_all_variables = add_intersection_complement(
result_all_variables, intersections, Intersection=True)
elif complements:
result_all_variables = add_intersection_complement(
result_all_variables, complements, Complement=True)
# convert to ordered tuple
result = S.EmptySet
for r in result_all_variables:
temp = [r[symb] for symb in all_symbols]
result += FiniteSet(tuple(temp))
return result
# end of def substitution()
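# Illustrative sketch (not part of the original module) of the calling
# convention used by `nonlinsolve` below: a mixed polynomial/transcendental
# system handed to `substitution` with an empty `exclude` list. The equations
# are made up for the example and the returned set is not asserted here.
def _substitution_usage_sketch():
    from sympy.abc import x, y
    from sympy import exp
    system = [exp(x) - y, y - 3]
    return substitution(system, [x, y], exclude=[])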
def _solveset_work(system, symbols):
soln = solveset(system[0], symbols[0])
if isinstance(soln, FiniteSet):
_soln = FiniteSet(*[tuple((s,)) for s in soln])
return _soln
else:
return FiniteSet(tuple(FiniteSet(soln)))
def _handle_positive_dimensional(polys, symbols, denominators):
from sympy.polys.polytools import groebner
# substitution method where new system is groebner basis of the system
_symbols = list(symbols)
_symbols.sort(key=default_sort_key)
basis = groebner(polys, _symbols, polys=True)
new_system = []
for poly_eq in basis:
new_system.append(poly_eq.as_expr())
result = [{}]
result = substitution(
new_system, symbols, result, [],
denominators)
return result
# end of def _handle_positive_dimensional()
def _handle_zero_dimensional(polys, symbols, system):
# solve 0 dimensional poly system using `solve_poly_system`
result = solve_poly_system(polys, *symbols)
    # Some extra solutions may have been added because
    # we used `unrad` in `_separate_poly_nonpoly`, so
    # check each one and remove it if it is not a soln.
result_update = S.EmptySet
for res in result:
dict_sym_value = dict(list(zip(symbols, res)))
if all(checksol(eq, dict_sym_value) for eq in system):
result_update += FiniteSet(res)
return result_update
# end of def _handle_zero_dimensional()
def _separate_poly_nonpoly(system, symbols):
polys = []
polys_expr = []
nonpolys = []
denominators = set()
poly = None
for eq in system:
# Store denom expression if it contains symbol
denominators.update(_simple_dens(eq, symbols))
# try to remove sqrt and rational power
without_radicals = unrad(simplify(eq))
if without_radicals:
eq_unrad, cov = without_radicals
if not cov:
eq = eq_unrad
if isinstance(eq, Expr):
eq = eq.as_numer_denom()[0]
poly = eq.as_poly(*symbols, extension=True)
elif simplify(eq).is_number:
continue
if poly is not None:
polys.append(poly)
polys_expr.append(poly.as_expr())
else:
nonpolys.append(eq)
return polys, polys_expr, nonpolys, denominators
# end of def _separate_poly_nonpoly()
def nonlinsolve(system, *symbols):
r"""
    Solve a system of N nonlinear equations with M variables; both
    underdetermined and overdetermined systems are supported. Positive
    dimensional systems are also supported (a system with infinitely many
    solutions is said to be positive-dimensional); in that case the solution
    depends on at least one symbol. Returns both real and complex solutions
    (if the system has any). The possible number of solutions
    is zero, one or infinite.
Parameters
==========
system : list of equations
The target system of equations
symbols : list of Symbols
symbols should be given as a sequence eg. list
Returns
=======
    A FiniteSet of ordered tuples of values of `symbols` for which the `system`
    has a solution. The order of values in each tuple matches the order of the
    symbols in the parameter `symbols`.
Please note that general FiniteSet is unordered, the solution returned
here is not simply a FiniteSet of solutions, rather it is a FiniteSet of
ordered tuple, i.e. the first & only argument to FiniteSet is a tuple of
solutions, which is ordered, & hence the returned solution is ordered.
Also note that solution could also have been returned as an ordered tuple,
FiniteSet is just a wrapper `{}` around the tuple. It has no other
significance except for the fact it is just used to maintain a consistent
output format throughout the solveset.
For the given set of Equations, the respective input types
are given below:
.. math:: x*y - 1 = 0
.. math:: 4*x**2 + y**2 - 5 = 0
`system = [x*y - 1, 4*x**2 + y**2 - 5]`
`symbols = [x, y]`
Raises
======
ValueError
The input is not valid.
The symbols are not given.
AttributeError
The input symbols are not `Symbol` type.
Examples
========
>>> from sympy.core.symbol import symbols
>>> from sympy.solvers.solveset import nonlinsolve
>>> x, y, z = symbols('x, y, z', real=True)
>>> nonlinsolve([x*y - 1, 4*x**2 + y**2 - 5], [x, y])
{(-1, -1), (-1/2, -2), (1/2, 2), (1, 1)}
1. Positive dimensional system and complements:
>>> from sympy import pprint
>>> from sympy.polys.polytools import is_zero_dimensional
>>> a, b, c, d = symbols('a, b, c, d', real=True)
>>> eq1 = a + b + c + d
>>> eq2 = a*b + b*c + c*d + d*a
>>> eq3 = a*b*c + b*c*d + c*d*a + d*a*b
>>> eq4 = a*b*c*d - 1
>>> system = [eq1, eq2, eq3, eq4]
>>> is_zero_dimensional(system)
False
>>> pprint(nonlinsolve(system, [a, b, c, d]), use_unicode=False)
-1 1 1 -1
{(---, -d, -, {d} \ {0}), (-, -d, ---, {d} \ {0})}
d d d d
>>> nonlinsolve([(x+y)**2 - 4, x + y - 2], [x, y])
{(-y + 2, y)}
    2. If some of the equations are non-polynomial then `nonlinsolve`
    will call the `substitution` function and return real and complex
    solutions, if present.
>>> from sympy import exp, sin
>>> nonlinsolve([exp(x) - sin(y), y**2 - 4], [x, y])
{(log(sin(2)), 2), (ImageSet(Lambda(_n, I*(2*_n*pi + pi) +
log(sin(2))), Integers()), -2), (ImageSet(Lambda(_n, 2*_n*I*pi +
Mod(log(sin(2)), 2*I*pi)), Integers()), 2)}
    3. If the system is a zero dimensional nonlinear polynomial system, then it
    returns both real and complex solutions (if present), using
    `solve_poly_system`:
>>> from sympy import sqrt
>>> nonlinsolve([x**2 - 2*y**2 -2, x*y - 2], [x, y])
{(-2, -1), (2, 1), (-sqrt(2)*I, sqrt(2)*I), (sqrt(2)*I, -sqrt(2)*I)}
    4. `nonlinsolve` can solve some linear (zero or positive dimensional)
    systems (because it uses the `groebner` function to get the Groebner
    basis and then hands that basis to `substitution` as the new `system`).
    But it is not recommended to solve a linear system with `nonlinsolve`,
    because `linsolve` is better for every kind of linear system.
>>> nonlinsolve([x + 2*y -z - 3, x - y - 4*z + 9 , y + z - 4], [x, y, z])
{(3*z - 5, -z + 4, z)}
    5. A system of polynomial equations where only real solutions are present
    (solved using `solve_poly_system`):
>>> e1 = sqrt(x**2 + y**2) - 10
>>> e2 = sqrt(y**2 + (-x + 10)**2) - 3
>>> nonlinsolve((e1, e2), (x, y))
{(191/20, -3*sqrt(391)/20), (191/20, 3*sqrt(391)/20)}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [x, y])
{(1, 2), (1 + sqrt(5), -sqrt(5) + 2), (-sqrt(5) + 1, 2 + sqrt(5))}
>>> nonlinsolve([x**2 + 2/y - 2, x + y - 3], [y, x])
{(2, 1), (2 + sqrt(5), -sqrt(5) + 1), (-sqrt(5) + 2, 1 + sqrt(5))}
    6. It is better to use symbols instead of trigonometric functions or
    other functions (e.g. replace `sin(x)` with a symbol, replace `f(x)` with
    a symbol and so on; get the soln from `nonlinsolve`, then use `solveset`
    to get the value of `x`).
How nonlinsolve is better than old solver `_solve_system` :
===========================================================
    1. A positive dimensional system solver: nonlinsolve can return a
    solution for a positive dimensional system. It finds the Groebner
    basis of the positive dimensional system (called the basis here), then
    starts solving the equation with the fewest variables in the basis
    using solveset, substituting the solved values into the other
    equations of the basis to get a solution in terms of a minimal set of
    variables. The important points are how the known values are
    substituted and into which equations.
    2. Both real and complex solutions: nonlinsolve returns both real
    and complex solutions. If all the equations in the system are polynomial,
    then `solve_poly_system` is used and both real and complex solutions are
    returned. If not all the equations are polynomial, the `substitution`
    method is used with the polynomial and non-polynomial equation(s) to
    solve for the unsolved variables; there, solveset_real and
    solveset_complex are used to solve for a particular variable, and the
    helper `_solve_using_known_values` inside `substitution` handles both the
    real and the complex case (`substitution` is called whenever any
    non-polynomial equation is present). When a solution is valid, its
    general solution is added to the final result.
    3. Complements and Intersections are added if any: nonlinsolve maintains
    dicts for complements and intersections. If solveset finds a complement
    and/or an intersection with any Interval or set during the execution of
    the `substitution` function, then the complement and/or intersection for
    that variable is added before returning the final solution.
"""
from sympy.polys.polytools import is_zero_dimensional
if not system:
return S.EmptySet
if not symbols:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise ValueError(filldedent(msg))
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
except IndexError:
msg = ('Symbols must be given, for which solution of the '
'system is to be found.')
raise IndexError(filldedent(msg))
if not sym:
msg = ('Symbols or iterable of symbols must be given as '
'second argument, not type %s: %s')
raise ValueError(filldedent(msg % (type(symbols[0]), symbols[0])))
if len(system) == 1 and len(symbols) == 1:
return _solveset_work(system, symbols)
# main code of def nonlinsolve() starts from here
polys, polys_expr, nonpolys, denominators = _separate_poly_nonpoly(
system, symbols)
if len(symbols) == len(polys):
        # If all the equations in the system are polynomial
if is_zero_dimensional(polys, symbols):
# finite number of soln (Zero dimensional system)
try:
return _handle_zero_dimensional(polys, symbols, system)
except NotImplementedError:
                # Right now it doesn't fail for any polynomial system of
                # equations. If `solve_poly_system` fails then the `substitution`
# method will handle it.
result = substitution(
polys_expr, symbols, exclude=denominators)
return result
# positive dimensional system
return _handle_positive_dimensional(polys, symbols, denominators)
else:
        # If not all the equations are polynomial,
# Use `substitution` method for the system
result = substitution(
polys_expr + nonpolys, symbols, exclude=denominators)
return result
| 36.265478 | 93 | 0.568121 | [
"BSD-3-Clause"
] | aktech/sympy | sympy/solvers/solveset.py | 77,318 | Python |
import tensorflow.compat.v1 as tf
import numpy as np
tf.disable_v2_behavior()  # placeholders and Sessions require TF1-style graph mode
m = 1740
x_batch = np.random.rand(m)
y_batch = np.random.rand(1)
weights = np.random.rand(m)
biases = np.random.rand(m)
with tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=(m, ), name='x')
y = tf.placeholder(tf.float32, shape=(1, ), name='y')
# w = tf.Variable(np.random.rand(m), name='W', dtype=tf.float32)
# b = tf.Variable(np.random.rand(m), name='b', dtype=tf.float32)
w = tf.placeholder(tf.float32, shape=(m, ), name='W')
b = tf.placeholder(tf.float32, shape=(m, ), name='b')
mu = tf.constant(1, dtype=tf.float32)
_ = tf.Variable(initial_value=np.random.rand(1))
h = tf.reduce_sum(tf.multiply(w, x))
c = tf.multiply(y, h)
distances = tf.subtract(1., c)
# maximum = tf.maximum(0., distances)
#maximum = tf.boolean_mask(distances, tf.greater(0., distances))
# Look here for gradient of SVM objective function: http://u.cs.biu.ac.il/~jkeshet/teaching/aml2016/sgd_optimization.pdf
maximum = tf.cast(tf.greater(distances, 0.), tf.float32)
g = tf.multiply(maximum, x)
g = tf.multiply(mu, g)
    # Keep the placeholder `w` intact: rebinding the name would make the
    # feed below target the update op and silently skip the computation.
    w_update = tf.subtract(w, g, name='update')
    sess.run(tf.global_variables_initializer())
    feed_dict = {x: x_batch, y: y_batch, w: weights, b: biases}
    sess.run(w_update, feed_dict)
tf.train.Saver().save(sess, 'model.ckpt')
| 30.2 | 124 | 0.646063 | [
"Apache-2.0"
] | VeriGOOD-ML/public | tabla/tabla/benchmarks/onnx/svm_tf.py | 1,359 | Python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class LayerNormNet(nn.Cell):
def __init__(self, begin_norm_axis, begin_params_axis):
super(LayerNormNet, self).__init__()
self.norm = P.LayerNorm(begin_norm_axis, begin_params_axis)
def construct(self, x, gamma, beta):
return self.norm(x, gamma, beta)
def LayerNormReference(begin_norm_axis, begin_params_axis, x, gamma, beta):
begin_norm_axis = begin_norm_axis if begin_norm_axis >= 0 else begin_norm_axis + len(x.shape)
begin_params_axis = begin_params_axis if begin_params_axis >= 0 else begin_params_axis + len(x.shape)
axis = [i for i in range(begin_norm_axis, len(x.shape))]
mean = np.mean(x, axis=tuple(axis), keepdims=True)
var = np.var(x, axis=tuple(axis), keepdims=True)
gamma = gamma.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))
beta = beta.reshape((*((1,) * begin_params_axis), *x.shape[begin_params_axis:]))
y = np.subtract(x, mean) / np.sqrt(var + 1e-12) * gamma + beta
return y, mean, var
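# Illustrative sanity check of the NumPy reference above (not part of the
# original test suite): with unit gamma and zero beta, each normalized row
# should have roughly zero mean and unit variance.
def _layer_norm_reference_sanity_check():
    x = np.random.randn(4, 8).astype(np.float32)
    gamma = np.ones(8).astype(np.float32)
    beta = np.zeros(8).astype(np.float32)
    y, _mean, _var = LayerNormReference(1, 1, x, gamma, beta)
    assert np.allclose(y.mean(axis=1), 0.0, atol=1e-4)
    assert np.allclose(y.var(axis=1), 1.0, atol=1e-3)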
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm0():
begin_norm_axis = 1
begin_params_axis = 1
x_np = np.random.randn(4096, 3072).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm1():
begin_norm_axis = 1
begin_params_axis = 1
x_np = np.random.randn(640, 768).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm3d_1():
begin_norm_axis = -1
begin_params_axis = -1
x_np = np.random.randn(32, 128, 768).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm3d_2():
begin_norm_axis = -1
begin_params_axis = 1
x_np = np.random.randn(32, 128, 768).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm2d_2():
begin_norm_axis = -1
begin_params_axis = 1
x_np = np.random.randn(64, 32).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm2d_3():
begin_norm_axis = -1
begin_params_axis = 1
x_np = np.random.randn(128, 128).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_layernorm2d_4():
begin_norm_axis = 2
begin_params_axis = 1
np.random.seed(42)
x_np = np.random.randn(128, 2, 16, 32).astype(np.float32)
gamma_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
beta_np = np.random.randn(*x_np.shape[begin_params_axis:]).astype(np.float32)
y_np, mean_np, var_np = LayerNormReference(begin_norm_axis, begin_params_axis, x_np, gamma_np, beta_np)
x_ms = Tensor(x_np)
gamma_ms = Tensor(gamma_np)
beta_ms = Tensor(beta_np)
net = LayerNormNet(begin_norm_axis, begin_params_axis)
y_ms, mean_ms, var_ms = net(x_ms, gamma_ms, beta_ms)
assert np.allclose(y_ms.asnumpy(), y_np, rtol=1e-6, atol=1e-4)
assert np.allclose(mean_ms.asnumpy(), mean_np, rtol=1e-6, atol=1e-4)
assert np.allclose(var_ms.asnumpy(), var_np, rtol=1e-6, atol=1e-4)
| 40.92 | 107 | 0.721041 | [
"Apache-2.0"
] | 233-puchi/mindspore | tests/st/ops/cpu/test_layer_norm_op.py | 8,184 | Python |
from rdflib import URIRef, Namespace
from definednamespace import DefinedNamespace
class RDF(DefinedNamespace):
# http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
direction: URIRef # The base direction component of a CompoundLiteral.
first: URIRef # The first item in the subject RDF list.
language: URIRef # The language component of a CompoundLiteral.
object: URIRef # The object of the subject RDF statement.
predicate: URIRef # The predicate of the subject RDF statement.
rest: URIRef # The rest of the subject RDF list after the first item.
subject: URIRef # The subject of the subject RDF statement.
type: URIRef # The subject is an instance of a class.
value: URIRef # Idiomatic property used for structured values.
# http://www.w3.org/2000/01/rdf-schema#Class
Alt: URIRef # The class of containers of alternatives.
Bag: URIRef # The class of unordered containers.
CompoundLiteral: URIRef # A class representing a compound literal.
List: URIRef # The class of RDF Lists.
Property: URIRef # The class of RDF properties.
Seq: URIRef # The class of ordered containers.
Statement: URIRef # The class of RDF statements.
# http://www.w3.org/2000/01/rdf-schema#Datatype
HTML: URIRef # The datatype of RDF literals storing fragments of HTML content
JSON: URIRef # The datatype of RDF literals storing JSON content.
PlainLiteral: URIRef # The class of plain (i.e. untyped) literal values, as used in RIF and OWL 2
XMLLiteral: URIRef # The datatype of XML literal values.
langString: URIRef # The datatype of language-tagged string values
_NS = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
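# Illustrative usage sketch (not part of the original module): with the
# DefinedNamespace machinery, attribute access on RDF is expected to resolve
# against _NS, so RDF.type should equal the hand-built URIRef below.
def _rdf_namespace_sketch():
    manual = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
    return RDF.type == manual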
| 56.514286 | 109 | 0.63094 | [
"CC0-1.0"
] | hsolbrig/definednamespace | tests/data/RDF.py | 1,978 | Python |
# 927030030
seals = [
[600, -510],
[-600, -510],
[150, -720],
[-150, -720],
[0, -280],
]
sm.lockInGameUI(True, False)
sm.hideUser(False)
sm.forcedInput(0)
sm.sendDelay(1000)
for seal in seals:
sm.playSound("eunwolTuto/seal", 100)
sm.avatarOriented("Effect/Direction15.img/effect/tuto/seal/front")
sm.avatarOriented("Effect/Direction15.img/effect/tuto/seal/back")
sm.sendDelay(300)
sm.playSound("eunwolTuto/particle", 100)
sm.moveParticleEff("eunwol_seal", 0, -345, seal[0], seal[1], 1500, 50, 2, 5)
sm.sendDelay(1500)
sm.playSound("eunwolTuto/seal_stone", 100)
sm.showEffect("Effect/Direction15.img/effect/tuto/seal/stone", 0, seal[0], seal[1] + 200, 0, 0, True, 0)
if seal[0] == 0:
sm.sendDelay(600)
else:
sm.sendDelay(1000)
sm.setQRValue(38907, "3")
sm.warp(927030050, 0) | 25.393939 | 108 | 0.658711 | [
"MIT"
] | Bia10/MapleEllinel-v203.4 | scripts/field/eunwol_tuto_3_5.py | 838 | Python |
from data import *
from model import *
from utils import *
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
#import matplotlib.pyplot as plt
#import matplotlib.ticker as ticker
#import numpy as np
#import io
#import torchvision
#from PIL import Image
#import visdom
#vis = visdom.Visdom()
def evaluate(input_seq, encoder, decoder, max_length=MAX_LENGTH):
    # input_lengths = [len(input_seq)]  # NOTE: this earlier version was a serious bug
input_seqs = [indexes_from_sentence(input_lang, input_seq)]
input_lengths = [len(x) for x in input_seqs]
input_batches = Variable(torch.LongTensor(input_seqs), volatile=True).transpose(0, 1)
if USE_CUDA:
input_batches = input_batches.cuda()
# Set to not-training mode to disable dropout
encoder.eval()
decoder.eval()
# Run through encoder
encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None)
# Create starting vectors for decoder
decoder_input = Variable(torch.LongTensor([[SOS_token]]), volatile=True) # SOS
decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Store output words and attention states
decoded_words = []
decoder_attentions = torch.zeros(max_length + 1, max_length + 1)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
decoder_attentions[di,:decoder_attention.size(2)] += decoder_attention[0][0].cpu().data
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
# Next input is chosen word
decoder_input = Variable(torch.LongTensor([[ni]]))
if USE_CUDA: decoder_input = decoder_input.cuda()
# Set back to training mode
encoder.train()
decoder.train()
return decoded_words, decoder_attentions[:di+1, :len(encoder_outputs)]
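# Illustrative usage sketch (not part of the original script): translate a
# single sentence with an encoder/decoder pair trained elsewhere in this
# project. The sentence must only contain words already present in input_lang.
def _example_translate(encoder, decoder, sentence="je suis froid ."):
    words, _attentions = evaluate(sentence, encoder, decoder)
    return ' '.join(words)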
#def show_plot_visdom():
#buf = io.BytesIO()
#plt.savefig(buf)
#buf.seek(0)
#attn_win = 'attention (%s)' % hostname
#vis.image(torchvision.transforms.ToTensor()(Image.open(buf)), win=attn_win, opts={'title': attn_win})
#def show_attention(input_sentence, output_words, attentions):
## Set up figure with colorbar
#fig = plt.figure()
#ax = fig.add_subplot(111)
#cax = ax.matshow(attentions.numpy(), cmap='bone')
#fig.colorbar(cax)
## Set up axes
#ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
#ax.set_yticklabels([''] + output_words)
## Show label at every tick
#ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
#ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
#show_plot_visdom()
#plt.show()
#plt.close()
def evaluate_and_show_attention(input_sentence, encoder, decoder, target_sentence=None):
output_words, attentions = evaluate(input_sentence, encoder, decoder)
output_sentence = ' '.join(output_words)
print('>', input_sentence)
if target_sentence is not None:
print('=', target_sentence)
print('<', output_sentence)
#show_attention(input_sentence, output_words, attentions)
## Show input, target, output text in visdom
#win = 'evaluted (%s)' % hostname
#text = '<p>> %s</p><p>= %s</p><p>< %s</p>' % (input_sentence, target_sentence, output_sentence)
#vis.text(text, win=win, opts={'title': win})
def evaluate_randomly(encoder, decoder):
input_sentence, target_sentence = random.choice(pairs)
evaluate_and_show_attention(input_sentence, encoder, decoder, target_sentence)
#def show_plot(points):
#plt.figure()
#fig, ax = plt.subplots()
#loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals
#ax.yaxis.set_major_locator(loc)
#plt.plot(points)
#show_plot(plot_losses)
#output_words, attentions = evaluate("je suis trop froid .")
#plt.matshow(attentions.numpy())
#show_plot_visdom()
| 31.59854 | 106 | 0.68122 | [
"MIT"
] | iclementine/practical-pytorch | seq2seq-translation-batched/evaluate.py | 4,337 | Python |
import gzip
from codecs import getreader, open
def get_file_handle(file_path):
"""Return a opened file"""
if file_path.endswith(".gz"):
file_handle = getreader("utf-8")(gzip.open(file_path, "r"), errors="replace")
else:
file_handle = open(file_path, "r", encoding="utf-8")
return file_handle
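# Illustrative usage sketch (not part of the original module): open a possibly
# gzipped file and read its first line. The path below is hypothetical.
def _example_usage(path="variants.vcf.gz"):
    handle = get_file_handle(path)
    try:
        return next(handle)
    finally:
        handle.close()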
| 23.428571 | 85 | 0.661585 | [
"BSD-3-Clause"
] | Clinical-Genomics/scout | scout/utils/handle.py | 328 | Python |
#!/usr/bin/python
Liste1 = ["1,1 + 1.1"]
Liste2 = ["python", "ist", "nice"]
Liste3 = ["1,4,2,3,5,8,7,9,6,0"]
Liste3.sort()  # call the method; a bare `Liste3.sort` does not sort anything
Listen = [Liste1, Liste2, Liste3]  # lists are unhashable, so group them in a list instead of a set
print(Listen)
| 19 | 34 | 0.596491 | [
"MIT"
] | matthis-werkstatt/python-3 | kapitel-3 Erste_Schritte_im_interaktiven_modus/main.py | 171 | Python |
# Generated by Django 3.0.3 on 2020-02-13 07:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0002_department'),
]
operations = [
migrations.AlterField(
model_name='department',
name='department_code',
field=models.CharField(max_length=20),
),
]
| 20.684211 | 50 | 0.605598 | [
"MIT"
] | manuel103/pitchant-v1.0- | pitchant/locations/migrations/0003_auto_20200213_0758.py | 393 | Python |
from common.database import db
from flask_restful import fields
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.sql import func
contato_campos = {
'id': fields.Integer(attribute='id'),
'nome': fields.String(attribute='nome'),
'email': fields.String(attribute='email'),
'telefone': fields.String(attribute='telefone'),
'descricao': fields.String(attribute='descricao'),
'isAtendido': fields.Boolean(attribute='is_atendido')
}
'''
Contato (contact) model class.
'''
class ContatoModel(db.Model):
__tablename__ = 'tb_contato'
id = db.Column(db.Integer, primary_key=True)
nome = db.Column(db.String(255))
email = db.Column(db.String(255))
telefone = db.Column(db.String(13))
descricao = db.Column(db.Text())
is_atendido = db.Column(db.Boolean, default=False)
dt_insercao = db.Column(db.DateTime, default=func.current_timestamp())
def __init__(self, nome, email, telefone, descricao, is_atendido):
self.nome = nome
self.email = email
self.telefone = telefone
self.descricao = descricao
self.is_atendido = is_atendido
def __str__(self):
return '<Contato %r>'%(self.nome) | 30.075 | 74 | 0.679967 | [
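# Illustrative usage sketch (not part of the original model): create and
# persist a contact inside an application context. All field values below are
# hypothetical.
def _example_create_contato():
    contato = ContatoModel(
        nome="Maria Silva",
        email="maria@example.com",
        telefone="5583999990000",
        descricao="Pedido de contato enviado pelo formulario do site.",
        is_atendido=False,
    )
    db.session.add(contato)
    db.session.commit()
    return contato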
"Apache-2.0"
] | LADOSSIFPB/LaNoCentro | ServicoApp/models/contato.py | 1,203 | Python |
# coding: utf-8
import urllib
import re
from google.appengine.ext import blobstore
import flask
import flask_wtf
import wtforms
import auth
import config
import model
import util
from main import app
# ###############################################################################
# # List Filters
# ###############################################################################
# @app.route('/resource/', endpoint='resource_grid')
# def resource_grid():
# resource_dbs, cursors = model.Resource.get_dbs(
# model.Resource.query(model.Resource.hotness > 0),
# limit=20, prev_cursor=True, order='-hotness')
# return flask.render_template(
# 'resource/resource_grid.html',
# html_class='resource-grid',
# title=u'تصفح الصور',
# resource_dbs=resource_dbs,
# next_url=util.generate_next_url(cursors.get('next')),
# prev_url=util.generate_next_url(cursors.get('prev')),
# api_url=flask.url_for('api.resource.list'),
# )
# ###############################################################################
# # View
# ###############################################################################
# @app.route('/resource/<int:resource_id>/', endpoint='resource_view')
# def resource_view(resource_id):
# resource_db = model.Resource.get_by_id(resource_id)
# if not resource_db:
# return flask.abort(404)
# user_db = resource_db.user_key.get()
# return flask.render_template(
# 'resource/resource_view.html',
# html_class='resource-view',
# title='%s' % (resource_db.name),
# resource_db=resource_db,
# user_db=user_db,
# api_url=flask.url_for('api.resource', key=resource_db.key.urlsafe()),
# )
###############################################################################
# Update
###############################################################################
class FilterUpdateForm(flask_wtf.FlaskForm):
label = wtforms.TextField(u'عنوان الفلتر', [wtforms.validators.required()])
filter_property = wtforms.TextField(u'اسم الخاصية', [wtforms.validators.required()])
filter_value = wtforms.TextField(u'قيمة الخاصية', [wtforms.validators.required()])
description = wtforms.TextAreaField(u'وصف الفلتر', [wtforms.validators.optional()])
@app.route('/admin/filter/create/', methods=['GET', 'POST'])
@app.route('/admin/filter/<string:filter_id>/update/', methods=['GET', 'POST'], endpoint='filter_update')
@auth.admin_required
def filter_update(filter_id=''):
if filter_id:
filter_db = model.Filter.get_by_id(filter_id)
else:
filter_db = model.Filter(
        id='{}-{}'.format(util.param('filter_property'), util.param('filter_value')),
label='')
if not filter_db or not auth.current_user_db().admin:
return flask.abort(404)
form = FilterUpdateForm(obj=filter_db)
if form.validate_on_submit():
form.populate_obj(filter_db)
filter_db.put()
return flask.redirect(flask.url_for('admin_filter_list'))
return flask.render_template(
'filter/filter_update.html',
html_class='filter-update',
title='%s' % (filter_db.label or 'فلتر جديد'),
filter_db=filter_db,
form=form,
api_url=flask.url_for('api.filter', key=filter_db.key.urlsafe()) if filter_db.key else '',
)
###############################################################################
# Admin Filters List
###############################################################################
@app.route('/admin/filter/', endpoint='admin_filter_list')
@auth.admin_required
def admin_filter_list():
filter_dbs, cursors = model.Filter.get_dbs(
limit=30, prev_cursor=True, order='-photos_count')
return flask.render_template(
'filter/filter_list.html',
html_class='filter-list',
title=u'قائمة الفلاتر',
filter_dbs=filter_dbs,
next_url=util.generate_next_url(cursors.get('next')),
prev_url=util.generate_next_url(cursors.get('prev')),
api_url=flask.url_for('api.filter.list'),
)
| 34.06087 | 105 | 0.594077 | [
"MIT"
] | manshar/birwaz | main/control/filter.py | 3,987 | Python |
import datetime
import warnings
import pendulum
from dagster import check
from dagster.core.definitions.partition import PartitionSetDefinition
from dagster.core.errors import DagsterInvalidDefinitionError
from dagster.utils.partitions import (
DEFAULT_DATE_FORMAT,
DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE,
DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE,
DEFAULT_MONTHLY_FORMAT,
create_offset_partition_selector,
schedule_partition_range,
)
from ..mode import DEFAULT_MODE_NAME
from ..schedule import ScheduleDefinition
# Error messages are long
# pylint: disable=C0301
def schedule(
cron_schedule,
pipeline_name,
name=None,
tags=None,
tags_fn=None,
solid_selection=None,
mode="default",
should_execute=None,
environment_vars=None,
execution_timezone=None,
):
"""Create a schedule.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the environment
dict for the scheduled execution.
Args:
cron_schedule (str): A valid cron string specifying when the schedule will run, e.g.,
``'45 23 * * 6'`` for a schedule that runs at 11:45 PM every Saturday.
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
name (Optional[str]): The name of the schedule to create.
tags (Optional[Dict[str, str]]): A dictionary of tags (string key-value pairs) to attach
to the scheduled runs.
tags_fn (Optional[Callable[[ScheduleExecutionContext], Optional[Dict[str, str]]]]): A function
that generates tags to attach to the schedules runs. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a dictionary of tags (string
key-value pairs). You may set only one of ``tags`` and ``tags_fn``.
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[[ScheduleExecutionContext], bool]]): A function that runs at
            schedule execution time to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
"""
def inner(fn):
check.callable_param(fn, "fn")
schedule_name = name or fn.__name__
return ScheduleDefinition(
name=schedule_name,
cron_schedule=cron_schedule,
pipeline_name=pipeline_name,
run_config_fn=fn,
tags=tags,
tags_fn=tags_fn,
solid_selection=solid_selection,
mode=mode,
should_execute=should_execute,
environment_vars=environment_vars,
execution_timezone=execution_timezone,
)
return inner
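# Illustrative usage sketch (not part of this module): a cron-based schedule
# built with the decorator above. The pipeline name and run-config keys are
# hypothetical.
def _example_cron_schedule():
    @schedule(cron_schedule="45 23 * * 6", pipeline_name="my_pipeline")
    def my_saturday_schedule(_context):
        # The decorated function returns the run config for each scheduled run.
        return {"solids": {"my_solid": {"config": {"full_refresh": True}}}}
    return my_saturday_schedule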
def monthly_schedule(
pipeline_name,
start_date,
name=None,
execution_day_of_month=1,
execution_time=datetime.time(0, 0),
tags_fn_for_date=None,
solid_selection=None,
mode="default",
should_execute=None,
environment_vars=None,
end_date=None,
execution_timezone=None,
):
"""Create a schedule that runs monthly.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the environment
dict for the scheduled execution.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_day_of_month (int): The day of the month on which to run the schedule (must be
            between 1 and 31).
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
            schedule execution time to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.str_param(pipeline_name, "pipeline_name")
check.int_param(execution_day_of_month, "execution_day")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
if (
start_date.day != 1
or start_date.hour != 0
or start_date.minute != 0
or start_date.second != 0
):
warnings.warn(
"`start_date` must be at the beginning of the first day of the month for a monthly "
"schedule. Use `execution_day_of_month` and `execution_time` to execute the schedule "
"at a specific time within the month. For example, to run the schedule at 3AM on the "
"23rd of each month starting in October, your schedule definition would look like:"
"""
@monthly_schedule(
start_date=datetime.datetime(2020, 10, 1),
execution_day_of_month=23,
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
if execution_day_of_month <= 0 or execution_day_of_month > 31:
raise DagsterInvalidDefinitionError(
"`execution_day_of_month={}` is not valid for monthly schedule. Execution day must be "
"between 1 and 31".format(execution_day_of_month)
)
cron_schedule = "{minute} {hour} {day} * *".format(
minute=execution_time.minute, hour=execution_time.hour, day=execution_day_of_month
)
fmt = DEFAULT_MONTHLY_FORMAT
execution_time_to_partition_fn = (
lambda d: pendulum.instance(d)
.replace(hour=0, minute=0)
.subtract(months=1, days=execution_day_of_month - 1)
)
partition_fn = schedule_partition_range(
start_date,
end=end_date,
cron_schedule=cron_schedule,
fmt=fmt,
timezone=execution_timezone,
execution_time_to_partition_fn=execution_time_to_partition_fn,
)
def inner(fn):
check.callable_param(fn, "fn")
schedule_name = name or fn.__name__
tags_fn_for_partition_value = lambda partition: {}
if tags_fn_for_date:
tags_fn_for_partition_value = lambda partition: tags_fn_for_date(partition.value)
partition_set = PartitionSetDefinition(
name="{}_partitions".format(schedule_name),
pipeline_name=pipeline_name,
partition_fn=partition_fn,
run_config_fn_for_partition=lambda partition: fn(partition.value),
solid_selection=solid_selection,
tags_fn_for_partition=tags_fn_for_partition_value,
mode=mode,
)
return partition_set.create_schedule_definition(
schedule_name,
cron_schedule,
should_execute=should_execute,
environment_vars=environment_vars,
partition_selector=create_offset_partition_selector(
execution_time_to_partition_fn=execution_time_to_partition_fn
),
execution_timezone=execution_timezone,
)
return inner
def weekly_schedule(
pipeline_name,
start_date,
name=None,
execution_day_of_week=0,
execution_time=datetime.time(0, 0),
tags_fn_for_date=None,
solid_selection=None,
mode="default",
should_execute=None,
environment_vars=None,
end_date=None,
execution_timezone=None,
):
"""Create a schedule that runs weekly.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the environment
dict for the scheduled execution.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_day_of_week (int): The day of the week on which to run the schedule. Must be
between 0 (Sunday) and 6 (Saturday).
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
            schedule execution time to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.str_param(pipeline_name, "pipeline_name")
check.int_param(execution_day_of_week, "execution_day_of_week")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
if start_date.hour != 0 or start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of a day for a weekly schedule. "
"Use `execution_time` to execute the schedule at a specific time of day. For example, "
"to run the schedule at 3AM each Tuesday starting on 10/20/2020, your schedule "
"definition would look like:"
"""
@weekly_schedule(
start_date=datetime.datetime(2020, 10, 20),
execution_day_of_week=1,
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
if execution_day_of_week < 0 or execution_day_of_week >= 7:
raise DagsterInvalidDefinitionError(
"`execution_day_of_week={}` is not valid for weekly schedule. Execution day must be "
"between 0 [Sunday] and 6 [Saturday]".format(execution_day_of_week)
)
cron_schedule = "{minute} {hour} * * {day}".format(
minute=execution_time.minute, hour=execution_time.hour, day=execution_day_of_week
)
fmt = DEFAULT_DATE_FORMAT
day_difference = (execution_day_of_week - (start_date.weekday() + 1)) % 7
execution_time_to_partition_fn = (
lambda d: pendulum.instance(d)
.replace(hour=0, minute=0)
.subtract(weeks=1, days=day_difference)
)
partition_fn = schedule_partition_range(
start_date,
end=end_date,
cron_schedule=cron_schedule,
fmt=fmt,
timezone=execution_timezone,
execution_time_to_partition_fn=execution_time_to_partition_fn,
)
def inner(fn):
check.callable_param(fn, "fn")
schedule_name = name or fn.__name__
tags_fn_for_partition_value = lambda partition: {}
if tags_fn_for_date:
tags_fn_for_partition_value = lambda partition: tags_fn_for_date(partition.value)
partition_set = PartitionSetDefinition(
name="{}_partitions".format(schedule_name),
pipeline_name=pipeline_name,
partition_fn=partition_fn,
run_config_fn_for_partition=lambda partition: fn(partition.value),
solid_selection=solid_selection,
tags_fn_for_partition=tags_fn_for_partition_value,
mode=mode,
)
return partition_set.create_schedule_definition(
schedule_name,
cron_schedule,
should_execute=should_execute,
environment_vars=environment_vars,
partition_selector=create_offset_partition_selector(
execution_time_to_partition_fn=execution_time_to_partition_fn,
),
execution_timezone=execution_timezone,
)
return inner
def daily_schedule(
pipeline_name,
start_date,
name=None,
execution_time=datetime.time(0, 0),
tags_fn_for_date=None,
solid_selection=None,
mode="default",
should_execute=None,
environment_vars=None,
end_date=None,
execution_timezone=None,
):
"""Create a schedule that runs daily.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the environment
dict for the scheduled execution.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create.
execution_time (datetime.time): The time at which to execute the schedule.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
            schedule execution time to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
"""
check.str_param(pipeline_name, "pipeline_name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_str_param(name, "name")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.opt_str_param(execution_timezone, "execution_timezone")
if start_date.hour != 0 or start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of a day for a daily schedule. "
"Use `execution_time` to execute the schedule at a specific time of day. For example, "
"to run the schedule at 3AM each day starting on 10/20/2020, your schedule "
"definition would look like:"
"""
@daily_schedule(
start_date=datetime.datetime(2020, 10, 20),
execution_time=datetime.time(3, 0)
):
def my_schedule_definition(_):
...
"""
)
cron_schedule = "{minute} {hour} * * *".format(
minute=execution_time.minute, hour=execution_time.hour
)
fmt = DEFAULT_DATE_FORMAT
execution_time_to_partition_fn = (
lambda d: pendulum.instance(d).replace(hour=0, minute=0).subtract(days=1,)
)
partition_fn = schedule_partition_range(
start_date,
end=end_date,
cron_schedule=cron_schedule,
fmt=fmt,
timezone=execution_timezone,
execution_time_to_partition_fn=execution_time_to_partition_fn,
)
def inner(fn):
check.callable_param(fn, "fn")
schedule_name = name or fn.__name__
tags_fn_for_partition_value = lambda partition: {}
if tags_fn_for_date:
tags_fn_for_partition_value = lambda partition: tags_fn_for_date(partition.value)
partition_set = PartitionSetDefinition(
name="{}_partitions".format(schedule_name),
pipeline_name=pipeline_name,
partition_fn=partition_fn,
run_config_fn_for_partition=lambda partition: fn(partition.value),
solid_selection=solid_selection,
tags_fn_for_partition=tags_fn_for_partition_value,
mode=mode,
)
return partition_set.create_schedule_definition(
schedule_name,
cron_schedule,
should_execute=should_execute,
environment_vars=environment_vars,
partition_selector=create_offset_partition_selector(
execution_time_to_partition_fn=execution_time_to_partition_fn,
),
execution_timezone=execution_timezone,
)
return inner
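# Illustrative usage sketch (not part of this module): a daily schedule whose
# decorated function receives the partition date and turns it into run config.
# The pipeline name and config keys are hypothetical.
def _example_daily_schedule():
    @daily_schedule(
        pipeline_name="my_pipeline",
        start_date=datetime.datetime(2020, 10, 20),
        execution_time=datetime.time(3, 0),
    )
    def my_daily_schedule(date):
        # `date` is the partition date handed in by the partition set.
        return {"solids": {"my_solid": {"config": {"date": date.strftime("%Y-%m-%d")}}}}
    return my_daily_schedule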
def hourly_schedule(
pipeline_name,
start_date,
name=None,
execution_time=datetime.time(0, 0),
tags_fn_for_date=None,
solid_selection=None,
mode="default",
should_execute=None,
environment_vars=None,
end_date=None,
execution_timezone=None,
):
"""Create a schedule that runs hourly.
The decorated function will be called as the ``run_config_fn`` of the underlying
:py:class:`~dagster.ScheduleDefinition` and should take a
:py:class:`~dagster.ScheduleExecutionContext` as its only argument, returning the environment
dict for the scheduled execution.
Args:
pipeline_name (str): The name of the pipeline to execute when the schedule runs.
start_date (datetime.datetime): The date from which to run the schedule.
name (Optional[str]): The name of the schedule to create. By default, this will be the name
of the decorated function.
execution_time (datetime.time): The time at which to execute the schedule. Only the minutes
component will be respected -- the hour should be 0, and will be ignored if it is not 0.
tags_fn_for_date (Optional[Callable[[datetime.datetime], Optional[Dict[str, str]]]]): A
function that generates tags to attach to the schedules runs. Takes the date of the
schedule run and returns a dictionary of tags (string key-value pairs).
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute when the schedule runs. e.g. ``['*some_solid+', 'other_solid']``
mode (Optional[str]): The pipeline mode in which to execute this schedule.
(Default: 'default')
should_execute (Optional[Callable[ScheduleExecutionContext, bool]]): A function that runs at
            schedule execution time to determine whether a schedule should execute or skip. Takes a
:py:class:`~dagster.ScheduleExecutionContext` and returns a boolean (``True`` if the
schedule should execute). Defaults to a function that always returns ``True``.
environment_vars (Optional[Dict[str, str]]): Any environment variables to set when executing
the schedule.
end_date (Optional[datetime.datetime]): The last time to run the schedule to, defaults to
current time.
execution_timezone (Optional[str]): Timezone in which the schedule should run. Only works
with DagsterDaemonScheduler, and must be set when using that scheduler.
"""
check.opt_str_param(name, "name")
check.inst_param(start_date, "start_date", datetime.datetime)
check.opt_inst_param(end_date, "end_date", datetime.datetime)
check.opt_callable_param(tags_fn_for_date, "tags_fn_for_date")
check.opt_nullable_list_param(solid_selection, "solid_selection", of_type=str)
mode = check.opt_str_param(mode, "mode", DEFAULT_MODE_NAME)
check.opt_callable_param(should_execute, "should_execute")
check.opt_dict_param(environment_vars, "environment_vars", key_type=str, value_type=str)
check.str_param(pipeline_name, "pipeline_name")
check.inst_param(execution_time, "execution_time", datetime.time)
check.opt_str_param(execution_timezone, "execution_timezone")
if start_date.minute != 0 or start_date.second != 0:
warnings.warn(
"`start_date` must be at the beginning of the hour for an hourly schedule. "
"Use `execution_time` to execute the schedule at a specific time within the hour. For "
"example, to run the schedule each hour at 15 minutes past the hour starting at 3AM "
"on 10/20/2020, your schedule definition would look like:"
"""
@hourly_schedule(
start_date=datetime.datetime(2020, 10, 20, 3),
execution_time=datetime.time(0, 15)
            )
def my_schedule_definition(_):
...
"""
)
if execution_time.hour != 0:
warnings.warn(
"Hourly schedule {schedule_name} created with:\n"
"\tschedule_time=datetime.time(hour={hour}, minute={minute}, ...)."
"Since this is an hourly schedule, the hour parameter will be ignored and the schedule "
"will run on the {minute} mark for the previous hour interval. Replace "
"datetime.time(hour={hour}, minute={minute}, ...) with "
"datetime.time(minute={minute}, ...) to fix this warning."
)
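    # Run at the configured minute of every hour, e.g. execution_time=datetime.time(0, 15)
    # yields the cron expression "15 * * * *".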
cron_schedule = "{minute} * * * *".format(minute=execution_time.minute)
fmt = (
DEFAULT_HOURLY_FORMAT_WITH_TIMEZONE
if execution_timezone
else DEFAULT_HOURLY_FORMAT_WITHOUT_TIMEZONE
)
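    # Map an execution instant back to its partition: step back one hour, then remove the
    # minute offset between execution_time and start_date to land on the previous hourly interval.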
execution_time_to_partition_fn = lambda d: pendulum.instance(d).subtract(
hours=1, minutes=(execution_time.minute - start_date.minute) % 60
)
partition_fn = schedule_partition_range(
start_date,
end=end_date,
cron_schedule=cron_schedule,
fmt=fmt,
timezone=execution_timezone,
execution_time_to_partition_fn=execution_time_to_partition_fn,
)
def inner(fn):
check.callable_param(fn, "fn")
schedule_name = name or fn.__name__
tags_fn_for_partition_value = lambda partition: {}
if tags_fn_for_date:
tags_fn_for_partition_value = lambda partition: tags_fn_for_date(partition.value)
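        # Each hourly partition maps to one scheduled run; the decorated function turns the
        # partition's datetime value into that run's config.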
partition_set = PartitionSetDefinition(
name="{}_partitions".format(schedule_name),
pipeline_name=pipeline_name,
partition_fn=partition_fn,
run_config_fn_for_partition=lambda partition: fn(partition.value),
solid_selection=solid_selection,
tags_fn_for_partition=tags_fn_for_partition_value,
mode=mode,
)
return partition_set.create_schedule_definition(
schedule_name,
cron_schedule,
should_execute=should_execute,
environment_vars=environment_vars,
partition_selector=create_offset_partition_selector(
execution_time_to_partition_fn=execution_time_to_partition_fn,
),
execution_timezone=execution_timezone,
)
return inner
| 43.548644 | 102 | 0.688042 | [
"Apache-2.0"
] | alex-treebeard/dagster | python_modules/dagster/dagster/core/definitions/decorators/schedule.py | 27,305 | Python |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
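# A minimal example config file for this script (values are illustrative; point them at
# your own node). Only the keys below are read:
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=bitcoinrpc
#   rpcpassword=change-this-password
#   min_height=0
#   max_height=313000
#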
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
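        # Build a JSON-RPC 1.1 request with a fresh id and POST it to the node.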
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| 25.803738 | 78 | 0.673669 | [
"MIT"
] | ShastaFarEye/mazacoin-new | contrib/linearize/linearize-hashes.py | 2,761 | Python |
import pygame
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import scene.scene
import scene.shape
pygame.init()
pygame.display.set_caption('draw alpha')
screen = pygame.display.set_mode((1280, 768))
running = True
s1 = scene.scene.Scene(1280, 768, 0, 0, 1280, 768)
c1 = scene.shape.Circle(100, 100, (255, 0, 0, 128), 30, 1)
c1.transform.move(200, 100)
c2 = scene.shape.Circle(600, 600, (255, 0, 0, 128), 300, 1)
c2.transform.move(200, 100)
r1 = scene.shape.Rect(100, 300, (0, 255, 0, 128), 1)
r1.transform.move(300, 100)
s1.attach('circle_01', c1)
s1.attach('circle_02', c2)
s1.attach('rect_01', r1)
s1.zoom(1)
s1.pan(0, -500)
#s1.locate(300, 300)
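# Main loop: each frame, redraw the scene while nudging the shapes, panning the view and
# applying a small zoom change.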
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(scene.shape.Color.WHITE)
s1.draw(screen)
cir = s1.find('circle_01')
cir.transform.move(0.1, 0.1)
r = s1.find('rect_01')
r.transform.move(0.0, 0.1)
s1.pan(-0.05, 0.1)
s1.zoom(0.999)
pygame.display.flip()
| 21.612245 | 59 | 0.644004 | [
"MIT"
] | nolmegamelab/PlaySimulator | test/test_scene.py | 1,059 | Python |
"""Checks the repository for updates."""
import os
import sys
import urllib
import imp
from hashlib import md5
from inspect import getsourcelines
from threading import Thread
from retriever import REPOSITORY, VERSION, MASTER_BRANCH, REPO_URL, SCRIPT_WRITE_PATH
from retriever.lib.models import file_exists
global abort, executable_name
abort = False
executable_name = "retriever"
def download_from_repository(filepath, newpath, repo=REPOSITORY):
"""Downloads the latest version of a file from the repository."""
try:
filename = filepath.split('/')[-1]
def reporthook(a,b,c):
print "%3.1f-%s" % (min(100, float(a * b) / c * 100), filename),
sys.stdout.flush()
urllib.urlretrieve(repo + filepath, newpath, reporthook=reporthook)
    except:
        raise
def more_recent(latest, current):
"""Given two version number strings, returns True if the first is more recent."""
latest_parts = latest.split('.')
current_parts = current.split('.')
for n in range(len(latest_parts)):
l = latest_parts[n]
if len(current_parts) < (n + 1):
return (l != "rc")
c = current_parts[n]
if l > c:
return True
elif c > l:
return False
return (len(current_parts) > (n + 1) and current_parts[n + 1] == "rc")
def check_for_updates(graphical=False):
"""Check for updates to scripts and executable."""
if graphical:
import wx
app = wx.App(False)
from retriever.app.splash import Splash
splash = Splash()
#splash.Show()
splash.SetText("\tLoading...")
class update_progress:
def __init__(self, parent):
self.parent = parent
def write(self, s):
if s != "\n":
try:
self.parent.SetText('\t' + s)
except:
pass
def flush(self):
pass
sys.stdout = update_progress(splash)
init = InitThread()
init.run()
if graphical:
splash.Hide()
sys.stdout = sys.__stdout__
class InitThread(Thread):
"""This thread performs all of the necessary updates while the splash screen
is shown.
1. Check master/version.txt to get the latest version (Windows only).
2. Prompt for update if necessary (Windows only).
3. Download latest versions of scripts from current branch."""
def run(self):
try:
running_from = os.path.basename(sys.argv[0])
# NOTE: exe auto-update functionality has been temporarily disabled
# since the binaries were moved to AWS.
if False:#running_from[-4:] == ".exe":
if os.path.isfile('retriever_old.exe') and running_from != 'retriever_old.exe':
try:
os.remove('retriever_old.exe')
except:
pass
# Windows: open master branch version file to find out most recent executable version
try:
version_file = urllib.urlopen(MASTER_BRANCH + "version.txt")
except IOError:
print "Couldn't open version.txt from repository"
return
latest = version_file.readline().strip('\n')
if more_recent(latest, VERSION):
import wx
msg = "You're running version " + VERSION + "."
msg += '\n\n'
msg += "Version " + latest + " is available. Do you want to upgrade?"
choice = wx.MessageDialog(None, msg, "Update", wx.YES_NO)
if choice.ShowModal() == wx.ID_YES:
print "Updating to latest version. Please wait..."
try:
if not "_old" in running_from:
os.rename(running_from,
'.'.join(running_from.split('.')[:-1])
+ "_old." + running_from.split('.')[-1])
except:
pass
download_from_repository("windows/" + executable_name + ".exe",
executable_name + ".exe",
repo=REPO_URL + latest + "/")
sys.stdout = sys.__stdout__
wx.MessageBox("Update complete. The program will now restart.")
os.execv(executable_name + ".exe", sys.argv)
sys.exit()
version_file.close()
# open version.txt for current release branch and get script versions
version_file = urllib.urlopen(REPOSITORY + "version.txt")
version_file.readline()
scripts = []
for line in version_file:
scripts.append(line.strip('\n').split(','))
# get script files
if not os.path.isdir(SCRIPT_WRITE_PATH):
os.makedirs(SCRIPT_WRITE_PATH)
for script in scripts:
script_name = script[0]
if len(script) > 1:
script_version = script[1]
else:
script_version = None
if not file_exists(os.path.join("scripts", script_name)):
# File doesn't exist: download it
print "Downloading script: " + script_name
download_from_repository("scripts/" + script_name,
os.path.join(SCRIPT_WRITE_PATH, script_name))
elif script_version:
# File exists: import and check MD5 sum
need_to_download = False
try:
file, pathname, desc = imp.find_module(''.join(script_name.split('.')[:-1]),
["scripts"])
new_module = imp.load_module(script_name, file, pathname, desc)
m = md5()
m.update(''.join(getsourcelines(new_module)[0]).replace("\r\n", "\n"))
m = m.hexdigest()
need_to_download = script_version != m
except:
pass
if need_to_download:
try:
os.remove(os.path.join("scripts", script_name))
download_from_repository("scripts/" + script_name,
os.path.join(SCRIPT_WRITE_PATH, script_name))
except Exception as e:
print e
pass
except:
raise
return
| 38.280423 | 113 | 0.482377 | [
"MIT"
] | brymz/retriever | lib/repository.py | 7,235 | Python |
# -*- coding: utf-8 -*-
"""
Store data in the Sqlite3 Database
Table1
"""
import os
import sys
import codecs
import sqlite3
from common import log
from store.model import Question, Answer, Person, Topic
DB_PATH = 'spiderman.db'
logger = log.Logger(name='store')
def init_all_dbs():
"""
call it when creating database
:return:
"""
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
exec_sql = "create table Question (id INTEGER primary key, content text, user VARCHAR(20), date VARCHAR(30))"
cursor.execute(exec_sql)
exec_sql = "create table People (id INTEGER primary key, question_id INTEGER, content text, user VARCHAR(20), date VARCHAR(30))"
cursor.execute(exec_sql)
conn.commit()
conn.close()
def store_to_file(filename, question, answers):
f = open(filename, 'w+')
f.write(question)
for ans in answers:
f.write(ans)
f.close()
logger.info("Saved to file %s" % filename)
def store_new_question(question):
assert isinstance(question, Question), "param `question` should be model.Question's instance"
conn = sqlite3.connect(DB_PATH)
cursor = conn.cursor()
sql = "INSERT into Question (id, content, user, date) VALUES (%s, %s, %s, %s)" % question()
cursor.execute(sql)
conn.commit()
conn.close()
def init_people_file(directory):
if directory[-1] != '/':
directory += '/'
path = directory + 'people.csv'
if not os.path.exists(path):
columns = [u'人物昵称', u'人物签名', u'人物标签', u'回答数', u'提问数', u'文章数', u'专栏数', u'想法数',
u'总赞同数', u'总感谢数', u'总收藏数', u'总编辑数', u'总关注数', u'被关注数', u'关注话题', u'关注专栏',
u'关注问题', u'收藏夹', u'动态']
with codecs.open(path, 'a+', 'utf-8') as f:
line = ','.join(columns)
line += '\n'
f.write(line)
logger.info("Created people information file: %s" % path)
return path
def init_question_file(directory):
if directory[-1] != '/':
directory += '/'
path = directory + 'question.csv'
if not os.path.exists(path):
columns = [u'问题ID', u'问题标题', u'问题描述', u'问题关注数', u'问题浏览数', u'问题评论数', u'URL', u'回答文件']
with codecs.open(path, 'a+', 'utf-8') as f:
line = ','.join(columns)
line += '\n'
f.write(line)
logger.info("Created question information file: %s" % path)
return path
def save_file(content_type, content):
if content_type == 'people':
        # store person (user) information -- csv
dir_path = './result/people/'
if not os.path.exists(dir_path):
os.mkdir(dir_path)
assert isinstance(content, Person), "use Person class instead of raw content"
if content.name == '':
return
path = dir_path+'%s.txt' % content.name
# path = init_people_file(dir_path)
columns = [u'人物昵称', u'人物签名', u'人物标签', u'回答数', u'提问数', u'文章数', u'专栏数', u'想法数',
u'总赞同数', u'总感谢数', u'总收藏数', u'总编辑数', u'总关注数', u'被关注数', u'关注话题', u'关注专栏',
u'关注问题', u'收藏夹', u'动态']
content = content.to_line()
line = [u'[%s]:%s' % (x, y) for (x, y) in zip(columns, content)]
line = '\n'.join(line)
with codecs.open(path, 'a+', 'utf-8') as f:
f.write(line)
logger.info('people saved!')
elif content_type == 'question':
        # store question information -- csv
dir_path = './result/question/'
if not os.path.exists(dir_path):
os.mkdir(dir_path)
path = init_question_file(dir_path)
# path = dir_path + 'question.txt'
assert isinstance(content, Question), "use Question class instead of raw content"
with codecs.open(path, 'a+', 'utf-8') as f:
f.write(u'%s\n' % content.to_csv_line())
logger.info('question saved')
elif content_type == 'answers':
"""
content format: {id:id/name, answers:[]}
"""
assert isinstance(content, dict), "use Dict: {filename:xx, url:url, content: content, answers:[AnswerObjects]}"
dir_path = './result/answers/'
if not os.path.exists(dir_path):
os.mkdir(dir_path)
path = dir_path + content['filename']
with codecs.open(path, 'a+', 'utf-8') as f:
f.write(u'[Question]:%s\n' % content['content'])
f.write(u'[URL]:%s\n\n' % content['url'])
answers = content['answers']
for ans in answers:
f.write(u'%s\n' % ans)
logger.info("answers saved")
elif content_type == 'topic':
assert isinstance(content, Topic), "use Tpoc class instead of raw content"
dir_path = './result/topic/'
if not os.path.exists(dir_path):
os.mkdir(dir_path)
path = dir_path + 'topic_%s.txt' % content.topic_id
with codecs.open(path, 'a+', 'utf-8') as f:
f.write(u'[标题]%s\n' % content.title)
f.write(u'[类型]%s\n' % content.topic_type)
for question in content.questions:
f.write(u'[问题]:%s\n[回答作者]%s\n[回答内容]:%s\n[评论数]:%s\n\n' % question)
logger.info('topic saved')
else:
pass
| 26.156566 | 132 | 0.564202 | [
"MIT"
] | mengfanShi/SpiderMan | store/store.py | 5,567 | Python |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class BgpIpv6Peer(Base):
"""Bgp IPv6 Peer
The BgpIpv6Peer class encapsulates a list of bgpIpv6Peer resources that are managed by the user.
A list of resources can be retrieved from the server using the BgpIpv6Peer.find() method.
The list can be managed by using the BgpIpv6Peer.add() and BgpIpv6Peer.remove() methods.
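
    A minimal usage sketch (the parent object chain below is assumed, not taken from this file):

        # bgp_v6_peers = ixnetwork.Topology.find().DeviceGroup.find().Ethernet.find().Ipv6.find().BgpIpv6Peer
        # peer = bgp_v6_peers.add(Name='bgp_v6_peer-1')    # create a new peer
        # bgp_v6_peers.find(Name='bgp_v6_peer-1')          # retrieve configured peers
        # peer.remove()                                    # delete it again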
"""
__slots__ = ()
_SDM_NAME = 'bgpIpv6Peer'
_SDM_ATT_MAP = {
'ActAsRestarted': 'actAsRestarted',
'Active': 'active',
'AdvSrv6SidInIgp': 'advSrv6SidInIgp',
'AdvertiseEndOfRib': 'advertiseEndOfRib',
'AdvertiseEvpnRoutesForOtherVtep': 'advertiseEvpnRoutesForOtherVtep',
'AdvertiseSRv6SID': 'advertiseSRv6SID',
'AdvertiseTunnelEncapsulationExtendedCommunity': 'advertiseTunnelEncapsulationExtendedCommunity',
'AlwaysIncludeTunnelEncExtCommunity': 'alwaysIncludeTunnelEncExtCommunity',
'AsSetMode': 'asSetMode',
'Authentication': 'authentication',
'AutoGenSegmentLeftValue': 'autoGenSegmentLeftValue',
'BgpFsmState': 'bgpFsmState',
'BgpId': 'bgpId',
'BgpLsAsSetMode': 'bgpLsAsSetMode',
'BgpLsEnableAsPathSegments': 'bgpLsEnableAsPathSegments',
'BgpLsEnableCluster': 'bgpLsEnableCluster',
'BgpLsEnableExtendedCommunity': 'bgpLsEnableExtendedCommunity',
'BgpLsNoOfASPathSegments': 'bgpLsNoOfASPathSegments',
'BgpLsNoOfClusters': 'bgpLsNoOfClusters',
'BgpLsNoOfCommunities': 'bgpLsNoOfCommunities',
'BgpLsOverridePeerAsSetMode': 'bgpLsOverridePeerAsSetMode',
'BgpUnnumbered': 'bgpUnnumbered',
'CapabilityIpV4Mdt': 'capabilityIpV4Mdt',
'CapabilityIpV4Mpls': 'capabilityIpV4Mpls',
'CapabilityIpV4MplsVpn': 'capabilityIpV4MplsVpn',
'CapabilityIpV4Multicast': 'capabilityIpV4Multicast',
'CapabilityIpV4MulticastVpn': 'capabilityIpV4MulticastVpn',
'CapabilityIpV4Unicast': 'capabilityIpV4Unicast',
'CapabilityIpV6Mpls': 'capabilityIpV6Mpls',
'CapabilityIpV6MplsVpn': 'capabilityIpV6MplsVpn',
'CapabilityIpV6Multicast': 'capabilityIpV6Multicast',
'CapabilityIpV6MulticastVpn': 'capabilityIpV6MulticastVpn',
'CapabilityIpV6Unicast': 'capabilityIpV6Unicast',
'CapabilityIpv4MplsAddPath': 'capabilityIpv4MplsAddPath',
'CapabilityIpv4UnicastAddPath': 'capabilityIpv4UnicastAddPath',
'CapabilityIpv6MplsAddPath': 'capabilityIpv6MplsAddPath',
'CapabilityIpv6UnicastAddPath': 'capabilityIpv6UnicastAddPath',
'CapabilityLinkStateNonVpn': 'capabilityLinkStateNonVpn',
'CapabilityLinkStateVpn': 'capabilityLinkStateVpn',
'CapabilityNHEncodingCapabilities': 'capabilityNHEncodingCapabilities',
'CapabilityRouteConstraint': 'capabilityRouteConstraint',
'CapabilityRouteRefresh': 'capabilityRouteRefresh',
'CapabilitySRTEPoliciesV4': 'capabilitySRTEPoliciesV4',
'CapabilitySRTEPoliciesV6': 'capabilitySRTEPoliciesV6',
'CapabilityVpls': 'capabilityVpls',
'Capabilityipv4UnicastFlowSpec': 'capabilityipv4UnicastFlowSpec',
'Capabilityipv6UnicastFlowSpec': 'capabilityipv6UnicastFlowSpec',
'ConfigureKeepaliveTimer': 'configureKeepaliveTimer',
'ConnectedVia': 'connectedVia',
'CopyTtl': 'copyTtl',
'Count': 'count',
'CustomSidType': 'customSidType',
'DescriptiveName': 'descriptiveName',
'DiscardIxiaGeneratedRoutes': 'discardIxiaGeneratedRoutes',
'DiscoveredDutIp': 'discoveredDutIp',
'DowntimeInSec': 'downtimeInSec',
'DutIp': 'dutIp',
'EnSRv6DataPlane': 'enSRv6DataPlane',
'Enable4ByteAs': 'enable4ByteAs',
'EnableBfdRegistration': 'enableBfdRegistration',
'EnableBgpId': 'enableBgpId',
'EnableBgpIdSameAsRouterId': 'enableBgpIdSameAsRouterId',
'EnableBgpLsCommunity': 'enableBgpLsCommunity',
'EnableEpeTraffic': 'enableEpeTraffic',
'EnableGracefulRestart': 'enableGracefulRestart',
'EnableLlgr': 'enableLlgr',
'EnableReducedEncapsulation': 'enableReducedEncapsulation',
'Errors': 'errors',
'EthernetSegmentsCountV6': 'ethernetSegmentsCountV6',
'Evpn': 'evpn',
'FilterEvpn': 'filterEvpn',
'FilterIpV4Mpls': 'filterIpV4Mpls',
'FilterIpV4MplsVpn': 'filterIpV4MplsVpn',
'FilterIpV4Multicast': 'filterIpV4Multicast',
'FilterIpV4MulticastVpn': 'filterIpV4MulticastVpn',
'FilterIpV4Unicast': 'filterIpV4Unicast',
'FilterIpV6Mpls': 'filterIpV6Mpls',
'FilterIpV6MplsVpn': 'filterIpV6MplsVpn',
'FilterIpV6Multicast': 'filterIpV6Multicast',
'FilterIpV6MulticastVpn': 'filterIpV6MulticastVpn',
'FilterIpV6Unicast': 'filterIpV6Unicast',
'FilterIpv4MulticastBgpMplsVpn': 'filterIpv4MulticastBgpMplsVpn',
'FilterIpv4UnicastFlowSpec': 'filterIpv4UnicastFlowSpec',
'FilterIpv6MulticastBgpMplsVpn': 'filterIpv6MulticastBgpMplsVpn',
'FilterIpv6UnicastFlowSpec': 'filterIpv6UnicastFlowSpec',
'FilterLinkState': 'filterLinkState',
'FilterLinkStateVpn': 'filterLinkStateVpn',
'FilterSRTEPoliciesV4': 'filterSRTEPoliciesV4',
'FilterSRTEPoliciesV6': 'filterSRTEPoliciesV6',
'FilterVpls': 'filterVpls',
'Flap': 'flap',
'HoldTimer': 'holdTimer',
'IpVrfToIpVrfType': 'ipVrfToIpVrfType',
'Ipv4MplsAddPathMode': 'ipv4MplsAddPathMode',
'Ipv4MplsCapability': 'ipv4MplsCapability',
'Ipv4MulticastBgpMplsVpn': 'ipv4MulticastBgpMplsVpn',
'Ipv4MultipleMplsLabelsCapability': 'ipv4MultipleMplsLabelsCapability',
'Ipv4UnicastAddPathMode': 'ipv4UnicastAddPathMode',
'Ipv6MplsAddPathMode': 'ipv6MplsAddPathMode',
'Ipv6MplsCapability': 'ipv6MplsCapability',
'Ipv6MulticastBgpMplsVpn': 'ipv6MulticastBgpMplsVpn',
'Ipv6MultipleMplsLabelsCapability': 'ipv6MultipleMplsLabelsCapability',
'Ipv6UnicastAddPathMode': 'ipv6UnicastAddPathMode',
'IrbInterfaceLabel': 'irbInterfaceLabel',
'IrbIpv6Address': 'irbIpv6Address',
'KeepaliveTimer': 'keepaliveTimer',
'L3VPNEncapsulationType': 'l3VPNEncapsulationType',
'LocalAs2Bytes': 'localAs2Bytes',
'LocalAs4Bytes': 'localAs4Bytes',
'LocalIpv6Ver2': 'localIpv6Ver2',
'LocalRouterID': 'localRouterID',
'MaxSidPerSrh': 'maxSidPerSrh',
'Md5Key': 'md5Key',
'ModeOfBfdOperations': 'modeOfBfdOperations',
'MplsLabelsCountForIpv4MplsRoute': 'mplsLabelsCountForIpv4MplsRoute',
'MplsLabelsCountForIpv6MplsRoute': 'mplsLabelsCountForIpv6MplsRoute',
'Multiplier': 'multiplier',
'Name': 'name',
'NoOfEpePeers': 'noOfEpePeers',
'NoOfExtendedCommunities': 'noOfExtendedCommunities',
'NoOfUserDefinedAfiSafi': 'noOfUserDefinedAfiSafi',
'NumBgpLsId': 'numBgpLsId',
'NumBgpLsInstanceIdentifier': 'numBgpLsInstanceIdentifier',
'NumBgpUpdatesGeneratedPerIteration': 'numBgpUpdatesGeneratedPerIteration',
'NumberColorFlexAlgoMapping': 'numberColorFlexAlgoMapping',
'NumberFlowSpecRangeV4': 'numberFlowSpecRangeV4',
'NumberFlowSpecRangeV6': 'numberFlowSpecRangeV6',
'NumberSRTEPolicies': 'numberSRTEPolicies',
'OperationalModel': 'operationalModel',
'RestartTime': 'restartTime',
'RoutersMacOrIrbMacAddress': 'routersMacOrIrbMacAddress',
'SRGBRangeCount': 'sRGBRangeCount',
'SegmentLeftValue': 'segmentLeftValue',
'SendIxiaSignatureWithRoutes': 'sendIxiaSignatureWithRoutes',
'SendSRv6SIDOptionalInfo': 'sendSRv6SIDOptionalInfo',
'SessionInfo': 'sessionInfo',
'SessionStatus': 'sessionStatus',
'Srv6EndpointBehavior': 'srv6EndpointBehavior',
'Srv6SIDOptionalInformation': 'srv6SIDOptionalInformation',
'Srv6SidFlags': 'srv6SidFlags',
'Srv6SidLoc': 'srv6SidLoc',
'Srv6SidLocLen': 'srv6SidLocLen',
'Srv6SidLocMetric': 'srv6SidLocMetric',
'Srv6SidReserved': 'srv6SidReserved',
'Srv6SidReserved1': 'srv6SidReserved1',
'Srv6SidReserved2': 'srv6SidReserved2',
'Srv6Ttl': 'srv6Ttl',
'StackedLayers': 'stackedLayers',
'StaleTime': 'staleTime',
'StateCounts': 'stateCounts',
'Status': 'status',
'TcpWindowSizeInBytes': 'tcpWindowSizeInBytes',
'Ttl': 'ttl',
'Type': 'type',
'UdpPortEndValue': 'udpPortEndValue',
'UdpPortStartValue': 'udpPortStartValue',
'UpdateInterval': 'updateInterval',
'UptimeInSec': 'uptimeInSec',
'UseGatewayAsDutIp': 'useGatewayAsDutIp',
'UseStaticPolicy': 'useStaticPolicy',
'VplsEnableNextHop': 'vplsEnableNextHop',
'VplsNextHop': 'vplsNextHop',
}
def __init__(self, parent):
super(BgpIpv6Peer, self).__init__(parent)
@property
def BgpCustomAfiSafiv6(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpcustomafisafiv6_31ae8bd98f331c2119281ac977022fca.BgpCustomAfiSafiv6): An instance of the BgpCustomAfiSafiv6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpcustomafisafiv6_31ae8bd98f331c2119281ac977022fca import BgpCustomAfiSafiv6
return BgpCustomAfiSafiv6(self)._select()
@property
def BgpEpePeerList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpepepeerlist_8e1fc47aa0221fde5418b0e01514b909.BgpEpePeerList): An instance of the BgpEpePeerList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpepepeerlist_8e1fc47aa0221fde5418b0e01514b909 import BgpEpePeerList
return BgpEpePeerList(self)._select()
@property
def BgpEthernetSegmentV6(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpethernetsegmentv6_766c04a63efb3fe4eca969aac968fe4e.BgpEthernetSegmentV6): An instance of the BgpEthernetSegmentV6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpethernetsegmentv6_766c04a63efb3fe4eca969aac968fe4e import BgpEthernetSegmentV6
return BgpEthernetSegmentV6(self)._select()
@property
def BgpFlowSpecRangesList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslist_9ad7609645f425215665a5736cc73e84.BgpFlowSpecRangesList): An instance of the BgpFlowSpecRangesList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslist_9ad7609645f425215665a5736cc73e84 import BgpFlowSpecRangesList
return BgpFlowSpecRangesList(self)._select()
@property
def BgpFlowSpecRangesListV4(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv4_ab0c3185b027eff54394da27736dcb9a.BgpFlowSpecRangesListV4): An instance of the BgpFlowSpecRangesListV4 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv4_ab0c3185b027eff54394da27736dcb9a import BgpFlowSpecRangesListV4
return BgpFlowSpecRangesListV4(self)._select()
@property
def BgpFlowSpecRangesListV6(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv6_305d65dd8b0f124660b13211ca670c20.BgpFlowSpecRangesListV6): An instance of the BgpFlowSpecRangesListV6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpflowspecrangeslistv6_305d65dd8b0f124660b13211ca670c20 import BgpFlowSpecRangesListV6
return BgpFlowSpecRangesListV6(self)._select()
@property
def BgpIPv6EvpnEvi(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnevi_7148192f2f68b72a7e220fe51f91ee65.BgpIPv6EvpnEvi): An instance of the BgpIPv6EvpnEvi class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnevi_7148192f2f68b72a7e220fe51f91ee65 import BgpIPv6EvpnEvi
return BgpIPv6EvpnEvi(self)
@property
def BgpIPv6EvpnPbb(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnpbb_7e3d31c960a96c76772f39596f4e0b6c.BgpIPv6EvpnPbb): An instance of the BgpIPv6EvpnPbb class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnpbb_7e3d31c960a96c76772f39596f4e0b6c import BgpIPv6EvpnPbb
return BgpIPv6EvpnPbb(self)
@property
def BgpIPv6EvpnVXLAN(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvxlan_58919d93e3f1d08f428277c92a21e890.BgpIPv6EvpnVXLAN): An instance of the BgpIPv6EvpnVXLAN class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvxlan_58919d93e3f1d08f428277c92a21e890 import BgpIPv6EvpnVXLAN
return BgpIPv6EvpnVXLAN(self)
@property
def BgpIPv6EvpnVXLANVpws(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvxlanvpws_3f36e2b3e739d7ab9aec3577a508ada7.BgpIPv6EvpnVXLANVpws): An instance of the BgpIPv6EvpnVXLANVpws class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvxlanvpws_3f36e2b3e739d7ab9aec3577a508ada7 import BgpIPv6EvpnVXLANVpws
return BgpIPv6EvpnVXLANVpws(self)
@property
def BgpIPv6EvpnVpws(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvpws_7e7a3dec141df7b1c974f723df7f4814.BgpIPv6EvpnVpws): An instance of the BgpIPv6EvpnVpws class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6evpnvpws_7e7a3dec141df7b1c974f723df7f4814 import BgpIPv6EvpnVpws
return BgpIPv6EvpnVpws(self)
@property
def BgpIpv6AdL2Vpn(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6adl2vpn_dfa30e45f6798c9ecc0ef8b85351cb5d.BgpIpv6AdL2Vpn): An instance of the BgpIpv6AdL2Vpn class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6adl2vpn_dfa30e45f6798c9ecc0ef8b85351cb5d import BgpIpv6AdL2Vpn
return BgpIpv6AdL2Vpn(self)
@property
def BgpIpv6L2Site(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6l2site_91dde52dc0cc2c12360c0d436c8db2fe.BgpIpv6L2Site): An instance of the BgpIpv6L2Site class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6l2site_91dde52dc0cc2c12360c0d436c8db2fe import BgpIpv6L2Site
return BgpIpv6L2Site(self)
@property
def BgpIpv6MVrf(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6mvrf_226a44af23e6291841522d3353c88b21.BgpIpv6MVrf): An instance of the BgpIpv6MVrf class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv6mvrf_226a44af23e6291841522d3353c88b21 import BgpIpv6MVrf
return BgpIpv6MVrf(self)
@property
def BgpLsAsPathSegmentList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsaspathsegmentlist_fed4f671dbff6ccda8e8824fbe375856.BgpLsAsPathSegmentList): An instance of the BgpLsAsPathSegmentList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsaspathsegmentlist_fed4f671dbff6ccda8e8824fbe375856 import BgpLsAsPathSegmentList
return BgpLsAsPathSegmentList(self)
@property
def BgpLsClusterIdList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsclusteridlist_7b4bcec76ea98c69afbc1dcb2556f669.BgpLsClusterIdList): An instance of the BgpLsClusterIdList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsclusteridlist_7b4bcec76ea98c69afbc1dcb2556f669 import BgpLsClusterIdList
return BgpLsClusterIdList(self)
@property
def BgpLsCommunitiesList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32.BgpLsCommunitiesList): An instance of the BgpLsCommunitiesList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplscommunitieslist_fdb216f1d4195f82ad738e19cb2b5d32 import BgpLsCommunitiesList
return BgpLsCommunitiesList(self)
@property
def BgpLsExtendedCommunitiesList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsextendedcommunitieslist_835ffabe7ce10fa0b2a04b0ca4ed54d9.BgpLsExtendedCommunitiesList): An instance of the BgpLsExtendedCommunitiesList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgplsextendedcommunitieslist_835ffabe7ce10fa0b2a04b0ca4ed54d9 import BgpLsExtendedCommunitiesList
return BgpLsExtendedCommunitiesList(self)
@property
def BgpSRGBRangeSubObjectsList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpsrgbrangesubobjectslist_6e28159e439bbeffe19ca2de4c7f7879.BgpSRGBRangeSubObjectsList): An instance of the BgpSRGBRangeSubObjectsList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpsrgbrangesubobjectslist_6e28159e439bbeffe19ca2de4c7f7879 import BgpSRGBRangeSubObjectsList
return BgpSRGBRangeSubObjectsList(self)
@property
def BgpSRTEPoliciesListV6(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpsrtepolicieslistv6_4c4a356e5a00d2ddfa49e9cef396bffd.BgpSRTEPoliciesListV6): An instance of the BgpSRTEPoliciesListV6 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpsrtepolicieslistv6_4c4a356e5a00d2ddfa49e9cef396bffd import BgpSRTEPoliciesListV6
return BgpSRTEPoliciesListV6(self)._select()
@property
def BgpV6Vrf(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpv6vrf_1d6029d380b737c5ce1f12d2ed82f3ed.BgpV6Vrf): An instance of the BgpV6Vrf class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpv6vrf_1d6029d380b737c5ce1f12d2ed82f3ed import BgpV6Vrf
return BgpV6Vrf(self)
@property
def Connector(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def FlexAlgoColorMappingTemplate(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.flexalgocolormappingtemplate_8e0816b88fc7b32d81aaa2e2335895f1.FlexAlgoColorMappingTemplate): An instance of the FlexAlgoColorMappingTemplate class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.flexalgocolormappingtemplate_8e0816b88fc7b32d81aaa2e2335895f1 import FlexAlgoColorMappingTemplate
return FlexAlgoColorMappingTemplate(self)._select()
@property
def LearnedInfo(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100.LearnedInfo): An instance of the LearnedInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.learnedinfo.learnedinfo_ff4d5e5643a63bccb40b6cf64fc58100 import LearnedInfo
return LearnedInfo(self)
@property
def TlvProfile(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c.TlvProfile): An instance of the TlvProfile class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
return TlvProfile(self)
@property
def ActAsRestarted(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Act as restarted
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ActAsRestarted']))
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AdvSrv6SidInIgp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Advertise SRv6 SID in IGP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvSrv6SidInIgp']))
@property
def AdvertiseEndOfRib(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Advertise End-Of-RIB
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseEndOfRib']))
@property
def AdvertiseEvpnRoutesForOtherVtep(self):
"""
Returns
-------
- bool: Advertise EVPN routes for other VTEPS
"""
return self._get_attribute(self._SDM_ATT_MAP['AdvertiseEvpnRoutesForOtherVtep'])
@AdvertiseEvpnRoutesForOtherVtep.setter
def AdvertiseEvpnRoutesForOtherVtep(self, value):
self._set_attribute(self._SDM_ATT_MAP['AdvertiseEvpnRoutesForOtherVtep'], value)
@property
def AdvertiseSRv6SID(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Advertise SRv6 SID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseSRv6SID']))
@property
def AdvertiseTunnelEncapsulationExtendedCommunity(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Advertise Tunnel Encapsulation Extended Community
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AdvertiseTunnelEncapsulationExtendedCommunity']))
@property
def AlwaysIncludeTunnelEncExtCommunity(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Always Include Tunnel Encapsulation Extended Community
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AlwaysIncludeTunnelEncExtCommunity']))
@property
def AsSetMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): AS# Set Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsSetMode']))
@property
def Authentication(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Authentication Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Authentication']))
@property
def AutoGenSegmentLeftValue(self):
"""
Returns
-------
- bool: If enabled then Segment Left field value will be auto generated
"""
return self._get_attribute(self._SDM_ATT_MAP['AutoGenSegmentLeftValue'])
@AutoGenSegmentLeftValue.setter
def AutoGenSegmentLeftValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['AutoGenSegmentLeftValue'], value)
@property
def BgpFsmState(self):
"""
Returns
-------
- list(str[active | connect | error | established | idle | none | openConfirm | openSent]): Logs additional information about the BGP Peer State
"""
return self._get_attribute(self._SDM_ATT_MAP['BgpFsmState'])
@property
def BgpId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): BGP ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpId']))
@property
def BgpLsAsSetMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): AS# Set Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpLsAsSetMode']))
@property
def BgpLsEnableAsPathSegments(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable AS Path Segments
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpLsEnableAsPathSegments']))
@property
def BgpLsEnableCluster(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Cluster
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpLsEnableCluster']))
@property
def BgpLsEnableExtendedCommunity(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Extended Community
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpLsEnableExtendedCommunity']))
@property
def BgpLsNoOfASPathSegments(self):
"""
Returns
-------
- number: Number Of AS Path Segments Per Route Range
"""
return self._get_attribute(self._SDM_ATT_MAP['BgpLsNoOfASPathSegments'])
@BgpLsNoOfASPathSegments.setter
def BgpLsNoOfASPathSegments(self, value):
self._set_attribute(self._SDM_ATT_MAP['BgpLsNoOfASPathSegments'], value)
@property
def BgpLsNoOfClusters(self):
"""
Returns
-------
- number: Number of Clusters
"""
return self._get_attribute(self._SDM_ATT_MAP['BgpLsNoOfClusters'])
@BgpLsNoOfClusters.setter
def BgpLsNoOfClusters(self, value):
self._set_attribute(self._SDM_ATT_MAP['BgpLsNoOfClusters'], value)
@property
def BgpLsNoOfCommunities(self):
"""
Returns
-------
- number: Number of Communities
"""
return self._get_attribute(self._SDM_ATT_MAP['BgpLsNoOfCommunities'])
@BgpLsNoOfCommunities.setter
def BgpLsNoOfCommunities(self, value):
self._set_attribute(self._SDM_ATT_MAP['BgpLsNoOfCommunities'], value)
@property
def BgpLsOverridePeerAsSetMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Override Peer AS# Set Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpLsOverridePeerAsSetMode']))
@property
def BgpUnnumbered(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, BGP local IP will be Link-local IP.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BgpUnnumbered']))
@property
def CapabilityIpV4Mdt(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 BGP MDT: AFI = 1, SAFI = 66
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4Mdt']))
@property
def CapabilityIpV4Mpls(self):
"""DEPRECATED
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 MPLS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4Mpls']))
@property
def CapabilityIpV4MplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 MPLS VPN Capability: AFI=1,SAFI=128
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4MplsVpn']))
@property
def CapabilityIpV4Multicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Multicast Capability: AFI=1,SAFI=2
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4Multicast']))
@property
def CapabilityIpV4MulticastVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP MCAST-VPN: AFI = 1, SAFI = 5
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4MulticastVpn']))
@property
def CapabilityIpV4Unicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Unicast Capability: AFI=1,SAFI=1
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV4Unicast']))
@property
def CapabilityIpV6Mpls(self):
"""DEPRECATED
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 MPLS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV6Mpls']))
@property
def CapabilityIpV6MplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 MPLS VPN Capability: AFI=2,SAFI=128
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV6MplsVpn']))
@property
def CapabilityIpV6Multicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Multicast Capability: AFI=2,SAFI=2
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV6Multicast']))
@property
def CapabilityIpV6MulticastVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP6 MCAST-VPN: AFI = 2, SAFI = 5
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV6MulticastVpn']))
@property
def CapabilityIpV6Unicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Unicast Capability: AFI=2,SAFI=1
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpV6Unicast']))
@property
def CapabilityIpv4MplsAddPath(self):
"""
Returns
-------
- bool: IPv4 MPLS Add Path Capability
"""
return self._get_attribute(self._SDM_ATT_MAP['CapabilityIpv4MplsAddPath'])
@CapabilityIpv4MplsAddPath.setter
def CapabilityIpv4MplsAddPath(self, value):
self._set_attribute(self._SDM_ATT_MAP['CapabilityIpv4MplsAddPath'], value)
@property
def CapabilityIpv4UnicastAddPath(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check box for IPv4 Unicast Add Path
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpv4UnicastAddPath']))
@property
def CapabilityIpv6MplsAddPath(self):
"""
Returns
-------
- bool: IPv6 MPLS Add Path Capability
"""
return self._get_attribute(self._SDM_ATT_MAP['CapabilityIpv6MplsAddPath'])
@CapabilityIpv6MplsAddPath.setter
def CapabilityIpv6MplsAddPath(self, value):
self._set_attribute(self._SDM_ATT_MAP['CapabilityIpv6MplsAddPath'], value)
@property
def CapabilityIpv6UnicastAddPath(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check box for IPv6 Unicast Add Path
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityIpv6UnicastAddPath']))
@property
def CapabilityLinkStateNonVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Link State Non-VPN Capability: AFI=16388,SAFI=71
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityLinkStateNonVpn']))
@property
def CapabilityLinkStateVpn(self):
"""
Returns
-------
        - obj(uhd_restpy.multivalue.Multivalue): Select this check box to enable Link State VPN capability on the router. AFI=16388 and SAFI=72 values will be supported.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityLinkStateVpn']))
@property
def CapabilityNHEncodingCapabilities(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Extended Next Hop Encoding Capability which needs to be used when advertising IPv4 or VPN-IPv4 routes over IPv6 Core
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityNHEncodingCapabilities']))
@property
def CapabilityRouteConstraint(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Constraint Capability: AFI=1,SAFI=132
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityRouteConstraint']))
@property
def CapabilityRouteRefresh(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Refresh
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityRouteRefresh']))
@property
def CapabilitySRTEPoliciesV4(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 SR TE Policy Capability: AFI=1,SAFI=73
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilitySRTEPoliciesV4']))
@property
def CapabilitySRTEPoliciesV6(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 SR TE Policy Capability: AFI=2,SAFI=73
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilitySRTEPoliciesV6']))
@property
def CapabilityVpls(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS Capability: AFI = 25, SAFI = 65
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CapabilityVpls']))
@property
def Capabilityipv4UnicastFlowSpec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Unicast Flow Spec Capability: AFI=1,SAFI=133
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Capabilityipv4UnicastFlowSpec']))
@property
def Capabilityipv6UnicastFlowSpec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Unicast Flow Spec Capability: AFI=2,SAFI=133
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Capabilityipv6UnicastFlowSpec']))
@property
def ConfigureKeepaliveTimer(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Configure Keepalive Timer
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ConfigureKeepaliveTimer']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def CopyTtl(self):
"""
Returns
-------
- bool: Copy TTL from customer packet to outer IPv6 header
"""
return self._get_attribute(self._SDM_ATT_MAP['CopyTtl'])
@CopyTtl.setter
def CopyTtl(self, value):
self._set_attribute(self._SDM_ATT_MAP['CopyTtl'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def CustomSidType(self):
"""
Returns
-------
        - obj(uhd_restpy.multivalue.Multivalue): Custom SID Type (moved to port data in bgp/srv6)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CustomSidType']))
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DiscardIxiaGeneratedRoutes(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Discard Ixia Generated Routes
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DiscardIxiaGeneratedRoutes']))
@property
def DiscoveredDutIp(self):
"""
Returns
-------
- list(str): The discovered DUT IP addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredDutIp'])
@property
def DowntimeInSec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Downtime in Seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DowntimeInSec']))
@property
def DutIp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): DUT IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DutIp']))
@property
def EnSRv6DataPlane(self):
"""
Returns
-------
- bool: Ingress Peer Supports SRv6 VPN
"""
return self._get_attribute(self._SDM_ATT_MAP['EnSRv6DataPlane'])
@EnSRv6DataPlane.setter
def EnSRv6DataPlane(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnSRv6DataPlane'], value)
@property
def Enable4ByteAs(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable 4-Byte AS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Enable4ByteAs']))
@property
def EnableBfdRegistration(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable BFD Registration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBfdRegistration']))
@property
def EnableBgpId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable BGP ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBgpId']))
@property
def EnableBgpIdSameAsRouterId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): BGP ID Same as Router ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBgpIdSameAsRouterId']))
@property
def EnableBgpLsCommunity(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Community
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableBgpLsCommunity']))
@property
def EnableEpeTraffic(self):
"""
Returns
-------
- bool: Enable EPE Traffic
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableEpeTraffic'])
@EnableEpeTraffic.setter
def EnableEpeTraffic(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableEpeTraffic'], value)
@property
def EnableGracefulRestart(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Graceful Restart
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableGracefulRestart']))
@property
def EnableLlgr(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable LLGR
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableLlgr']))
@property
def EnableReducedEncapsulation(self):
"""
Returns
-------
- bool: Enable Reduced Encapsulation in Data-Plane for SRv6
"""
return self._get_attribute(self._SDM_ATT_MAP['EnableReducedEncapsulation'])
@EnableReducedEncapsulation.setter
def EnableReducedEncapsulation(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnableReducedEncapsulation'], value)
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def EthernetSegmentsCountV6(self):
"""
Returns
-------
- number: Number of Ethernet Segments
"""
return self._get_attribute(self._SDM_ATT_MAP['EthernetSegmentsCountV6'])
@EthernetSegmentsCountV6.setter
def EthernetSegmentsCountV6(self, value):
self._set_attribute(self._SDM_ATT_MAP['EthernetSegmentsCountV6'], value)
@property
def Evpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): EVPN Capability: AFI = 25, SAFI = 70
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Evpn']))
@property
def FilterEvpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check box for EVPN filter
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterEvpn']))
@property
def FilterIpV4Mpls(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 MPLS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV4Mpls']))
@property
def FilterIpV4MplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 MPLS VPN
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV4MplsVpn']))
@property
def FilterIpV4Multicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 Multicast
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV4Multicast']))
@property
def FilterIpV4MulticastVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 Multicast VPN
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV4MulticastVpn']))
@property
def FilterIpV4Unicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 Unicast
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV4Unicast']))
@property
def FilterIpV6Mpls(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 MPLS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV6Mpls']))
@property
def FilterIpV6MplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 MPLS VPN
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV6MplsVpn']))
@property
def FilterIpV6Multicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 Multicast
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV6Multicast']))
@property
def FilterIpV6MulticastVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 Multicast VPN
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV6MulticastVpn']))
@property
def FilterIpV6Unicast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 Unicast
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpV6Unicast']))
@property
def FilterIpv4MulticastBgpMplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check box for IPv4 Multicast BGP/MPLS VPN filter
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpv4MulticastBgpMplsVpn']))
@property
def FilterIpv4UnicastFlowSpec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv4 Unicast Flow Spec
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpv4UnicastFlowSpec']))
@property
def FilterIpv6MulticastBgpMplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Check box for IPv6 Multicast BGP/MPLS VPN filter
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpv6MulticastBgpMplsVpn']))
@property
def FilterIpv6UnicastFlowSpec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter IPv6 Unicast Flow Spec
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterIpv6UnicastFlowSpec']))
@property
def FilterLinkState(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter Link State
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterLinkState']))
@property
def FilterLinkStateVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Select this check box to store incoming BGP LS VPN route info.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterLinkStateVpn']))
@property
def FilterSRTEPoliciesV4(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable IPv4 SR TE Policy Filter
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterSRTEPoliciesV4']))
@property
def FilterSRTEPoliciesV6(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable IPv6 SR TE Policy Filter
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterSRTEPoliciesV6']))
@property
def FilterVpls(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Filter VPLS
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FilterVpls']))
@property
def Flap(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Flap
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Flap']))
@property
def HoldTimer(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Hold Timer
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldTimer']))
@property
def IpVrfToIpVrfType(self):
"""
Returns
-------
- str(interfaceLess | interfacefullWithCorefacingIRB | interfacefullWithUnnumberedCorefacingIRB): IP-VRF-to-IP-VRF Model Type
"""
return self._get_attribute(self._SDM_ATT_MAP['IpVrfToIpVrfType'])
@IpVrfToIpVrfType.setter
def IpVrfToIpVrfType(self, value):
self._set_attribute(self._SDM_ATT_MAP['IpVrfToIpVrfType'], value)
@property
def Ipv4MplsAddPathMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 MPLS Add Path Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4MplsAddPathMode']))
@property
def Ipv4MplsCapability(self):
"""
Returns
-------
- bool: IPv4 MPLS Capability: AFI=1, SAFI=4
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4MplsCapability'])
@Ipv4MplsCapability.setter
def Ipv4MplsCapability(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv4MplsCapability'], value)
@property
def Ipv4MulticastBgpMplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP Multicast for BGP/MPLS IP VPN (UMH): AFI = 1, SAFI = 129
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4MulticastBgpMplsVpn']))
@property
def Ipv4MultipleMplsLabelsCapability(self):
"""
Returns
-------
- bool: IPv4 Multiple MPLS Labels Capability: AFI=1, SAFI=4
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv4MultipleMplsLabelsCapability'])
@Ipv4MultipleMplsLabelsCapability.setter
def Ipv4MultipleMplsLabelsCapability(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv4MultipleMplsLabelsCapability'], value)
@property
def Ipv4UnicastAddPathMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Unicast Add Path Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4UnicastAddPathMode']))
@property
def Ipv6MplsAddPathMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 MPLS Add Path Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6MplsAddPathMode']))
@property
def Ipv6MplsCapability(self):
"""
Returns
-------
- bool: IPv6 MPLS Capability: AFI=2, SAFI=4
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6MplsCapability'])
@Ipv6MplsCapability.setter
def Ipv6MplsCapability(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6MplsCapability'], value)
@property
def Ipv6MulticastBgpMplsVpn(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP6 Multicast for BGP/MPLS IP VPN (UMH): AFI = 2, SAFI = 129
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6MulticastBgpMplsVpn']))
@property
def Ipv6MultipleMplsLabelsCapability(self):
"""
Returns
-------
- bool: IPv6 Multiple MPLS Labels Capability: AFI=2, SAFI=4
"""
return self._get_attribute(self._SDM_ATT_MAP['Ipv6MultipleMplsLabelsCapability'])
@Ipv6MultipleMplsLabelsCapability.setter
def Ipv6MultipleMplsLabelsCapability(self, value):
self._set_attribute(self._SDM_ATT_MAP['Ipv6MultipleMplsLabelsCapability'], value)
@property
def Ipv6UnicastAddPathMode(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Unicast Add Path Mode
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6UnicastAddPathMode']))
@property
def IrbInterfaceLabel(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label to be used for Route Type 2 carrying IRB MAC and/or IRB IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IrbInterfaceLabel']))
@property
def IrbIpv6Address(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IRB IPv6 Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IrbIpv6Address']))
@property
def KeepaliveTimer(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Keepalive Timer
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['KeepaliveTimer']))
@property
def L3VPNEncapsulationType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): L3VPN Traffic Encapsulation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['L3VPNEncapsulationType']))
@property
def LocalAs2Bytes(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Local AS# (2-Bytes)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalAs2Bytes']))
@property
def LocalAs4Bytes(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Local AS# (4-Bytes)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalAs4Bytes']))
@property
def LocalIpv6Ver2(self):
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIpv6Ver2'])
@property
def LocalRouterID(self):
"""
Returns
-------
- list(str): Router ID
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalRouterID'])
@property
def MaxSidPerSrh(self):
"""
Returns
-------
- number: Max number of SIDs a SRH can have
"""
return self._get_attribute(self._SDM_ATT_MAP['MaxSidPerSrh'])
@MaxSidPerSrh.setter
def MaxSidPerSrh(self, value):
self._set_attribute(self._SDM_ATT_MAP['MaxSidPerSrh'], value)
@property
def Md5Key(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): MD5 Key
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Md5Key']))
@property
def ModeOfBfdOperations(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Mode of BFD Operations
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ModeOfBfdOperations']))
@property
def MplsLabelsCountForIpv4MplsRoute(self):
"""
Returns
-------
- number: MPLS Labels Count For IPv4 MPLS Route
"""
return self._get_attribute(self._SDM_ATT_MAP['MplsLabelsCountForIpv4MplsRoute'])
@MplsLabelsCountForIpv4MplsRoute.setter
def MplsLabelsCountForIpv4MplsRoute(self, value):
self._set_attribute(self._SDM_ATT_MAP['MplsLabelsCountForIpv4MplsRoute'], value)
@property
def MplsLabelsCountForIpv6MplsRoute(self):
"""
Returns
-------
- number: MPLS Labels Count For IPv6 MPLS Route
"""
return self._get_attribute(self._SDM_ATT_MAP['MplsLabelsCountForIpv6MplsRoute'])
@MplsLabelsCountForIpv6MplsRoute.setter
def MplsLabelsCountForIpv6MplsRoute(self, value):
self._set_attribute(self._SDM_ATT_MAP['MplsLabelsCountForIpv6MplsRoute'], value)
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NoOfEpePeers(self):
"""
Returns
-------
- number: Number of EPE Peers
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfEpePeers'])
@NoOfEpePeers.setter
def NoOfEpePeers(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfEpePeers'], value)
@property
def NoOfExtendedCommunities(self):
"""
Returns
-------
- number: Number of Extended Communities
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfExtendedCommunities'])
@NoOfExtendedCommunities.setter
def NoOfExtendedCommunities(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfExtendedCommunities'], value)
@property
def NoOfUserDefinedAfiSafi(self):
"""
Returns
-------
- number: Count of User defined AFI SAFI
"""
return self._get_attribute(self._SDM_ATT_MAP['NoOfUserDefinedAfiSafi'])
@NoOfUserDefinedAfiSafi.setter
def NoOfUserDefinedAfiSafi(self, value):
self._set_attribute(self._SDM_ATT_MAP['NoOfUserDefinedAfiSafi'], value)
@property
def NumBgpLsId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): BGP LS Instance ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumBgpLsId']))
@property
def NumBgpLsInstanceIdentifier(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IGP multi-instance unique identifier. 0 is the default single-instance IGP. (e.g. for OSPFv3 it is possible to separately run 4 instances of OSPFv3 with a peer: one advertising v4 only, another v6 only, and the other 2 multicast v4 and v6 respectively).
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumBgpLsInstanceIdentifier']))
@property
def NumBgpUpdatesGeneratedPerIteration(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Num BGP Updates Generated Per Iteration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumBgpUpdatesGeneratedPerIteration']))
@property
def NumberColorFlexAlgoMapping(self):
"""
Returns
-------
- number: Number of Color/Flex Algo Mapping Entries
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberColorFlexAlgoMapping'])
@NumberColorFlexAlgoMapping.setter
def NumberColorFlexAlgoMapping(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumberColorFlexAlgoMapping'], value)
@property
def NumberFlowSpecRangeV4(self):
"""
Returns
-------
- number: Number of IPv4 Flow Spec Ranges
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberFlowSpecRangeV4'])
@NumberFlowSpecRangeV4.setter
def NumberFlowSpecRangeV4(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumberFlowSpecRangeV4'], value)
@property
def NumberFlowSpecRangeV6(self):
"""
Returns
-------
- number: Number of IPv6 Flow Spec Ranges
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberFlowSpecRangeV6'])
@NumberFlowSpecRangeV6.setter
def NumberFlowSpecRangeV6(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumberFlowSpecRangeV6'], value)
@property
def NumberSRTEPolicies(self):
"""
Returns
-------
- number: Count of SR TE Policies
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberSRTEPolicies'])
@NumberSRTEPolicies.setter
def NumberSRTEPolicies(self, value):
self._set_attribute(self._SDM_ATT_MAP['NumberSRTEPolicies'], value)
@property
def OperationalModel(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Operational Model
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OperationalModel']))
@property
def RestartTime(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Restart Time
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RestartTime']))
@property
def RoutersMacOrIrbMacAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Router's MAC/IRB MAC Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RoutersMacOrIrbMacAddress']))
@property
def SRGBRangeCount(self):
"""
Returns
-------
- number: SRGB Range Count
"""
return self._get_attribute(self._SDM_ATT_MAP['SRGBRangeCount'])
@SRGBRangeCount.setter
def SRGBRangeCount(self, value):
self._set_attribute(self._SDM_ATT_MAP['SRGBRangeCount'], value)
@property
def SegmentLeftValue(self):
"""
Returns
-------
- number: Segment Left value to be used in the top SRH. This zero-index-based value starts from the egress node.
"""
return self._get_attribute(self._SDM_ATT_MAP['SegmentLeftValue'])
@SegmentLeftValue.setter
def SegmentLeftValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['SegmentLeftValue'], value)
@property
def SendIxiaSignatureWithRoutes(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Send Ixia Signature With Routes
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendIxiaSignatureWithRoutes']))
@property
def SendSRv6SIDOptionalInfo(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enables advertising the SRv6 SID Optional Information (Service Information sub-TLV) specified in the next column(s)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendSRv6SIDOptionalInfo']))
@property
def SessionInfo(self):
"""
Returns
-------
- list(str[aSRoutingLoopErrorRx | attributeFlagErrorRx | attributesLengthErrorRx | authenticationFailureErrorRx | badBGPIdentifierErrorRx | badMessageLengthErrorRx | badMessageTypeErrorRx | badPeerASErrorRx | bGPHeaderErrorRx | bGPHeaderErrorTx | bGPHoldTimerExpiredErrorRx | bGPOpenPacketErrorRx | bGPStateMachineErrorRx | bGPUpdatePacketErrorRx | ceaseErrorRx | ceaseNotificationErrorTx | connectionNotsynchronizedErrorRx | holdtimeExpiredErrorTx | invalidASPathErrorRx | invalidNetworkFieldErrorRx | invalidNextHopAttributeErrorRx | invalidOriginAttributeErrorRx | malformedAttributeListErrorRx | missingWellKnownAttributeErrorRx | none | openPacketErrTx | optionalAttributeErrorRx | stateMachineErrorTx | unacceptableHoldTimeErrorRx | unrecognizedWellKnownAttributeErrorRx | unspecifiedErrorRx | unspecifiedErrorTx | unspecifiedSubcodeErrorRx | unsupportedOptionalParameterErrorRx | unsupportedversionNumberErrorRx | updatePacketErrorTx]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't complete successfully (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def Srv6EndpointBehavior(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 Endpoint Behavior field Value for all routes in this Route Range
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6EndpointBehavior']))
@property
def Srv6SIDOptionalInformation(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Optional Information field Value (Service Information sub-TLV) for all routes in this Route Range
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SIDOptionalInformation']))
@property
def Srv6SidFlags(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Flags Value
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidFlags']))
@property
def Srv6SidLoc(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID. It consists of Locator, Func and Args
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLoc']))
@property
def Srv6SidLocLen(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Locator Length
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocLen']))
@property
def Srv6SidLocMetric(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Locator Metric
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidLocMetric']))
@property
def Srv6SidReserved(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Reserved Value (SRv6 SID Service TLV Level)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved']))
@property
def Srv6SidReserved1(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Reserved1 Field for Service Information sub-TLV
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved1']))
@property
def Srv6SidReserved2(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 SID Reserved2 Field for Service Information sub-TLV
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6SidReserved2']))
@property
def Srv6Ttl(self):
"""
Returns
-------
- number: TTL value to be used in outer IPv6 header
"""
return self._get_attribute(self._SDM_ATT_MAP['Srv6Ttl'])
@Srv6Ttl.setter
def Srv6Ttl(self, value):
self._set_attribute(self._SDM_ATT_MAP['Srv6Ttl'], value)
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StaleTime(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Stale Time/ LLGR Stale Time
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StaleTime']))
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TcpWindowSizeInBytes(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): TCP Window Size (in bytes)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TcpWindowSizeInBytes']))
@property
def Ttl(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): TTL
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ttl']))
@property
def Type(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Type']))
@property
def UdpPortEndValue(self):
"""
Returns
-------
- number: UDP Port End Value
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpPortEndValue'])
@UdpPortEndValue.setter
def UdpPortEndValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['UdpPortEndValue'], value)
@property
def UdpPortStartValue(self):
"""
Returns
-------
- number: UDP Port Start Value
"""
return self._get_attribute(self._SDM_ATT_MAP['UdpPortStartValue'])
@UdpPortStartValue.setter
def UdpPortStartValue(self, value):
self._set_attribute(self._SDM_ATT_MAP['UdpPortStartValue'], value)
@property
def UpdateInterval(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Update Interval
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UpdateInterval']))
@property
def UptimeInSec(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Uptime in Seconds
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UptimeInSec']))
@property
def UseGatewayAsDutIp(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, Gateway IP will be used as DUT IP.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseGatewayAsDutIp']))
@property
def UseStaticPolicy(self):
"""
Returns
-------
- bool: If enabled then SRTE policy will be advertised
"""
return self._get_attribute(self._SDM_ATT_MAP['UseStaticPolicy'])
@UseStaticPolicy.setter
def UseStaticPolicy(self, value):
self._set_attribute(self._SDM_ATT_MAP['UseStaticPolicy'], value)
@property
def VplsEnableNextHop(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS Enable Next Hop
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VplsEnableNextHop']))
@property
def VplsNextHop(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS Next Hop
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VplsNextHop']))
def update(self, AdvertiseEvpnRoutesForOtherVtep=None, AutoGenSegmentLeftValue=None, BgpLsNoOfASPathSegments=None, BgpLsNoOfClusters=None, BgpLsNoOfCommunities=None, CapabilityIpv4MplsAddPath=None, CapabilityIpv6MplsAddPath=None, ConnectedVia=None, CopyTtl=None, EnSRv6DataPlane=None, EnableEpeTraffic=None, EnableReducedEncapsulation=None, EthernetSegmentsCountV6=None, IpVrfToIpVrfType=None, Ipv4MplsCapability=None, Ipv4MultipleMplsLabelsCapability=None, Ipv6MplsCapability=None, Ipv6MultipleMplsLabelsCapability=None, MaxSidPerSrh=None, MplsLabelsCountForIpv4MplsRoute=None, MplsLabelsCountForIpv6MplsRoute=None, Multiplier=None, Name=None, NoOfEpePeers=None, NoOfExtendedCommunities=None, NoOfUserDefinedAfiSafi=None, NumberColorFlexAlgoMapping=None, NumberFlowSpecRangeV4=None, NumberFlowSpecRangeV6=None, NumberSRTEPolicies=None, SRGBRangeCount=None, SegmentLeftValue=None, Srv6Ttl=None, StackedLayers=None, UdpPortEndValue=None, UdpPortStartValue=None, UseStaticPolicy=None):
"""Updates bgpIpv6Peer resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- AdvertiseEvpnRoutesForOtherVtep (bool): Advertise EVPN routes for other VTEPS
- AutoGenSegmentLeftValue (bool): If enabled then Segment Left field value will be auto generated
- BgpLsNoOfASPathSegments (number): Number Of AS Path Segments Per Route Range
- BgpLsNoOfClusters (number): Number of Clusters
- BgpLsNoOfCommunities (number): Number of Communities
- CapabilityIpv4MplsAddPath (bool): IPv4 MPLS Add Path Capability
- CapabilityIpv6MplsAddPath (bool): IPv6 MPLS Add Path Capability
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer connects through to reach the wire.
- CopyTtl (bool): Copy TTL from customer packet to outer IPv6 header
- EnSRv6DataPlane (bool): Ingress Peer Supports SRv6 VPN
- EnableEpeTraffic (bool): Enable EPE Traffic
- EnableReducedEncapsulation (bool): Enable Reduced Encapsulation in Data-Plane for SRv6
- EthernetSegmentsCountV6 (number): Number of Ethernet Segments
- IpVrfToIpVrfType (str(interfaceLess | interfacefullWithCorefacingIRB | interfacefullWithUnnumberedCorefacingIRB)): IP-VRF-to-IP-VRF Model Type
- Ipv4MplsCapability (bool): IPv4 MPLS Capability: AFI=1, SAFI=4
- Ipv4MultipleMplsLabelsCapability (bool): IPv4 Multiple MPLS Labels Capability: AFI=1, SAFI=4
- Ipv6MplsCapability (bool): IPv6 MPLS Capability: AFI=2, SAFI=4
- Ipv6MultipleMplsLabelsCapability (bool): IPv6 Multiple MPLS Labels Capability: AFI=2, SAFI=4
- MaxSidPerSrh (number): Max number of SIDs a SRH can have
- MplsLabelsCountForIpv4MplsRoute (number): MPLS Labels Count For IPv4 MPLS Route
- MplsLabelsCountForIpv6MplsRoute (number): MPLS Labels Count For IPv6 MPLS Route
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfEpePeers (number): Number of EPE Peers
- NoOfExtendedCommunities (number): Number of Extended Communities
- NoOfUserDefinedAfiSafi (number): Count of User defined AFI SAFI
- NumberColorFlexAlgoMapping (number): Number of Color/Flex Algo Mapping Entries
- NumberFlowSpecRangeV4 (number): Number of IPv4 Flow Spec Ranges
- NumberFlowSpecRangeV6 (number): Number of IPv6 Flow Spec Ranges
- NumberSRTEPolicies (number): Count of SR TE Policies
- SRGBRangeCount (number): SRGB Range Count
- SegmentLeftValue (number): Segment Left value to be used in the top SRH. This zero-index-based value starts from the egress node.
- Srv6Ttl (number): TTL value to be used in outer IPv6 header
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- UdpPortEndValue (number): UDP Port End Value
- UdpPortStartValue (number): UDP Port Start Value
- UseStaticPolicy (bool): If enabled then SRTE policy will be advertised
Raises
------
- ServerError: The server has encountered an uncategorized error condition
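Example
-------
An illustrative sketch only; `bgp_v6_peer` is an assumed, pre-existing bgpIpv6Peer container and the attribute values are arbitrary.
bgp_v6_peer.update(Name='core peer', Multiplier=2, EnableEpeTraffic=True)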
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, AdvertiseEvpnRoutesForOtherVtep=None, AutoGenSegmentLeftValue=None, BgpLsNoOfASPathSegments=None, BgpLsNoOfClusters=None, BgpLsNoOfCommunities=None, CapabilityIpv4MplsAddPath=None, CapabilityIpv6MplsAddPath=None, ConnectedVia=None, CopyTtl=None, EnSRv6DataPlane=None, EnableEpeTraffic=None, EnableReducedEncapsulation=None, EthernetSegmentsCountV6=None, IpVrfToIpVrfType=None, Ipv4MplsCapability=None, Ipv4MultipleMplsLabelsCapability=None, Ipv6MplsCapability=None, Ipv6MultipleMplsLabelsCapability=None, MaxSidPerSrh=None, MplsLabelsCountForIpv4MplsRoute=None, MplsLabelsCountForIpv6MplsRoute=None, Multiplier=None, Name=None, NoOfEpePeers=None, NoOfExtendedCommunities=None, NoOfUserDefinedAfiSafi=None, NumberColorFlexAlgoMapping=None, NumberFlowSpecRangeV4=None, NumberFlowSpecRangeV6=None, NumberSRTEPolicies=None, SRGBRangeCount=None, SegmentLeftValue=None, Srv6Ttl=None, StackedLayers=None, UdpPortEndValue=None, UdpPortStartValue=None, UseStaticPolicy=None):
"""Adds a new bgpIpv6Peer resource on the server and adds it to the container.
Args
----
- AdvertiseEvpnRoutesForOtherVtep (bool): Advertise EVPN routes for other VTEPS
- AutoGenSegmentLeftValue (bool): If enabled then Segment Left field value will be auto generated
- BgpLsNoOfASPathSegments (number): Number Of AS Path Segments Per Route Range
- BgpLsNoOfClusters (number): Number of Clusters
- BgpLsNoOfCommunities (number): Number of Communities
- CapabilityIpv4MplsAddPath (bool): IPv4 MPLS Add Path Capability
- CapabilityIpv6MplsAddPath (bool): IPv6 MPLS Add Path Capability
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer connects through to reach the wire.
- CopyTtl (bool): Copy TTL from customer packet to outer IPv6 header
- EnSRv6DataPlane (bool): Ingress Peer Supports SRv6 VPN
- EnableEpeTraffic (bool): Enable EPE Traffic
- EnableReducedEncapsulation (bool): Enable Reduced Encapsulation in Data-Plane for SRv6
- EthernetSegmentsCountV6 (number): Number of Ethernet Segments
- IpVrfToIpVrfType (str(interfaceLess | interfacefullWithCorefacingIRB | interfacefullWithUnnumberedCorefacingIRB)): IP-VRF-to-IP-VRF Model Type
- Ipv4MplsCapability (bool): IPv4 MPLS Capability: AFI=1, SAFI=4
- Ipv4MultipleMplsLabelsCapability (bool): IPv4 Multiple MPLS Labels Capability: AFI=1, SAFI=4
- Ipv6MplsCapability (bool): IPv6 MPLS Capability: AFI=2, SAFI=4
- Ipv6MultipleMplsLabelsCapability (bool): IPv6 Multiple MPLS Labels Capability: AFI=2, SAFI=4
- MaxSidPerSrh (number): Max number of SIDs a SRH can have
- MplsLabelsCountForIpv4MplsRoute (number): MPLS Labels Count For IPv4 MPLS Route
- MplsLabelsCountForIpv6MplsRoute (number): MPLS Labels Count For IPv6 MPLS Route
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfEpePeers (number): Number of EPE Peers
- NoOfExtendedCommunities (number): Number of Extended Communities
- NoOfUserDefinedAfiSafi (number): Count of User defined AFI SAFI
- NumberColorFlexAlgoMapping (number): Number of Color/Flex Algo Mapping Entries
- NumberFlowSpecRangeV4 (number): Number of IPv4 Flow Spec Ranges
- NumberFlowSpecRangeV6 (number): Number of IPv6 Flow Spec Ranges
- NumberSRTEPolicies (number): Count of SR TE Policies
- SRGBRangeCount (number): SRGB Range Count
- SegmentLeftValue (number): Segment Left value to be used in the top SRH. This zero-index-based value starts from the egress node.
- Srv6Ttl (number): TTL value to be used in outer IPv6 header
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- UdpPortEndValue (number): UDP Port End Value
- UdpPortStartValue (number): UDP Port Start Value
- UseStaticPolicy (bool): If enabled then SRTE policy will be advertised
Returns
-------
- self: This instance with all currently retrieved bgpIpv6Peer resources using find and the newly added bgpIpv6Peer resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
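Example
-------
A minimal sketch; `ipv6` is an assumed parent ipv6 stack object exposing this container, and the attribute values are arbitrary.
bgp_v6_peer = ipv6.BgpIpv6Peer.add(Name='core peer', Multiplier=1, NumberSRTEPolicies=2)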
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bgpIpv6Peer resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
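Example
-------
A sketch only; finds peers on an assumed `bgp_v6_peer` container by an arbitrary name pattern, then deletes them from the server.
bgp_v6_peer.find(Name='^stale peer$').remove()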
"""
self._delete()
def find(self, AdvertiseEvpnRoutesForOtherVtep=None, AutoGenSegmentLeftValue=None, BgpFsmState=None, BgpLsNoOfASPathSegments=None, BgpLsNoOfClusters=None, BgpLsNoOfCommunities=None, CapabilityIpv4MplsAddPath=None, CapabilityIpv6MplsAddPath=None, ConnectedVia=None, CopyTtl=None, Count=None, DescriptiveName=None, DiscoveredDutIp=None, EnSRv6DataPlane=None, EnableEpeTraffic=None, EnableReducedEncapsulation=None, Errors=None, EthernetSegmentsCountV6=None, IpVrfToIpVrfType=None, Ipv4MplsCapability=None, Ipv4MultipleMplsLabelsCapability=None, Ipv6MplsCapability=None, Ipv6MultipleMplsLabelsCapability=None, LocalIpv6Ver2=None, LocalRouterID=None, MaxSidPerSrh=None, MplsLabelsCountForIpv4MplsRoute=None, MplsLabelsCountForIpv6MplsRoute=None, Multiplier=None, Name=None, NoOfEpePeers=None, NoOfExtendedCommunities=None, NoOfUserDefinedAfiSafi=None, NumberColorFlexAlgoMapping=None, NumberFlowSpecRangeV4=None, NumberFlowSpecRangeV6=None, NumberSRTEPolicies=None, SRGBRangeCount=None, SegmentLeftValue=None, SessionInfo=None, SessionStatus=None, Srv6Ttl=None, StackedLayers=None, StateCounts=None, Status=None, UdpPortEndValue=None, UdpPortStartValue=None, UseStaticPolicy=None):
"""Finds and retrieves bgpIpv6Peer resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpIpv6Peer resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpIpv6Peer resources from the server.
Args
----
- AdvertiseEvpnRoutesForOtherVtep (bool): Advertise EVPN routes for other VTEPS
- AutoGenSegmentLeftValue (bool): If enabled then Segment Left field value will be auto generated
- BgpFsmState (list(str[active | connect | error | established | idle | none | openConfirm | openSent])): Logs additional information about the BGP Peer State
- BgpLsNoOfASPathSegments (number): Number Of AS Path Segments Per Route Range
- BgpLsNoOfClusters (number): Number of Clusters
- BgpLsNoOfCommunities (number): Number of Communities
- CapabilityIpv4MplsAddPath (bool): IPv4 MPLS Add Path Capability
- CapabilityIpv6MplsAddPath (bool): IPv6 MPLS Add Path Capability
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer connects through to reach the wire.
- CopyTtl (bool): Copy TTL from customer packet to outer IPv6 header
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- DiscoveredDutIp (list(str)): The discovered DUT IP addresses.
- EnSRv6DataPlane (bool): Ingress Peer Supports SRv6 VPN
- EnableEpeTraffic (bool): Enable EPE Traffic
- EnableReducedEncapsulation (bool): Enable Reduced Encapsulation in Data-Plane for SRv6
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- EthernetSegmentsCountV6 (number): Number of Ethernet Segments
- IpVrfToIpVrfType (str(interfaceLess | interfacefullWithCorefacingIRB | interfacefullWithUnnumberedCorefacingIRB)): IP-VRF-to-IP-VRF Model Type
- Ipv4MplsCapability (bool): IPv4 MPLS Capability: AFI=1, SAFI=4
- Ipv4MultipleMplsLabelsCapability (bool): IPv4 Multiple MPLS Labels Capability: AFI=1, SAFI=4
- Ipv6MplsCapability (bool): IPv6 MPLS Capability: AFI=2, SAFI=4
- Ipv6MultipleMplsLabelsCapability (bool): IPv6 Multiple MPLS Labels Capability: AFI=2, SAFI=4
- LocalIpv6Ver2 (list(str)): Local IP
- LocalRouterID (list(str)): Router ID
- MaxSidPerSrh (number): Max number of SIDs a SRH can have
- MplsLabelsCountForIpv4MplsRoute (number): MPLS Labels Count For IPv4 MPLS Route
- MplsLabelsCountForIpv6MplsRoute (number): MPLS Labels Count For IPv6 MPLS Route
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NoOfEpePeers (number): Number of EPE Peers
- NoOfExtendedCommunities (number): Number of Extended Communities
- NoOfUserDefinedAfiSafi (number): Count of User defined AFI SAFI
- NumberColorFlexAlgoMapping (number): Number of Color/Flex Algo Mapping Entries
- NumberFlowSpecRangeV4 (number): Number of IPv4 Flow Spec Ranges
- NumberFlowSpecRangeV6 (number): Number of IPv6 Flow Spec Ranges
- NumberSRTEPolicies (number): Count of SR TE Policies
- SRGBRangeCount (number): SRGB Range Count
- SegmentLeftValue (number): Segment Left value to be used in the top SRH. This zero-index-based value starts from the egress node.
- SessionInfo (list(str[aSRoutingLoopErrorRx | attributeFlagErrorRx | attributesLengthErrorRx | authenticationFailureErrorRx | badBGPIdentifierErrorRx | badMessageLengthErrorRx | badMessageTypeErrorRx | badPeerASErrorRx | bGPHeaderErrorRx | bGPHeaderErrorTx | bGPHoldTimerExpiredErrorRx | bGPOpenPacketErrorRx | bGPStateMachineErrorRx | bGPUpdatePacketErrorRx | ceaseErrorRx | ceaseNotificationErrorTx | connectionNotsynchronizedErrorRx | holdtimeExpiredErrorTx | invalidASPathErrorRx | invalidNetworkFieldErrorRx | invalidNextHopAttributeErrorRx | invalidOriginAttributeErrorRx | malformedAttributeListErrorRx | missingWellKnownAttributeErrorRx | none | openPacketErrTx | optionalAttributeErrorRx | stateMachineErrorTx | unacceptableHoldTimeErrorRx | unrecognizedWellKnownAttributeErrorRx | unspecifiedErrorRx | unspecifiedErrorTx | unspecifiedSubcodeErrorRx | unsupportedOptionalParameterErrorRx | unsupportedversionNumberErrorRx | updatePacketErrorTx])): Logs additional information about the session state
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation didn't complete successfully (yet). Up - session came up successfully.
- Srv6Ttl (number): TTL value to be used in outer IPv6 header
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
- UdpPortEndValue (number): UDP Port End Value
- UdpPortStartValue (number): UDP Port Start Value
- UseStaticPolicy (bool): If enabled then SRTE policy will be advertised
Returns
-------
- self: This instance with matching bgpIpv6Peer resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
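Example
-------
Illustrative only; `bgp_v6_peer` is an assumed container and the name pattern is arbitrary. The ^ and $ anchors force an exact match, as noted above.
matched = bgp_v6_peer.find(Name='^core peer$')
print([peer.Name for peer in matched])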
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpIpv6Peer data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpIpv6Peer resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
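Example
-------
Sketch only; the href below is a made-up placeholder, not a real instance path.
peer = bgp_v6_peer.read('/api/v1/sessions/1/ixnetwork/topology/1/deviceGroup/1/ethernet/1/ipv6/1/bgpIpv6Peer/1')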
"""
return self._read(href)
def get_device_ids(self, PortNames=None, ActAsRestarted=None, Active=None, AdvSrv6SidInIgp=None, AdvertiseEndOfRib=None, AdvertiseSRv6SID=None, AdvertiseTunnelEncapsulationExtendedCommunity=None, AlwaysIncludeTunnelEncExtCommunity=None, AsSetMode=None, Authentication=None, BgpId=None, BgpLsAsSetMode=None, BgpLsEnableAsPathSegments=None, BgpLsEnableCluster=None, BgpLsEnableExtendedCommunity=None, BgpLsOverridePeerAsSetMode=None, BgpUnnumbered=None, CapabilityIpV4Mdt=None, CapabilityIpV4Mpls=None, CapabilityIpV4MplsVpn=None, CapabilityIpV4Multicast=None, CapabilityIpV4MulticastVpn=None, CapabilityIpV4Unicast=None, CapabilityIpV6Mpls=None, CapabilityIpV6MplsVpn=None, CapabilityIpV6Multicast=None, CapabilityIpV6MulticastVpn=None, CapabilityIpV6Unicast=None, CapabilityIpv4UnicastAddPath=None, CapabilityIpv6UnicastAddPath=None, CapabilityLinkStateNonVpn=None, CapabilityLinkStateVpn=None, CapabilityNHEncodingCapabilities=None, CapabilityRouteConstraint=None, CapabilityRouteRefresh=None, CapabilitySRTEPoliciesV4=None, CapabilitySRTEPoliciesV6=None, CapabilityVpls=None, Capabilityipv4UnicastFlowSpec=None, Capabilityipv6UnicastFlowSpec=None, ConfigureKeepaliveTimer=None, CustomSidType=None, DiscardIxiaGeneratedRoutes=None, DowntimeInSec=None, DutIp=None, Enable4ByteAs=None, EnableBfdRegistration=None, EnableBgpId=None, EnableBgpIdSameAsRouterId=None, EnableBgpLsCommunity=None, EnableGracefulRestart=None, EnableLlgr=None, Evpn=None, FilterEvpn=None, FilterIpV4Mpls=None, FilterIpV4MplsVpn=None, FilterIpV4Multicast=None, FilterIpV4MulticastVpn=None, FilterIpV4Unicast=None, FilterIpV6Mpls=None, FilterIpV6MplsVpn=None, FilterIpV6Multicast=None, FilterIpV6MulticastVpn=None, FilterIpV6Unicast=None, FilterIpv4MulticastBgpMplsVpn=None, FilterIpv4UnicastFlowSpec=None, FilterIpv6MulticastBgpMplsVpn=None, FilterIpv6UnicastFlowSpec=None, FilterLinkState=None, FilterLinkStateVpn=None, FilterSRTEPoliciesV4=None, FilterSRTEPoliciesV6=None, FilterVpls=None, Flap=None, HoldTimer=None, Ipv4MplsAddPathMode=None, Ipv4MulticastBgpMplsVpn=None, Ipv4UnicastAddPathMode=None, Ipv6MplsAddPathMode=None, Ipv6MulticastBgpMplsVpn=None, Ipv6UnicastAddPathMode=None, IrbInterfaceLabel=None, IrbIpv6Address=None, KeepaliveTimer=None, L3VPNEncapsulationType=None, LocalAs2Bytes=None, LocalAs4Bytes=None, Md5Key=None, ModeOfBfdOperations=None, NumBgpLsId=None, NumBgpLsInstanceIdentifier=None, NumBgpUpdatesGeneratedPerIteration=None, OperationalModel=None, RestartTime=None, RoutersMacOrIrbMacAddress=None, SendIxiaSignatureWithRoutes=None, SendSRv6SIDOptionalInfo=None, Srv6EndpointBehavior=None, Srv6SIDOptionalInformation=None, Srv6SidFlags=None, Srv6SidLoc=None, Srv6SidLocLen=None, Srv6SidLocMetric=None, Srv6SidReserved=None, Srv6SidReserved1=None, Srv6SidReserved2=None, StaleTime=None, TcpWindowSizeInBytes=None, Ttl=None, Type=None, UpdateInterval=None, UptimeInSec=None, UseGatewayAsDutIp=None, VplsEnableNextHop=None, VplsNextHop=None):
"""Base class infrastructure that gets a list of bgpIpv6Peer device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- ActAsRestarted (str): optional regex of actAsRestarted
- Active (str): optional regex of active
- AdvSrv6SidInIgp (str): optional regex of advSrv6SidInIgp
- AdvertiseEndOfRib (str): optional regex of advertiseEndOfRib
- AdvertiseSRv6SID (str): optional regex of advertiseSRv6SID
- AdvertiseTunnelEncapsulationExtendedCommunity (str): optional regex of advertiseTunnelEncapsulationExtendedCommunity
- AlwaysIncludeTunnelEncExtCommunity (str): optional regex of alwaysIncludeTunnelEncExtCommunity
- AsSetMode (str): optional regex of asSetMode
- Authentication (str): optional regex of authentication
- BgpId (str): optional regex of bgpId
- BgpLsAsSetMode (str): optional regex of bgpLsAsSetMode
- BgpLsEnableAsPathSegments (str): optional regex of bgpLsEnableAsPathSegments
- BgpLsEnableCluster (str): optional regex of bgpLsEnableCluster
- BgpLsEnableExtendedCommunity (str): optional regex of bgpLsEnableExtendedCommunity
- BgpLsOverridePeerAsSetMode (str): optional regex of bgpLsOverridePeerAsSetMode
- BgpUnnumbered (str): optional regex of bgpUnnumbered
- CapabilityIpV4Mdt (str): optional regex of capabilityIpV4Mdt
- CapabilityIpV4Mpls (str): optional regex of capabilityIpV4Mpls
- CapabilityIpV4MplsVpn (str): optional regex of capabilityIpV4MplsVpn
- CapabilityIpV4Multicast (str): optional regex of capabilityIpV4Multicast
- CapabilityIpV4MulticastVpn (str): optional regex of capabilityIpV4MulticastVpn
- CapabilityIpV4Unicast (str): optional regex of capabilityIpV4Unicast
- CapabilityIpV6Mpls (str): optional regex of capabilityIpV6Mpls
- CapabilityIpV6MplsVpn (str): optional regex of capabilityIpV6MplsVpn
- CapabilityIpV6Multicast (str): optional regex of capabilityIpV6Multicast
- CapabilityIpV6MulticastVpn (str): optional regex of capabilityIpV6MulticastVpn
- CapabilityIpV6Unicast (str): optional regex of capabilityIpV6Unicast
- CapabilityIpv4UnicastAddPath (str): optional regex of capabilityIpv4UnicastAddPath
- CapabilityIpv6UnicastAddPath (str): optional regex of capabilityIpv6UnicastAddPath
- CapabilityLinkStateNonVpn (str): optional regex of capabilityLinkStateNonVpn
- CapabilityLinkStateVpn (str): optional regex of capabilityLinkStateVpn
- CapabilityNHEncodingCapabilities (str): optional regex of capabilityNHEncodingCapabilities
- CapabilityRouteConstraint (str): optional regex of capabilityRouteConstraint
- CapabilityRouteRefresh (str): optional regex of capabilityRouteRefresh
- CapabilitySRTEPoliciesV4 (str): optional regex of capabilitySRTEPoliciesV4
- CapabilitySRTEPoliciesV6 (str): optional regex of capabilitySRTEPoliciesV6
- CapabilityVpls (str): optional regex of capabilityVpls
- Capabilityipv4UnicastFlowSpec (str): optional regex of capabilityipv4UnicastFlowSpec
- Capabilityipv6UnicastFlowSpec (str): optional regex of capabilityipv6UnicastFlowSpec
- ConfigureKeepaliveTimer (str): optional regex of configureKeepaliveTimer
- CustomSidType (str): optional regex of customSidType
- DiscardIxiaGeneratedRoutes (str): optional regex of discardIxiaGeneratedRoutes
- DowntimeInSec (str): optional regex of downtimeInSec
- DutIp (str): optional regex of dutIp
- Enable4ByteAs (str): optional regex of enable4ByteAs
- EnableBfdRegistration (str): optional regex of enableBfdRegistration
- EnableBgpId (str): optional regex of enableBgpId
- EnableBgpIdSameAsRouterId (str): optional regex of enableBgpIdSameAsRouterId
- EnableBgpLsCommunity (str): optional regex of enableBgpLsCommunity
- EnableGracefulRestart (str): optional regex of enableGracefulRestart
- EnableLlgr (str): optional regex of enableLlgr
- Evpn (str): optional regex of evpn
- FilterEvpn (str): optional regex of filterEvpn
- FilterIpV4Mpls (str): optional regex of filterIpV4Mpls
- FilterIpV4MplsVpn (str): optional regex of filterIpV4MplsVpn
- FilterIpV4Multicast (str): optional regex of filterIpV4Multicast
- FilterIpV4MulticastVpn (str): optional regex of filterIpV4MulticastVpn
- FilterIpV4Unicast (str): optional regex of filterIpV4Unicast
- FilterIpV6Mpls (str): optional regex of filterIpV6Mpls
- FilterIpV6MplsVpn (str): optional regex of filterIpV6MplsVpn
- FilterIpV6Multicast (str): optional regex of filterIpV6Multicast
- FilterIpV6MulticastVpn (str): optional regex of filterIpV6MulticastVpn
- FilterIpV6Unicast (str): optional regex of filterIpV6Unicast
- FilterIpv4MulticastBgpMplsVpn (str): optional regex of filterIpv4MulticastBgpMplsVpn
- FilterIpv4UnicastFlowSpec (str): optional regex of filterIpv4UnicastFlowSpec
- FilterIpv6MulticastBgpMplsVpn (str): optional regex of filterIpv6MulticastBgpMplsVpn
- FilterIpv6UnicastFlowSpec (str): optional regex of filterIpv6UnicastFlowSpec
- FilterLinkState (str): optional regex of filterLinkState
- FilterLinkStateVpn (str): optional regex of filterLinkStateVpn
- FilterSRTEPoliciesV4 (str): optional regex of filterSRTEPoliciesV4
- FilterSRTEPoliciesV6 (str): optional regex of filterSRTEPoliciesV6
- FilterVpls (str): optional regex of filterVpls
- Flap (str): optional regex of flap
- HoldTimer (str): optional regex of holdTimer
- Ipv4MplsAddPathMode (str): optional regex of ipv4MplsAddPathMode
- Ipv4MulticastBgpMplsVpn (str): optional regex of ipv4MulticastBgpMplsVpn
- Ipv4UnicastAddPathMode (str): optional regex of ipv4UnicastAddPathMode
- Ipv6MplsAddPathMode (str): optional regex of ipv6MplsAddPathMode
- Ipv6MulticastBgpMplsVpn (str): optional regex of ipv6MulticastBgpMplsVpn
- Ipv6UnicastAddPathMode (str): optional regex of ipv6UnicastAddPathMode
- IrbInterfaceLabel (str): optional regex of irbInterfaceLabel
- IrbIpv6Address (str): optional regex of irbIpv6Address
- KeepaliveTimer (str): optional regex of keepaliveTimer
- L3VPNEncapsulationType (str): optional regex of l3VPNEncapsulationType
- LocalAs2Bytes (str): optional regex of localAs2Bytes
- LocalAs4Bytes (str): optional regex of localAs4Bytes
- Md5Key (str): optional regex of md5Key
- ModeOfBfdOperations (str): optional regex of modeOfBfdOperations
- NumBgpLsId (str): optional regex of numBgpLsId
- NumBgpLsInstanceIdentifier (str): optional regex of numBgpLsInstanceIdentifier
- NumBgpUpdatesGeneratedPerIteration (str): optional regex of numBgpUpdatesGeneratedPerIteration
- OperationalModel (str): optional regex of operationalModel
- RestartTime (str): optional regex of restartTime
- RoutersMacOrIrbMacAddress (str): optional regex of routersMacOrIrbMacAddress
- SendIxiaSignatureWithRoutes (str): optional regex of sendIxiaSignatureWithRoutes
- SendSRv6SIDOptionalInfo (str): optional regex of sendSRv6SIDOptionalInfo
- Srv6EndpointBehavior (str): optional regex of srv6EndpointBehavior
- Srv6SIDOptionalInformation (str): optional regex of srv6SIDOptionalInformation
- Srv6SidFlags (str): optional regex of srv6SidFlags
- Srv6SidLoc (str): optional regex of srv6SidLoc
- Srv6SidLocLen (str): optional regex of srv6SidLocLen
- Srv6SidLocMetric (str): optional regex of srv6SidLocMetric
- Srv6SidReserved (str): optional regex of srv6SidReserved
- Srv6SidReserved1 (str): optional regex of srv6SidReserved1
- Srv6SidReserved2 (str): optional regex of srv6SidReserved2
- StaleTime (str): optional regex of staleTime
- TcpWindowSizeInBytes (str): optional regex of tcpWindowSizeInBytes
- Ttl (str): optional regex of ttl
- Type (str): optional regex of type
- UpdateInterval (str): optional regex of updateInterval
- UptimeInSec (str): optional regex of uptimeInSec
- UseGatewayAsDutIp (str): optional regex of useGatewayAsDutIp
- VplsEnableNextHop (str): optional regex of vplsEnableNextHop
- VplsNextHop (str): optional regex of vplsNextHop
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
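Example
-------
Illustrative regex filters only; the port name and the 'true' value for Active are assumptions about the per-device values on the server.
device_ids = bgp_v6_peer.get_device_ids(PortNames='^Port 1$', Active='^true$')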
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
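Example
-------
Both documented signatures, sketched on an assumed `bgp_v6_peer` object.
bgp_v6_peer.Abort(SessionIndices=[1, 2, 3])
bgp_v6_peer.Abort(SessionIndices='1-4;6')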
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def BgpIPv4FlowSpecLearnedInfo(self, *args, **kwargs):
"""Executes the bgpIPv4FlowSpecLearnedInfo operation on the server.
Get IPv4 FlowSpec Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
bgpIPv4FlowSpecLearnedInfo(SessionIndices=list)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
bgpIPv4FlowSpecLearnedInfo(SessionIndices=string)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
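Example
-------
Sketch only; requests IPv4 FlowSpec learned info for sessions 1-2 on an assumed `bgp_v6_peer` object.
bgp_v6_peer.BgpIPv4FlowSpecLearnedInfo(SessionIndices='1-2')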
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('bgpIPv4FlowSpecLearnedInfo', payload=payload, response_object=None)
def BgpIPv6FlowSpecLearnedInfo(self, *args, **kwargs):
"""Executes the bgpIPv6FlowSpecLearnedInfo operation on the server.
Get IPv6 FlowSpec Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
bgpIPv6FlowSpecLearnedInfo(SessionIndices=list)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
bgpIPv6FlowSpecLearnedInfo(SessionIndices=string)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
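Example
-------
Same calling pattern as the IPv4 variant above; sketch only.
bgp_v6_peer.BgpIPv6FlowSpecLearnedInfo(SessionIndices=[1, 2])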
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('bgpIPv6FlowSpecLearnedInfo', payload=payload, response_object=None)
def BreakTCPSession(self, *args, **kwargs):
"""Executes the breakTCPSession operation on the server.
Break TCP Session
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
breakTCPSession(Notification_code=number, Notification_sub_code=number)
-----------------------------------------------------------------------
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
breakTCPSession(Notification_code=number, Notification_sub_code=number, SessionIndices=list)
--------------------------------------------------------------------------------------------
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
breakTCPSession(SessionIndices=string, Notification_code=number, Notification_sub_code=number)
----------------------------------------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('breakTCPSession', payload=payload, response_object=None)
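# Usage sketch (illustrative only): BreakTCPSession takes BGP NOTIFICATION
# code/sub-code values alongside the usual session selection. The values below
# (6 = Cease, 4 = Administrative Reset) and the `bgp_peer` variable are
# examples, not defaults:
#
#     bgp_peer.BreakTCPSession(Notification_code=6, Notification_sub_code=4,
#                              SessionIndices=[1])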
def Breaktcpsession(self, *args, **kwargs):
"""Executes the breaktcpsession operation on the server.
Break BGP Peer Range TCP Session.
breaktcpsession(Arg2=list, Arg3=number, Arg4=number)list
--------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Arg3 (number): Notification Code
- Arg4 (number): Notification Sub Code
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('breaktcpsession', payload=payload, response_object=None)
def ClearAllLearnedInfo(self, *args, **kwargs):
"""Executes the clearAllLearnedInfo operation on the server.
Clear All Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
clearAllLearnedInfo(SessionIndices=list)
----------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
clearAllLearnedInfo(SessionIndices=string)
------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearAllLearnedInfo', payload=payload, response_object=None)
def ClearAllLearnedInfoInClient(self, *args, **kwargs):
"""Executes the clearAllLearnedInfoInClient operation on the server.
Clears ALL routes from GUI grid for the selected BGP Peers.
clearAllLearnedInfoInClient(Arg2=list)list
------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('clearAllLearnedInfoInClient', payload=payload, response_object=None)
def GetADVPLSLearnedInfo(self, *args, **kwargs):
"""Executes the getADVPLSLearnedInfo operation on the server.
Get ADVPLS Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getADVPLSLearnedInfo(SessionIndices=list)
-----------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getADVPLSLearnedInfo(SessionIndices=string)
-------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getADVPLSLearnedInfo(Arg2=list)list
-----------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getADVPLSLearnedInfo', payload=payload, response_object=None)
def GetAllLearnedInfo(self, *args, **kwargs):
"""Executes the getAllLearnedInfo operation on the server.
Get All Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getAllLearnedInfo(SessionIndices=list)
--------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getAllLearnedInfo(SessionIndices=string)
----------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getAllLearnedInfo(Arg2=list)list
--------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getAllLearnedInfo', payload=payload, response_object=None)
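# Usage sketch (illustrative only): the Arg2 form of the learned-info
# operations runs asynchronously and returns one ID per invocation; an empty
# list selects every session in the plugin:
#
#     request_ids = bgp_peer.GetAllLearnedInfo(Arg2=[])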
def GetbgpIpv4FlowSpecLearnedInfoLearnedInfo(self, *args, **kwargs):
"""Executes the getbgpIpv4FlowSpecLearnedInfoLearnedInfo operation on the server.
getbgpIpv4FlowSpecLearnedInfoLearnedInfo(Arg2=list)list
-------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): Please provide a proper description here.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getbgpIpv4FlowSpecLearnedInfoLearnedInfo', payload=payload, response_object=None)
def GetbgpIpv6FlowSpecLearnedInfoLearnedInfo(self, *args, **kwargs):
"""Executes the getbgpIpv6FlowSpecLearnedInfoLearnedInfo operation on the server.
getbgpIpv6FlowSpecLearnedInfoLearnedInfo(Arg2=list)list
-------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): Please provide a proper description here.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getbgpIpv6FlowSpecLearnedInfoLearnedInfo', payload=payload, response_object=None)
def GetbgpSrTeLearnedInfoLearnedInfo(self, *args, **kwargs):
"""Executes the getbgpSrTeLearnedInfoLearnedInfo operation on the server.
getbgpSrTeLearnedInfoLearnedInfo(Arg2=list)list
-----------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): Please provide a proper description here.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getbgpSrTeLearnedInfoLearnedInfo', payload=payload, response_object=None)
def GetEVPNLearnedInfo(self, *args, **kwargs):
"""Executes the getEVPNLearnedInfo operation on the server.
Get EVPN Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getEVPNLearnedInfo(SessionIndices=list)
---------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getEVPNLearnedInfo(SessionIndices=string)
-----------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getEVPNLearnedInfo(Arg2=list)list
---------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getEVPNLearnedInfo', payload=payload, response_object=None)
def GetIPv4LearnedInfo(self, *args, **kwargs):
"""Executes the getIPv4LearnedInfo operation on the server.
Get IPv4 Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getIPv4LearnedInfo(SessionIndices=list)
---------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getIPv4LearnedInfo(SessionIndices=string)
-----------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getIPv4LearnedInfo(Arg2=list)list
---------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv4LearnedInfo', payload=payload, response_object=None)
def GetIPv4MplsLearnedInfo(self, *args, **kwargs):
"""Executes the getIPv4MplsLearnedInfo operation on the server.
Fetches IPv4 MPLS routes learnt by this BGP peer.
getIPv4MplsLearnedInfo(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv4MplsLearnedInfo', payload=payload, response_object=None)
def GetIpv4MvpnLearnedInfo(self, *args, **kwargs):
"""Executes the getIpv4MvpnLearnedInfo operation on the server.
Fetches MVPN MAC IP routes learnt by this BGP peer.
getIpv4MvpnLearnedInfo(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIpv4MvpnLearnedInfo', payload=payload, response_object=None)
def GetIpv4UmhRoutesLearnedInfo(self, *args, **kwargs):
"""Executes the getIpv4UmhRoutesLearnedInfo operation on the server.
Fetches Umh Routes learned by this BGP peer.
getIpv4UmhRoutesLearnedInfo(Arg2=list)list
------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIpv4UmhRoutesLearnedInfo', payload=payload, response_object=None)
def GetIPv4VpnLearnedInfo(self, *args, **kwargs):
"""Executes the getIPv4VpnLearnedInfo operation on the server.
Get IPv4 Vpn Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getIPv4VpnLearnedInfo(SessionIndices=list)
------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getIPv4VpnLearnedInfo(SessionIndices=string)
--------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getIPv4VpnLearnedInfo(Arg2=list)list
------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv4VpnLearnedInfo', payload=payload, response_object=None)
def GetIPv6LearnedInfo(self, *args, **kwargs):
"""Executes the getIPv6LearnedInfo operation on the server.
Get IPv6 Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getIPv6LearnedInfo(SessionIndices=list)
---------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getIPv6LearnedInfo(SessionIndices=string)
-----------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getIPv6LearnedInfo(Arg2=list)list
---------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv6LearnedInfo', payload=payload, response_object=None)
def GetIPv6MplsLearnedInfo(self, *args, **kwargs):
"""Executes the getIPv6MplsLearnedInfo operation on the server.
Gets IPv6 Mpls routes learnt by this BGP peer.
getIPv6MplsLearnedInfo(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv6MplsLearnedInfo', payload=payload, response_object=None)
def GetIpv6MvpnLearnedInfo(self, *args, **kwargs):
"""Executes the getIpv6MvpnLearnedInfo operation on the server.
Fetches MVPN MAC IP routes learnt by this BGP peer.
getIpv6MvpnLearnedInfo(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIpv6MvpnLearnedInfo', payload=payload, response_object=None)
def GetIpv6UmhRoutesLearnedInfo(self, *args, **kwargs):
"""Executes the getIpv6UmhRoutesLearnedInfo operation on the server.
Fetches Umh Route learned by this BGP peer.
getIpv6UmhRoutesLearnedInfo(Arg2=list)list
------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIpv6UmhRoutesLearnedInfo', payload=payload, response_object=None)
def GetIPv6VpnLearnedInfo(self, *args, **kwargs):
"""Executes the getIPv6VpnLearnedInfo operation on the server.
Get IPv6 Vpn Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getIPv6VpnLearnedInfo(SessionIndices=list)
------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getIPv6VpnLearnedInfo(SessionIndices=string)
--------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getIPv6VpnLearnedInfo(Arg2=list)list
------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getIPv6VpnLearnedInfo', payload=payload, response_object=None)
def GetLinkStateLearnedInfo(self, *args, **kwargs):
"""Executes the getLinkStateLearnedInfo operation on the server.
Get Link State Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getLinkStateLearnedInfo(SessionIndices=list)
--------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getLinkStateLearnedInfo(SessionIndices=string)
----------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getLinkStateLearnedInfo(Arg2=list)list
--------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getLinkStateLearnedInfo', payload=payload, response_object=None)
def GetLinkStateVPNLearnedInfo(self, *args, **kwargs):
"""Executes the getLinkStateVPNLearnedInfo operation on the server.
Get Link State VPN Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getLinkStateVPNLearnedInfo(SessionIndices=list)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getLinkStateVPNLearnedInfo(SessionIndices=string)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getLinkStateVPNLearnedInfo(Arg2=list)list
-----------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getLinkStateVPNLearnedInfo', payload=payload, response_object=None)
def GetVPLSLearnedInfo(self, *args, **kwargs):
"""Executes the getVPLSLearnedInfo operation on the server.
Get VPLS Learned Info
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
getVPLSLearnedInfo(SessionIndices=list)
---------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
getVPLSLearnedInfo(SessionIndices=string)
-----------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
getVPLSLearnedInfo(Arg2=list)list
---------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getVPLSLearnedInfo', payload=payload, response_object=None)
def GracefulRestart(self, *args, **kwargs):
"""Executes the gracefulRestart operation on the server.
Graceful restart Peers on selected Peer Ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
gracefulRestart(Restart_time=number)
------------------------------------
- Restart_time (number): This parameter requires a restart_time of type kInteger
gracefulRestart(Restart_time=number, SessionIndices=list)
---------------------------------------------------------
- Restart_time (number): This parameter requires a restart_time of type kInteger
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
gracefulRestart(SessionIndices=string, Restart_time=number)
-----------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- Restart_time (number): This parameter requires a restart_time of type kInteger
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('gracefulRestart', payload=payload, response_object=None)
def Gracefulrestart(self, *args, **kwargs):
"""Executes the gracefulrestart operation on the server.
Graceful restart Peers on selected Peer Ranges.
gracefulrestart(Arg2=list, Arg3=number)list
-------------------------------------------
- Arg2 (list(number)): List of indices into the group. An empty list indicates all instances in the group.
- Arg3 (number): Restart After Time(in secs).
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('gracefulrestart', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def ResumeKeepAlive(self, *args, **kwargs):
"""Executes the resumeKeepAlive operation on the server.
Resume sending KeepAlive
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
resumeKeepAlive(SessionIndices=list)
------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
resumeKeepAlive(SessionIndices=string)
--------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumeKeepAlive', payload=payload, response_object=None)
def Resumekeepalive(self, *args, **kwargs):
"""Executes the resumekeepalive operation on the server.
Start Sending Keep Alive Messages.
resumekeepalive(Arg2=list)list
------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumekeepalive', payload=payload, response_object=None)
def ResumeTCPSession(self, *args, **kwargs):
"""Executes the resumeTCPSession operation on the server.
Resume TCP Session
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
resumeTCPSession(Notification_code=number, Notification_sub_code=number)
------------------------------------------------------------------------
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
resumeTCPSession(Notification_code=number, Notification_sub_code=number, SessionIndices=list)
---------------------------------------------------------------------------------------------
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
resumeTCPSession(SessionIndices=string, Notification_code=number, Notification_sub_code=number)
-----------------------------------------------------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- Notification_code (number): This parameter requires a notification_code of type kInteger
- Notification_sub_code (number): This parameter requires a notification_sub_code of type kInteger
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumeTCPSession', payload=payload, response_object=None)
def Resumetcpsession(self, *args, **kwargs):
"""Executes the resumetcpsession operation on the server.
Resume BGP Peer Range TCP Session.
resumetcpsession(Arg2=list, Arg3=number, Arg4=number)list
---------------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Arg3 (number): Notification Code
- Arg4 (number): Notification Sub Code
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resumetcpsession', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def StopKeepAlive(self, *args, **kwargs):
"""Executes the stopKeepAlive operation on the server.
Stop sending KeepAlive
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stopKeepAlive(SessionIndices=list)
----------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stopKeepAlive(SessionIndices=string)
------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopKeepAlive', payload=payload, response_object=None)
def Stopkeepalive(self, *args, **kwargs):
"""Executes the stopkeepalive operation on the server.
Stop Sending Keep Alive Messages.
stopkeepalive(Arg2=list)list
----------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopkeepalive', payload=payload, response_object=None)
| 45.707617 | 2,945 | 0.651716 | [
"MIT"
] | rfrye-github/ixnetwork_restpy | uhd_restpy/testplatform/sessions/ixnetwork/topology/bgpipv6peer_d4ac277d9da759fd5a152b8e6eb0ab20.py | 159,611 | Python |
# old functions (slightly more time efficient) which store forward and reverse frames in memory and don't use
# any multiprocessing.
# NOTE: DNA_TABLE, createReverseSeq, seqToProteinNew and createSeqObj are
# assumed to be provided by the package's main six-frame translation module.
import time
from Bio import SeqIO
def seqToProtein(dnaSeq, minLen):
newSeq = dnaSeq.upper().replace('N', '')
start = time.time()
forwFrames, revFrames = seqToFrames(newSeq)
peptides = []
for frame in forwFrames:
peptide = tripletToAmino(frame, minLen)
peptides += peptide
for frame in revFrames:
peptide = tripletToAmino(frame, minLen)
peptides += peptide
end = time.time()
print(end-start)
return peptides
def seqToFrames(dnaSeq):
forward = dnaSeq
reverse = createReverseSeq(dnaSeq)
forwardFrames = createFrames(forward)
reverseFrames = createFrames(reverse)
return forwardFrames, reverseFrames
def createFrames(dnaSeq):
frames = [[],[],[]]
for i in range(0,3):
frame = frames[i]
for j in range(0, len(dnaSeq), 3):
if i+j < len(dnaSeq)-2:
triplet = dnaSeq[i+j:i+j+3]
frame.append(triplet)
return frames
# incorporate start triplet
def tripletToAmino(frame, minLen):
aminoList = []
peptideList = []
for triplet in frame:
amino = DNA_TABLE[triplet]
if amino == -1:
if len(aminoList) > minLen:
peptideList.append(''.join(aminoList))
aminoList.clear()
else:
aminoList.append(amino)
if len(aminoList) > minLen:
peptideList.append(''.join(aminoList))
aminoList.clear()
return peptideList
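# Example (sketch, assuming DNA_TABLE follows the standard genetic code and
# maps stop codons to -1, as the check above implies):
#
# frame = ['ATG', 'GCC', 'TAA', 'ATG', 'AAA']
# tripletToAmino(frame, minLen=1) # -> ['MA', 'MK'], split at the stop codon 'TAA'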
def parseFastaDna(input_path):
# fasta_sequences = SeqIO.parse(open(input_path), 'fasta')
sequenceDictionary = {}
# for fasta in fasta_sequences:
# name, sequence = fasta.id, str(fasta.seq)
# sequence = sequence.upper().replace("N", "")
# #sequenceDictionary[name] = sequence.upper()
# print(seqToProtein(sequence))
# break;
with open(input_path, "r") as handle:
counter = 0
# convert to tuple and look to start multiprocessing from here
for record in SeqIO.parse(handle, 'fasta'):
counter += 1
print(counter)
sequenceDictionary[record.name] = record.seq
return sequenceDictionary
def generateOutput(outputPath, minLen, inputFile):
finalPeptides = {}
seqDict = parseFastaDna(inputFile)
for key, value in seqDict.items():
dnaSeq = str(value).upper()
peptides = seqToProteinNew(dnaSeq, minLen)
for peptide in peptides:
if peptide not in finalPeptides.keys():
finalPeptides[peptide] = [key]
else:
finalPeptides[peptide].append(key)
print(finalPeptides)
saveHandle = outputPath + '/DNAFastaProteins.fasta'
with open(saveHandle, "w") as output_handle:
SeqIO.write(createSeqObj(finalPeptides), output_handle, "fasta")
| 29.06 | 109 | 0.621817 | [
"Apache-2.0"
] | nhcha6/SixFrameTranslationNew | DNAtoPep/OldSixFrameFunctions.py | 2,906 | Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import blockdiag.elements
from blockdiag.utils import images, Size, XY
from blockdiag.utils.logging import warning
class NodeGroup(blockdiag.elements.NodeGroup):
pass
class DiagramNode(blockdiag.elements.DiagramNode):
def __init__(self, _id):
super(DiagramNode, self).__init__(_id)
self.activated = False
self.activity = []
self.activities = []
def set_activated(self, value):
self.activated = True
def activate(self, height, index):
if len(self.activity) <= index:
self.activity.insert(index, [])
if (len(self.activity[index]) > 0 and
(self.activity[index][-1] != height - 1)):
self.deactivate(index)
self.activity[index].append(height)
def deactivate(self, index=None):
if index is None:
for i in range(len(self.activity)):
self.deactivate(i)
return
if self.activity[index]:
attr = {'lifetime': self.activity[index],
'level': index}
self.activities.append(attr)
self.activity[index] = []
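# Usage sketch: consecutive activate() calls with adjacent heights extend one
# lifeline bar; a gap or an explicit deactivate() closes it and records it in
# `activities`:
#
# node = DiagramNode(None)
# node.activate(2, 0)
# node.activate(3, 0)
# node.deactivate(0)
# node.activities # -> [{'lifetime': [2, 3], 'level': 0}]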
class EdgeSeparator(blockdiag.elements.Base):
basecolor = (208, 208, 208)
linecolor = (0, 0, 0)
@classmethod
def clear(cls):
super(EdgeSeparator, cls).clear()
cls.basecolor = (208, 208, 208)
cls.linecolor = (0, 0, 0)
def __init__(self, _type, label):
super(EdgeSeparator, self).__init__()
self.label = label
self.group = None
self.style = None
self.color = self.basecolor
self.order = 0
if _type == '===':
self.type = 'divider'
elif _type == '...':
self.type = 'delay'
class DiagramEdge(blockdiag.elements.DiagramEdge):
notecolor = (255, 182, 193) # LightPink
# name -> (dir, style, asynchronous)
ARROW_DEF = {
'both': ('both', None, False),
'=>': ('both', None, False),
'forward': ('forward', None, False),
'->': ('forward', None, False),
'-->': ('forward', 'dashed', False),
'->>': ('forward', None, True),
'-->>': ('forward', 'dashed', True),
'back': ('back', None, False),
'<-': ('back', None, False),
'<--': ('back', 'dashed', False),
'<<-': ('back', None, True),
'<<--': ('back', 'dashed', True)
}
@classmethod
def clear(cls):
super(DiagramEdge, cls).clear()
cls.notecolor = (255, 182, 193)
@classmethod
def set_default_note_color(cls, color):
color = images.color_to_rgb(color)
cls.notecolor = color
def __init__(self, node1, node2):
super(DiagramEdge, self).__init__(node1, node2)
self.leftnote = None
self.leftnotesize = Size(0, 0)
self.rightnote = None
self.rightnotesize = Size(0, 0)
self.textwidth = 0
self.textheight = 0
self.order = 0
self.activate = True
self.asynchronous = False
self.diagonal = False
self.failed = False
self.return_label = ''
@property
def left_node(self):
if self.node1.xy.x <= self.node2.xy.x:
return self.node1
else:
return self.node2
@property
def right_node(self):
if self.node1.xy.x > self.node2.xy.x:
return self.node1
else:
return self.node2
@property
def direction(self):
if self.node1.xy.x == self.node2.xy.x:
direction = 'self'
elif self.node1.xy.x < self.node2.xy.x:
# n1 .. n2
if self.dir == 'forward':
direction = 'right'
else:
direction = 'left'
else:
# n2 .. n1
if self.dir == 'forward':
direction = 'left'
else:
direction = 'right'
return direction
def set_note(self, value):
self.rightnote = value
def set_diagonal(self, value):
self.diagonal = True
def set_async(self, value):
self.dir = 'forward'
def set_return(self, value):
self.return_label = value
def set_failed(self, value):
self.failed = True
self.activate = False
def set_activate(self, value):
self.activate = True
def set_noactivate(self, value):
self.activate = False
def set_dir(self, value):
params = self.ARROW_DEF.get(value.lower())
if params is None:
warning("unknown edge dir: %s", value)
else:
self.dir, self.style, self.asynchronous = params
if self.node1 == self.node2 and self.dir in ('forward', 'back'):
self.activate = False
def to_desctable(self):
params = (self.dir, self.style, self.asynchronous)
for arrow_type, settings in self.ARROW_DEF.items():
if params == settings and not arrow_type.isalpha():
label = "%s %s %s" % (self.node1.label,
arrow_type,
self.node2.label)
return [label, self.description]
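# Usage sketch: set_dir() resolves an arrow token via ARROW_DEF into the
# direction, line style and asynchronous flag (node1/node2 being DiagramNode
# instances):
#
# edge = DiagramEdge(node1, node2)
# edge.set_dir('-->>')
# (edge.dir, edge.style, edge.asynchronous) # -> ('forward', 'dashed', True)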
class AltBlock(blockdiag.elements.Base):
basecolor = (0, 0, 0)
linecolor = (0, 0, 0)
width = None
height = None
@classmethod
def clear(cls):
super(AltBlock, cls).clear()
cls.basecolor = (0, 0, 0)
cls.linecolor = (0, 0, 0)
@classmethod
def set_default_linecolor(cls, color):
color = images.color_to_rgb(color)
cls.linecolor = color
def __init__(self, _type, _id):
self.type = _type
self.id = _id
self.xlevel = 1
self.ylevel_top = 1
self.ylevel_bottom = 1
self.edges = []
self.color = self.basecolor
@property
def xy(self):
if len(self.edges) == 0:
return XY(0, 0)
else:
x = min(e.left_node.xy.x for e in self.edges)
y = min(e.order for e in self.edges) + 1
return XY(x, y)
@property
def colwidth(self):
if len(self.edges) == 0:
return 1
else:
x2 = max(e.right_node.xy.x for e in self.edges)
return x2 - self.xy.x + 1
@property
def colheight(self):
if len(self.edges) == 0:
return 1
else:
y2 = max(e.order for e in self.edges) + 1
return y2 - self.xy.y + 1
class Diagram(blockdiag.elements.Diagram):
_DiagramNode = DiagramNode
_DiagramEdge = DiagramEdge
def __init__(self):
super(Diagram, self).__init__()
self.int_attrs.append('edge_length')
self.activation = True
self.autonumber = False
self.edge_length = None
self.groups = []
self.separators = []
self.altblocks = []
def traverse_groups(self, preorder=False):
return self.groups
def set_default_linecolor(self, color):
super(Diagram, self).set_default_linecolor(color)
color = images.color_to_rgb(color)
AltBlock.set_default_linecolor(color)
def set_default_note_color(self, color):
color = images.color_to_rgb(color)
self._DiagramEdge.set_default_note_color(color)
def set_activation(self, value):
value = value.lower()
if value == 'none':
self.activation = value
else:
warning("unknown activation style: %s", value)
def set_autonumber(self, value):
if value.lower() == 'false':
self.autonumber = False
else:
self.autonumber = True
def set_edge_height(self, value):
warning("edge_height is obsoleted; use span_height")
self.span_height = int(value)
| 27.920266 | 76 | 0.563422 | [
"Apache-2.0"
] | kizhakkd/seqdiag | src/seqdiag/elements.py | 8,404 | Python |
from .factory import ImageClassificationModel # noqa: F401
| 30 | 59 | 0.816667 | [
"MIT"
] | jexio/fulmo | fulmo/models/cv/__init__.py | 60 | Python |
"""Init to import all the routing protocols implemented."""
from .min_hop import MinHopRouting, MinHopRoutingSink
from .etx import ETX, ETXSink
from .dap import DAPRouting, DAPRoutingSink
from .base_routing_protocol import RoutingProtocol
| 34.285714 | 59 | 0.825 | [
"MIT"
] | mesepulveda/wsn-simulator | wsnsim/routing/__init__.py | 240 | Python |
import os
from retriever.lib.defaults import DATA_DIR
from retriever.lib.dummy import DummyConnection
from retriever.lib.models import Engine
from retriever.lib.tools import open_fr, open_fw
from retriever.lib.engine_tools import sort_csv, xml2csv_test
class engine(Engine):
"""Engine instance for writing data to a XML file."""
name = "XML"
abbreviation = "xml"
auto_column_number = 0
datatypes = {
"auto": "INTEGER",
"int": "INTEGER",
"bigint": "INTEGER",
"double": "REAL",
"decimal": "REAL",
"char": "TEXT",
"bool": "INTEGER",
}
insert_limit = 1000
required_opts = [
("table_name", "Format of table name", "{db}_{table}.xml"),
("data_dir", "Install directory", DATA_DIR),
]
table_names = []
def create_db(self):
"""Override create_db since there is no database just an XML file."""
return None
def create_table(self):
"""Create the table by creating an empty XML file."""
table_path = os.path.join(self.opts["data_dir"], self.table_name())
self.output_file = open_fw(table_path, encoding=self.encoding)
self.output_file.write(u'<?xml version="1.0" encoding="UTF-8"?>')
self.output_file.write(u'\n<root>')
self.table_names.append((self.output_file, table_path))
self.auto_column_number = 1
# Register all tables created to enable
# testing python files having custom download function
if self.script.name not in self.script_table_registry:
self.script_table_registry[self.script.name] = []
self.script_table_registry[self.script.name].append(
(self.table_name(), self.table))
def disconnect(self):
"""Close out the xml files
Close all the file objects that have been created
Re-write the files, stripping off the last comma, and then close with a closing tag.
"""
if self.table_names:
for output_file_i, file_name in self.table_names:
output_file_i.close()
current_input_file = open_fr(file_name, encoding=self.encoding)
file_contents = current_input_file.readlines()
current_input_file.close()
file_contents[-1] = file_contents[-1].strip(',')
current_output_file = open_fw(file_name, encoding=self.encoding)
current_output_file.writelines(file_contents)
current_output_file.write('\n</root>')
current_output_file.close()
self.table_names = []
def execute(self, statement, commit=True):
"""Write a line to the output file."""
self.output_file.writelines(statement)
def executemany(self, statement, values, commit=True):
"""Write a line to the output file."""
self.output_file.writelines(statement)
def format_insert_value(self, value, datatype):
"""Format value for an insert statement."""
v = Engine.format_insert_value(self, value, datatype)
if v is None:
return ""
try:
if len(v) > 1 and v[0] == v[-1] == "'":
v = '"%s"' % v[1:-1]
except BaseException:
pass
return v
def insert_statement(self, values):
"""Create the insert statement.
Wrap each data value with column values(key)
using _format_single_row <key> value </key>.
"""
if not hasattr(self, 'auto_column_number'):
self.auto_column_number = 1
keys = self.table.get_insert_columns(join=False, create=True)
if self.table.columns[0][1][0][3:] == 'auto':
newrows = []
for rows in values:
insert_stmt = [self.auto_column_number] + rows
newrows.append(insert_stmt)
self.auto_column_number += 1
else:
newrows = values
xml_lines = [
'\n<row>\n{}</row>'.format(format_single_row(keys, line_data))
for line_data in newrows
]
return xml_lines
def to_csv(self, sort=True, path=None, select_columns=None):
"""Export table from xml engine to CSV file."""
for table_item in self.script_table_registry[self.script.name]:
header = table_item[1].get_insert_columns(join=False, create=True)
outputfile = os.path.normpath(
os.path.join(
path if path else '',
os.path.splitext(os.path.basename(table_item[0]))[0] + '.csv'))
empty_rows = 1
if hasattr(self.script, "empty_rows"):
empty_rows = self.script.empty_rows
input_file = table_item[0]
header_values = header
csv_outfile = xml2csv_test(input_file,
outputfile,
header_values,
row_tag="row")
sort_csv(csv_outfile, encoding=self.encoding)
def get_connection(self):
"""Get db connection."""
self.get_input()
return DummyConnection()
def format_single_row(keys, line_data):
"""Create an xml string from the keys and line_data values."""
row_values = [
' <{key}>{value}</{key}>\n'.format(key=key, value=value)
for key, value in zip(keys, line_data)
]
return ''.join(row_values)
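# Example: format_single_row() wraps each value in a tag named after its
# column; insert_statement() then nests these blocks inside <row> elements.
#
# format_single_row(['genus', 'species'], ['Canis', 'lupus'])
# # -> '<genus>Canis</genus>\n<species>lupus</species>\n'
# # (each line carries the leading indentation from the template above)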
| 36.724832 | 90 | 0.58973 | [
"MIT"
] | Aakash3101/retriever | retriever/engines/xmlengine.py | 5,472 | Python |
from subprocess import STDOUT, run, PIPE
def align(x, al):
""" return <x> aligned to <al> """
return ((x+(al-1))//al)*al
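# Example: align() rounds a size up to the next multiple of the alignment,
# leaving already-aligned values unchanged.
#
# align(0x234, 0x200) # -> 0x400
# align(0x400, 0x200) # -> 0x400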
class CompilationError(Exception):
def __init__(self, code, output) -> None:
super().__init__(f'compilation failed')
self.code = code
self.output = output
def __str__(self) -> str:
return f'compilation failed: compiler exit code {self.code}'
def __repr__(self) -> str:
return f'<CompilationError code={self.code}>'
def compile(sources, flags):
compiler = 'gcc'
args = ''
if isinstance(sources, (str, bytes)):
args += sources
elif isinstance(sources, (list, tuple)):
args += ' '.join(sources)
args += ' '
if isinstance(flags, (str, bytes)):
args += flags
elif isinstance(flags, (list, tuple)):
args += ' '.join(flags)
cmd = f'{compiler} {args}'
proc = run(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
if proc.returncode != 0:
raise CompilationError(proc.returncode, proc.stdout)
def windres(sources, output):
executable = 'windres'
args = ''
if isinstance(sources, (str, bytes)):
args += sources
elif isinstance(sources, (list, tuple)):
args += ' '.join(sources)
cmd = f'{executable} {args} -o {output}'
proc = run(cmd, shell=True, stdout=PIPE, stderr=STDOUT)
if proc.returncode != 0:
raise CompilationError(proc.returncode, proc.stdout)
| 25.946429 | 68 | 0.598073 | [
"MIT"
] | nnnewb/learning-packer | packer4/utils.py | 1,453 | Python |
from flask_appbuilder import BaseView, expose
from config import APP_ICON, APP_NAME
from flask import g
def get_user():
return g.user
def custom_template():
app_name = "GEA"
app_version = "1.2"
return app_name, app_version
class someView(BaseView):
"""
A simple view that implements the index for the site
"""
route_base = ''
default_view = 'index'
index_template = 'appbuilder/index.html'
@expose('/')
def index(self):
from app import db
from .models import Partner, Unit, Application, Doctype
session = db.session
partner = session.query(Partner).count()
unit = session.query(Unit).count()
application = session.query(Application).count()
doctype = session.query(Doctype).count()
self.update_redirect()
return self.render_template(self.index_template,
appbuilder=self.appbuilder,
partner=partner,
unit=unit,
material=application,
doctype=doctype,
user=g.user)
class MyIndexView(someView):
index_template = 'index.html'
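# Usage sketch (not part of this module): MyIndexView is typically registered
# as the site's index view when the AppBuilder instance is created, e.g. in
# the application factory:
#
# from flask_appbuilder import AppBuilder
# from .index import MyIndexView
# appbuilder = AppBuilder(app, db.session, indexview=MyIndexView)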
| 25.803922 | 63 | 0.545593 | [
"MIT"
] | mcnigno/gea | app/index.py | 1,316 | Python |
# Copyright 2019-2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
#
import sys, copy
import argparse
import yaml
import falconspy
from rdlLib import RDLFile
AGENTS_IGNORE = [1] # the ones which do not have ballHandlers
def loadYAMLcalibration(yamlfile):
f = open(yamlfile, mode='r')
y = yaml.safe_load(f.read())
f.close()
# only return the calibration section
# convert from list to dict, which is more convenient
return {cal['robotId']: cal for cal in y['calibration']}
def updateYAML(newCalibration, yamlfile):
# to preserve formatting, ordering and comments, we just process the yaml line by line - parsing is easy enough
f = open(yamlfile, mode='r')
lines = f.readlines()
f.close()
# parse helpers
def stripComment(line):
p = line.split('#')
return p[0]
def getValue(line):
p = line.split(':')
return p[1]
def replaceValue(line, newValue):
newValue = int(newValue)
p = line.split('#')
comment = ""
if len(p) > 1:
comment = ' #' + p[1]
p = line.split(':')
key = p[0] + ':'
return "%-14s %d%s" % (key, newValue, comment)
# open in write mode
f = open(yamlfile, mode='w')
inCalibration = False
arm = "leftArm"
robotId = 0
for line in lines:
newLine = None
if line.startswith('calibration'):
inCalibration = True
if inCalibration:
if "robotId" in line:
robotId = int(getValue(stripComment(line)))
if "leftArm" in line:
arm = "leftArm"
if "rightArm" in line:
arm = "rightArm"
if "down:" in line and newCalibration.has_key(robotId):
newLine = replaceValue(line, int(newCalibration[robotId][arm]['down'])) + '\n'
if "up:" in line and newCalibration.has_key(robotId):
newLine = replaceValue(line, int(newCalibration[robotId][arm]['up'])) + '\n'
if newLine == None:
f.write(line)
else:
f.write(newLine)
f.close()
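# For reference, a sketch of the calibration section in BallHandling.yaml that
# loadYAMLcalibration()/updateYAML() expect (the numbers are illustrative):
#
# calibration:
#   - robotId: 2
#     leftArm:
#       down: 520
#       up: 620
#     rightArm:
#       down: 510
#       up: 615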
def analyzeRDL(rdlfile):
# TODO: improve mem usage by rdlLib and remove the need for calling parseRDL()
f = RDLFile(rdlfile)
f.parseRDL()
# helpers
result = {}
emptyCal = {'robotId': None, 'leftArm': {'down': 1e6, 'up': 0.0}, 'rightArm': {'down': 1e6, 'up': 0.0}}
def processItem(agent, angleLeft, angleRight):
# zero reading might be produced by peripheralsInterface at init time, ignore them
if angleLeft == 0.0 or angleRight == 0.0:
return
if agent not in result:
result[agent] = copy.deepcopy(emptyCal)
result[agent]['robotId'] = agent
result[agent]['leftArm']['down'] = min(result[agent]['leftArm']['down'], angleLeft)
result[agent]['leftArm']['up'] = max(result[agent]['leftArm']['up'], angleLeft)
result[agent]['rightArm']['down'] = min(result[agent]['rightArm']['down'], angleRight)
result[agent]['rightArm']['up'] = max(result[agent]['rightArm']['up'], angleRight)
# work through the data
robotsWithoutBall = set()
robotsWithBall = set()
for frame in f.frames:
for agent in frame.data.keys():
# store the ballHandler angles
key = "BALLHANDLERS_FEEDBACK"
if key in frame.data[agent]:
item = frame.data[agent][key]
angleLeft = item.value[0]
angleRight = item.value[1]
processItem(agent, angleLeft, angleRight)
# extra check on ball possession, since calibration only makes sense if the robot was
# without ball for a while, AND with ball
key = "ROBOT_STATE"
if key in frame.data[agent]:
item = frame.data[agent][key]
hasball = item.value[4]
if hasball:
robotsWithBall.add(agent)
else:
robotsWithoutBall.add(agent)
# warn for bad data regarding ball possession
for agent in result.keys():
if agent in AGENTS_IGNORE:
continue
if agent not in robotsWithoutBall:
print "WARNING: agent %d never lost the ball, so DOWN value is unreliable" % (agent)
if agent not in robotsWithBall:
print "WARNING: agent %d never got the ball, so UP value is unreliable" % (agent)
return result
def report(currentCalibation, newCalibration):
# helper
def findAgent(agent, calibrationDict):
if agent in calibrationDict.keys():
return calibrationDict[agent]
return None
# for all agents
for agent in range(1, 20):
# ignore agent 1, keeper has no ballHandlers
if agent in AGENTS_IGNORE:
continue
curCal = findAgent(agent, currentCalibation)
newCal = findAgent(agent, newCalibration)
if curCal is not None or newCal is not None:
if curCal is None:
print("WARNING: calibration data missing in yaml for agent " + str(agent))
elif newCal is None:
print("WARNING: data missing in RDL for agent " + str(agent))
else:
print(agent)
for arm in ['leftArm', 'rightArm']:
for side in ['down', 'up']:
print('%15s : %4d -> %4d' % (arm + '.' + side, int(curCal[arm][side]), int(newCal[arm][side])))
def run(args):
# which yaml file to read/write
yamlfile = falconspy.FALCONS_CODE_PATH + "/config/BallHandling.yaml"
# load yaml
currentCalibation = loadYAMLcalibration(yamlfile)
# analyze values from RDL, come up with new calibration
newCalibration = analyzeRDL(args.rdlfile)
# pretty-print the values for comparison
report(currentCalibation, newCalibration)
# update?
if args.update:
# strip bad data
for agent in AGENTS_IGNORE:
if agent in newCalibration:
del newCalibration[agent]
updateYAML(newCalibration, yamlfile)
print "INFO: yaml configuration file has been updated"
if __name__ == '__main__':
# Argument parsing.
descriptionTxt = 'Suggest new values for ballHandler angle calibration based on .rdl file.\n'
exampleTxt = 'Example: checkBhCalibration.py ~/falcons/matchLogs2020/practice_matches/20190925_204116_coach_VDL_half2.rdl\n'
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', '--update', help='update the configuration yaml file', action='store_true')
parser.add_argument('rdlfile', help='.rdl file to load')
args = parser.parse_args()
# run
run(args)
| 38.216667 | 141 | 0.602268 | [
"Apache-2.0"
] | Falcons-Robocup/code | packages/ballHandling/py/checkBhCalibration.py | 6,879 | Python |
_base_ = [
'../../_base_/models/resnet50_cifar.py', '../../_base_/datasets/cifar100_bs128.py',
'../../_base_/schedules/cifar10_bs128.py', '../../_base_/default_runtime.py'
]
norm_cfg = dict(type="SyncBN", requires_grad=True)
model = dict(
type='ImageClassifierAD',
backbone=dict(
norm_cfg=norm_cfg,
out_indices=(0, 1, 2, 3)
),
head=dict(
type='LinearClsHeadKD',
num_classes=100
)
)
lr_config = dict(policy='step', step=[60, 120, 160], gamma=0.2)
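# A sketch of how a config like this is usually loaded and inspected (assumes mmcv is installed
# and the _base_ files above are resolvable from the repo root; the path is this file's own path):
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/kd/cifar100/resnet18_b128_cifar100.py')
#   print(cfg.model.head.num_classes)   # -> 100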
| 33.1 | 100 | 0.583082 | [
"MIT"
] | wyze-AI/AdaptiveDistillation | configs/kd/cifar100/resnet18_b128_cifar100.py | 662 | Python |
#!/usr/bin/env python
from distutils.core import setup
DISTNAME = 'tract_querier'
DESCRIPTION = \
'WMQL: Query language for automatic tract extraction from '\
'full-brain tractographies with '\
'a registered template on top of them'
LONG_DESCRIPTION = open('README.md').read()
MAINTAINER = 'Demian Wassermann'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://demianw.github.io/tract_querier'
LICENSE = open('license.rst').read()
DOWNLOAD_URL = 'https://github.com/demianw/tract_querier'
VERSION = '0.1'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(quiet=True)
config.add_subpackage('tract_querier')
return config
if __name__ == "__main__":
setup(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
requires=[
'numpy(>=1.6)',
'nibabel(>=1.3)'
],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'
],
scripts=[
'scripts/tract_querier',
'scripts/tract_math',
'scripts/volume_querier'
],
test_suite='nose.collector',
**(configuration().todict())
)
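# Typical invocations for this distutils/numpy.distutils setup script (a sketch; exact steps
# depend on the environment and install preference):
#
#   python setup.py build
#   python setup.py install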
| 30.448276 | 64 | 0.625708 | [
"BSD-3-Clause"
] | gagdiez/WPI_WMQL_t | setup.py | 1,766 | Python |
from django.conf.urls import url
from visitorcounter import views
urlpatterns = [
url(r'^$', views.index),
]
| 16.285714 | 32 | 0.710526 | [
"Unlicense"
] | benjaminbrinkman/itchallenge | visitorcounter/urls.py | 114 | Python |
import os
import pandas
import numpy as np
from numpy.random import default_rng
import cv2
from time import time_ns
from datetime import datetime, timedelta
from PIL import Image
class Imagebot:
def __init__(self, queue="./queue", sourcedir="./source", index="index.csv",
min_queue_length=240, images_to_autoadd=24):
self.queue = queue
self.sourcedir = sourcedir
self.index = pandas.read_csv(index)
self.min_queue_length = min_queue_length
self.images_to_autoadd = images_to_autoadd
def get_specific_image(self, key, frame):
# 1. Find the first file in the source directory that matches the key
# If none, go with the first file in the source directory
files = os.listdir(self.sourcedir)
file = files[0]
for file_check in files:
if key in file_check:
file = file_check
break
filepath = os.path.join(self.sourcedir, file)
# 2. Extract the frame
video = cv2.VideoCapture(filepath)
video.set(1, frame) # Set the frame
ret, im = video.read()
# 3. Return the result
return im
def get_random_image(self):
# Returns the image data from a random clip in the source files
# 1. Pick a clip (row) from the index
clip = default_rng().integers(0, self.index.shape[0])
# 2. Extract the data from the row
key = self.index.iloc[clip]["key"]
clip_start = self.index.iloc[clip]["clip_start"]
clip_end = self.index.iloc[clip]["clip_end"]
# 3. Pick a random frame from the clip
frame = default_rng().integers(clip_start, clip_end+1)
# 4. Return the result
return self.get_specific_image(key, frame)
@staticmethod
def rgb_correction(im):
# CV2 package switches the red and blue channels for some reason, correct them here
b, g, r = Image.fromarray(im).split()
image = Image.merge("RGB", (r, g, b))
return image
def populate_queue(self, n=1):
# Add n images to the queue
print("Retreiving", n, "images")
start_time = time_ns()
for i in np.arange(n)+1:
im = self.get_random_image()
image = self.rgb_correction(im)
filename = "New File - " + str(time_ns()) + ".png"
filepath = os.path.join(self.queue, filename)
image.save(filepath)
print(i, filepath)
end_time = time_ns()
delta = (end_time - start_time) / 1e9
avg = delta / n
print("Retreived", n, "images")
print(" Total time:", np.round(delta, decimals=1), "seconds")
print(" Average time:", np.round(avg, decimals=1), "seconds per image")
def autopopulate_queue(self, min_queue_length=240, images_to_add=24):
# Check the length of the queue, if it's below the specified threshold then run populate_queue()
# Defaults are good for an hourly bot (Queue is at least 10 days long, add 24 images at a time)
queue_length = len(os.listdir(self.queue))
print("There are", queue_length, "images left in the queue.")
if queue_length < min_queue_length:
print("Queue length is below threshold (min", min_queue_length, "images)")
self.populate_queue(images_to_add)
def pop_from_queue(self, dryrun=False):
# Return the raw image data from the first image in the queue & delete it
# Use this method to post the data to Twitter or some other API
files = os.listdir(self.queue)
files.sort()
file = files[0]
filepath = os.path.join(self.queue, file)
imagefile = open(filepath, "rb")
imagedata = imagefile.read()
print("Loaded data from", filepath)
if not dryrun:
os.remove(filepath)
print("Removed", filepath)
else:
print("Dry run, did not remove", filepath)
return imagedata
def organize_queue(self, start=datetime.now(), interval=60, dryrun=False):
# Rename the files in the queue with timestamps of when they will be posted
# Default settings: Items in the queue are posted an hour apart, starting now
# Get the queue and sort it
files = os.listdir(self.queue)
files.sort()
# Loop through and rename the files
for i in range(len(files)):
stamp = start + timedelta(minutes=i*interval)
stamp_str = stamp.strftime("%Y-%m-%d %H:%M")
extension = os.path.splitext(files[i])[1]
src = os.path.join(self.queue, files[i])
dst = os.path.join(self.queue, stamp_str+extension)
print("os.rename("+src+", "+dst+")")
if not dryrun:
os.rename(src, dst)
| 38.373016 | 104 | 0.611169 | [
"MIT"
] | mattrogers1124/imagebot | imagebot/imagebot.py | 4,835 | Python |
# -*- coding:utf-8 -*-
class TabSetup(object):
def __init__(self, url_name='', click_css_selector='', pause_time=1, x_offset=8, y_offset=8, try_times=20):
"""
爬虫标签页设置
:param url_name:
:param click_css_selector:
:param pause_time:暂停时间
:param x_offset:x轴方向页面偏移
:param y_offset:y轴方向页面偏移
:param try_times:尝试的次数
"""
self.url_name = url_name
self.click_css_selector = click_css_selector
self.pause_time = pause_time
self.x_offset = x_offset
self.y_offset = y_offset
self.try_times = try_times
    def __str__(self):  # only one of url_name and click_css_selector may be set
if (not self.url_name and not self.click_css_selector) or (self.url_name and self.click_css_selector):
return str(None)
else:
result = vars(self).copy()
if self.url_name:
result.pop('click_css_selector')
elif self.click_css_selector:
result.pop('url_name')
return str(result)
def __eq__(self, other):
if other is None:
return (self.url_name and self.click_css_selector) or (not self.url_name and not self.click_css_selector)
else:
if vars(other) == vars(self):
return True
else:
                return super(TabSetup, self).__eq__(other)
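# Usage sketch (selector value is illustrative): exactly one of url_name / click_css_selector is
# expected to be set, otherwise __str__ renders as "None".
if __name__ == "__main__":
    tab = TabSetup(click_css_selector="a.next-page", pause_time=2, try_times=10)
    print(tab)   # dict-style string with the unused url_name field dropped
    print(tab == TabSetup(click_css_selector="a.next-page", pause_time=2, try_times=10))  # True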
| 34.35 | 117 | 0.598253 | [
"Apache-2.0"
] | mannuan/dspider | spider/driver/base/tabsetup.py | 1,452 | Python |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_receipts.py
DESCRIPTION:
This sample demonstrates how to recognize and extract common fields from receipts,
using a pre-trained receipt model. For a suggested approach to extracting information
from receipts, see sample_strongly_typed_recognized_form.py.
See fields found on a receipt here:
https://aka.ms/formrecognizer/receiptfields
USAGE:
python sample_recognize_receipts.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeReceiptsSample(object):
def recognize_receipts(self):
path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__),
"..", "./sample_forms/receipt/contoso-allinone.jpg"))
# [START recognize_receipts]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open(path_to_sample_forms, "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f, locale="en-US")
receipts = poller.result()
for idx, receipt in enumerate(receipts):
print("--------Recognizing receipt #{}--------".format(idx+1))
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence))
merchant_name = receipt.fields.get("MerchantName")
if merchant_name:
print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence))
print("Receipt items:")
for idx, item in enumerate(receipt.fields.get("Items").value):
print("...Item #{}".format(idx+1))
item_name = item.value.get("Name")
if item_name:
print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence))
item_quantity = item.value.get("Quantity")
if item_quantity:
print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence))
item_price = item.value.get("Price")
if item_price:
print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
tip = receipt.fields.get("Tip")
if tip:
print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
total = receipt.fields.get("Total")
if total:
print("Total: {} has confidence: {}".format(total.value, total.confidence))
print("--------------------------------------")
# [END recognize_receipts]
if __name__ == '__main__':
sample = RecognizeReceiptsSample()
sample.recognize_receipts()
| 47.494737 | 134 | 0.605053 | [
"MIT"
] | 4thel00z/microsoft-crap-that-doesnt-work | sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py | 4,512 | Python |
from typing import List, Tuple
import aiosqlite
from venidium.types.blockchain_format.sized_bytes import bytes32
from venidium.util.db_wrapper import DBWrapper
import logging
log = logging.getLogger(__name__)
class HintStore:
coin_record_db: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
await self.coin_record_db.execute(
"CREATE TABLE IF NOT EXISTS hints(id INTEGER PRIMARY KEY AUTOINCREMENT, coin_id blob, hint blob)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS hint_index on hints(hint)")
await self.coin_record_db.commit()
return self
async def get_coin_ids(self, hint: bytes) -> List[bytes32]:
cursor = await self.coin_record_db.execute("SELECT * from hints WHERE hint=?", (hint,))
rows = await cursor.fetchall()
await cursor.close()
coin_ids = []
for row in rows:
coin_ids.append(row[1])
return coin_ids
async def add_hints(self, coin_hint_list: List[Tuple[bytes32, bytes]]) -> None:
cursor = await self.coin_record_db.executemany(
"INSERT INTO hints VALUES(?, ?, ?)",
[(None,) + record for record in coin_hint_list],
)
await cursor.close()
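# Usage sketch inside other async code (assumes an already-initialised DBWrapper named
# `db_wrapper` and bytes32 coin ids / hints; the names are illustrative):
#
#   hint_store = await HintStore.create(db_wrapper)
#   await hint_store.add_hints([(coin_id, hint)])
#   await db_wrapper.db.commit()               # add_hints() itself does not commit
#   coin_ids = await hint_store.get_coin_ids(hint)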
| 34.560976 | 110 | 0.666196 | [
"Apache-2.0"
] | Venidium-Network/venidium-blockchain | venidium/full_node/hint_store.py | 1,417 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class StorageInsightConfigsOperations(object):
"""StorageInsightConfigsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.loganalytics.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_update(
self,
resource_group_name, # type: str
workspace_name, # type: str
storage_insight_name, # type: str
parameters, # type: "_models.StorageInsight"
**kwargs # type: Any
):
# type: (...) -> "_models.StorageInsight"
"""Create or update a storage insight.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:param parameters: The parameters required to create or update a storage insight.
:type parameters: ~azure.mgmt.loganalytics.models.StorageInsight
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageInsight, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.StorageInsight
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsight"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'storageInsightName': self._serialize.url("storage_insight_name", storage_insight_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageInsight')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('StorageInsight', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('StorageInsight', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
storage_insight_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.StorageInsight"
"""Gets a storage insight instance.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageInsight, or the result of cls(response)
:rtype: ~azure.mgmt.loganalytics.models.StorageInsight
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsight"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'storageInsightName': self._serialize.url("storage_insight_name", storage_insight_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageInsight', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
workspace_name, # type: str
storage_insight_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a storageInsightsConfigs resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param storage_insight_name: Name of the storageInsightsConfigs resource.
:type storage_insight_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'storageInsightName': self._serialize.url("storage_insight_name", storage_insight_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs/{storageInsightName}'} # type: ignore
def list_by_workspace(
self,
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.StorageInsightListResult"]
"""Lists the storage insight instances within a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageInsightListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.loganalytics.models.StorageInsightListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageInsightListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('StorageInsightListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/storageInsightConfigs'} # type: ignore
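# Usage sketch: this operations class is normally reached through the generated management client
# rather than instantiated directly (the client class and attribute names below are assumptions
# based on the package layout; resource names are illustrative):
#
#   from azure.mgmt.loganalytics import LogAnalyticsManagementClient
#   client = LogAnalyticsManagementClient(credential, subscription_id)
#   for insight in client.storage_insight_configs.list_by_workspace("my-rg", "my-workspace"):
#       print(insight.name)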
| 50.323899 | 235 | 0.670312 | [
"MIT"
] | AFengKK/azure-sdk-for-python | sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/_storage_insight_configs_operations.py | 16,003 | Python |
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from Manager import models as m
from . import templater
def process_request(request):
group = request.urlparams[0]
try:
members = m.Membership.objects.filter(group = group)
print(members)
users = list()
uWeights = list()
print(users)
for g in members:
users.append(g.member)
print(users)
userWeights = m.Weight.objects.filter(user__in=(users))
print(userWeights)
for i in users:
try:
uw = m.Weight.objects.filter(user = i).latest('dateWeighed')
print(uw)
uWeights.append(uw.weightLost)
except:
print('no weight')
uWeights.append('No weight entry yet')
print(uWeights)
	except:
		members = False
		users = list()
		uWeights = list()
tvars = {
'members' : users,
'userWeights' : uWeights
}
return templater.render_to_response(request, 'groupMembers.html', tvars)
| 24.675676 | 73 | 0.71851 | [
"Apache-2.0"
] | bmackley/ancientassyrian | member/views/groupMembers.py | 913 | Python |
# Run the person detection model
# This version reads the images from the ov2640 camera on the esp32-cam board
# with minor changes this also works for the m5 timer camera
import sys
import microlite
import camera
from machine import Pin,PWM
# initialize the camera to read 96x96 pixel gray scale images
try:
# uncomment for esp32-cam-mb with ov2640 sensor
camera.init(0,format=camera.GRAYSCALE,framesize=camera.FRAME_96X96)
# uncomment for the m5 timer camera with ov3660 sensor
# camera.init(0,format=camera.GRAYSCALE,framesize=camera.FRAME_96X96,
# sioc=23,siod=25,xclk=27,vsync=22,href=26,pclk=21,
# d0=32,d1=35,d2=34,d3=5,d4=39,d5=18,d6=36,d7=19,
# reset=15)
except:
print("Error when initializing the camera")
sys.exit()
# initialize the flash-light LED, it is connected to GPIO 4
flash_light = PWM(Pin(4))
# switch it off
flash_light.duty(0)
# change for m5 timer camera
# # initialize the flash-light LED, it is connected to GPIO 4
# flash_light = Pin(2,Pin.OUT)
# switch it off
# flash_light.off()
mode = 1
test_image = bytearray(9216)  # 96x96 grayscale input buffer (96*96 = 9216 bytes)
def handle_output(person):
if person > 10:
flash_light.duty(5)
# if m5 timer camera
# flash_light.on()
else:
flash_light.duty(0)
# if m5 timer camera
# flash_light.off()
def input_callback (microlite_interpreter):
inputTensor = microlite_interpreter.getInputTensor(0)
for i in range (0, len(test_image)):
inputTensor.setValue(i, test_image[i])
print ("setup %d bytes on the inputTensor." % (len(test_image)))
def output_callback (microlite_interpreter):
outputTensor = microlite_interpreter.getOutputTensor(0)
not_a_person = outputTensor.getValue(0)
person = outputTensor.getValue(1)
print ("'not a person' = %d, 'person' = %d" % (not_a_person, person))
handle_output(person)
# read the model
person_detection_model_file = open ('person_detect_model.tflite', 'rb')
person_detection_model = bytearray (300568)
person_detection_model_file.readinto(person_detection_model)
person_detection_model_file.close()
# create the interpreter
interp = microlite.interpreter(person_detection_model,136*1024, input_callback, output_callback)
# Permanently read images from the camera and pass them into the model for
# inference
while True:
test_image = camera.capture()
interp.invoke()
camera.deinit()
| 29.2875 | 96 | 0.745198 | [
"MIT"
] | TGiles1998/tensorflow-micropython-examples | examples/person_detection/esp32-cam/person_detection_cam.py | 2,343 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
# F.max_pool2d needs kernel_size and stride. If only one argument is passed,
# then kernel_size = stride
from .audio import MelspectrogramStretch
from torchparse import parse_cfg
# Architecture inspiration from: https://github.com/keunwoochoi/music-auto_tagging-keras
class AudioCRNN(BaseModel):
def __init__(self, classes, config={}, state_dict=None):
super(AudioCRNN, self).__init__(config)
in_chan = 2 if config['transforms']['args']['channels'] == 'stereo' else 1
self.classes = classes
self.lstm_units = 64
self.lstm_layers = 2
self.spec = MelspectrogramStretch(hop_length=64,
num_mels=80,
fft_length=1024,
norm='whiten',
stretch_param=[0.4, 0.4])
# shape -> (channel, freq, token_time)
self.net = parse_cfg(config['cfg'], in_shape=[in_chan, self.spec.n_mels, 400])
def _many_to_one(self, t, lengths):
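        # pick, for each sequence in the batch, the output at its last valid (non-padded) timestep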
return t[torch.arange(t.size(0)), lengths - 1]
def modify_lengths(self, lengths):
def safe_param(elem):
return elem if isinstance(elem, int) else elem[0]
for name, layer in self.net['convs'].named_children():
#if name.startswith(('conv2d','maxpool2d')):
if isinstance(layer, (nn.Conv2d, nn.MaxPool2d)):
p, k, s = map(safe_param, [layer.padding, layer.kernel_size,layer.stride])
lengths = ((lengths + 2*p - k)//s + 1).long()
return torch.where(lengths > 0, lengths, torch.tensor(1, device=lengths.device))
def forward(self, batch):
# x-> (batch, time, channel)
x, lengths, _ = batch # unpacking seqs, lengths and srs
# x-> (batch, channel, time)
xt = x.float().transpose(1,2)
# xt -> (batch, channel, freq, time)
xt, lengths = self.spec(xt, lengths)
# (batch, channel, freq, time)
xt = self.net['convs'](xt)
lengths = self.modify_lengths(lengths)
# xt -> (batch, time, freq, channel)
x = xt.transpose(1, -1)
# xt -> (batch, time, channel*freq)
batch, time = x.size()[:2]
x = x.reshape(batch, time, -1)
x_pack = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
# x -> (batch, time, lstm_out)
x_pack, hidden = self.net['recur'](x_pack)
x, _ = torch.nn.utils.rnn.pad_packed_sequence(x_pack, batch_first=True)
# (batch, lstm_out)
x = self._many_to_one(x, lengths)
# (batch, classes)
x = self.net['dense'](x)
x = F.log_softmax(x, dim=1)
return x
def predict(self, x):
with torch.no_grad():
out_raw = self.forward( x )
out = torch.exp(out_raw)
max_ind = out.argmax().item()
return self.classes[max_ind], out[:,max_ind].item()
class AudioCNN(AudioCRNN):
def forward(self, batch):
x, _, _ = batch
# x-> (batch, channel, time)
x = x.float().transpose(1,2)
# x -> (batch, channel, freq, time)
x = self.spec(x)
# (batch, channel, freq, time)
x = self.net['convs'](x)
# x -> (batch, time*freq*channel)
x = x.view(x.size(0), -1)
# (batch, classes)
x = self.net['dense'](x)
x = F.log_softmax(x, dim=1)
return x
class AudioRNN(AudioCRNN):
def forward(self, batch):
# x-> (batch, time, channel)
x, lengths, _ = batch # unpacking seqs, lengths and srs
# x-> (batch, channel, time)
x = x.float().transpose(1,2)
# x -> (batch, channel, freq, time)
x, lengths = self.spec(x, lengths)
# x -> (batch, time, freq, channel)
x = x.transpose(1, -1)
# x -> (batch, time, channel*freq)
batch, time = x.size()[:2]
x = x.reshape(batch, time, -1)
x_pack = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, batch_first=True)
# x -> (batch, time, lstm_out)
x_pack, hidden = self.net['recur'](x_pack)
x, _ = torch.nn.utils.rnn.pad_packed_sequence(x_pack, batch_first=True)
# (batch, lstm_out)
x = self._many_to_one(x, lengths)
# (batch, classes)
x = self.net['dense'](x)
x = F.log_softmax(x, dim=1)
return x | 32.992806 | 91 | 0.551025 | [
"MIT"
] | Moon-sung-woo/Audio_classification_CRNN | net/model.py | 4,586 | Python |
"""
Ballastsolver
"""
"""
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Ruben de Bruin - 2019
"""
from DAVE.gui.dockwidget import *
from PySide2 import QtGui, QtCore, QtWidgets
from DAVE.gui.forms.widgetUI_ballastsolver import Ui_BallastSolver
import DAVE.scene as nodes
import DAVE.settings as ds
from DAVE.solvers.ballast import force_vessel_to_evenkeel_and_draft, BallastSystemSolver
class WidgetBallastSolver(guiDockWidget):
def guiCreate(self):
"""
Add gui components to self.contents
Do not fill the controls with actual values here. This is executed
upon creation and guiScene etc are not yet available.
"""
# or from a generated file
self.ui = Ui_BallastSolver()
self.ui.setupUi(self.contents)
self._vesselNode = None
self._bs = None # selected ballast system
self.ui.pushButton.clicked.connect(self.determineRequiredBallast)
self.ui.pushButton_2.clicked.connect(self.solveBallast)
self.ui.doubleSpinBox.valueChanged.connect(self.determineRequiredBallast)
def guiProcessEvent(self, event):
"""
Add processing that needs to be done.
After creation of the widget this event is called with guiEventType.FULL_UPDATE
"""
if event in [guiEventType.FULL_UPDATE, guiEventType.SELECTION_CHANGED]:
self.ballast_system_selected()
def guiDefaultLocation(self):
return QtCore.Qt.DockWidgetArea.LeftDockWidgetArea
# ======
def assert_selection_valid(self):
try:
self.guiScene[self._bs.name]
return True
except:
print('Please select a ballast system first')
return False
def ballast_system_selected(self):
if self.guiSelection:
if isinstance(self.guiSelection[0], nodes.BallastSystem):
self._bs = self.guiSelection[0]
self._vesselNode = self._bs.parent
self.ui.label_4.setText(self._vesselNode.name)
def determineRequiredBallast(self):
if not self.assert_selection_valid():
return
code = 'from DAVE.solvers.ballast import force_vessel_to_evenkeel_and_draft'
code += '\ns["{}"].empty_all_usable_tanks()'.format(self._bs.name)
code += '\ns.required_ballast = force_vessel_to_evenkeel_and_draft(scene=s,vessel="{}",z={})'.format(self._vesselNode.name, self.ui.doubleSpinBox.value())
self.guiRunCodeCallback(code, guiEventType.MODEL_STATE_CHANGED)
self.ui.tableWidget.item(0,0)
self.ui.tableWidget.setItem(0, 0, QtWidgets.QTableWidgetItem(str(-self.guiScene.required_ballast[0])))
self.ui.tableWidget.setItem(0, 1, QtWidgets.QTableWidgetItem(str(self.guiScene.required_ballast[1])))
self.ui.tableWidget.setItem(0, 2, QtWidgets.QTableWidgetItem(str(self.guiScene.required_ballast[2])))
def solveBallast(self):
if not self.assert_selection_valid():
return
code = 'from DAVE.solvers.ballast import BallastSystemSolver'
code += '\nballast_solver = BallastSystemSolver(s["{}"])\n'.format(self._bs.name)
code += 'ballast_solver.ballast_to(cogx = s.required_ballast[1], cogy = s.required_ballast[2], weight = -s.required_ballast[0])\n'
self.guiRunCodeCallback(code,guiEventType.SELECTED_NODE_MODIFIED)
def draftChanged(self):
self.determineRequiredBallast()
self.solveBallast()
| 35.330097 | 162 | 0.684804 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | HESChristof/DAVE | src/DAVE/gui/widget_ballastsolver.py | 3,639 | Python |
# Copyright (c) 2016 Shunta Saito
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from chainer import Variable
from chainer import cuda
from chainer import initializers
from chainer import reporter
from models.bbox_transform import bbox_transform_inv
from models.bbox_transform import clip_boxes
from models.proposal_target_layer import ProposalTargetLayer
from models.region_proposal_network import RegionProposalNetwork
from models.vgg16 import VGG16
class FasterRCNN(Chain):
type_check_enable = int(os.environ.get('CHAINER_TYPE_CHECK', '1')) != 0
def __init__(
self, trunk_class=VGG16, rpn_in_ch=512, rpn_mid_ch=512,
feat_stride=16, anchor_ratios=(0.5, 1, 2),
anchor_scales=(8, 16, 32), num_classes=21, loss_lambda=1,
rpn_delta=3, rcnn_delta=1):
w = initializers.Normal(0.01)
super(FasterRCNN, self).__init__(
trunk=trunk_class(),
RPN=RegionProposalNetwork(
rpn_in_ch, rpn_mid_ch, feat_stride, anchor_ratios,
anchor_scales, num_classes, loss_lambda, rpn_delta),
fc6=L.Linear(None, 4096, initialW=w),
fc7=L.Linear(4096, 4096, initialW=w),
cls_score=L.Linear(4096, num_classes, initialW=w),
bbox_pred=L.Linear(4096, num_classes * 4, initialW=w),
)
self._feat_stride = feat_stride
self._anchor_ratios = anchor_ratios
self._anchor_scales = anchor_scales
self._num_classes = num_classes
self.RPN.train = False
self._rcnn_train = False
self._spatial_scale = 1. / feat_stride
self._rpn_delta = rpn_delta
self._rcnn_delta = rcnn_delta
@property
def rcnn_train(self):
return self._rcnn_train
@rcnn_train.setter
def rcnn_train(self, val):
self._rcnn_train = val
if val:
self.RPN.train = not val
if self.rcnn_train or self.rpn_train:
self.trunk.train = True
else:
self.trunk.train = False
@property
def rpn_train(self):
return self.RPN.train
@rpn_train.setter
def rpn_train(self, val):
self.RPN.train = val
if val:
self._rcnn_train = not val
if self.rcnn_train or self.rpn_train:
self.trunk.train = True
else:
self.trunk.train = False
def _check_data_type_forward(self, x, img_info, gt_boxes):
assert x.shape[0] == 1
assert x.dtype.kind == 'f'
assert isinstance(x, Variable)
assert img_info.shape == (1, 2)
assert img_info.dtype.kind == 'i'
assert isinstance(img_info, Variable)
if gt_boxes is not None:
assert gt_boxes.shape[0] == 1
assert gt_boxes.shape[1] > 0
assert gt_boxes.shape[2] == 5
assert gt_boxes.dtype.kind == 'f'
assert isinstance(gt_boxes, Variable)
def __call__(self, x, img_info, gt_boxes=None):
"""Faster RCNN forward
Args:
x (:class:`~chainer.Variable`): The input image. Note that the
batchsize should be 1. So the shape should be
:math:`(1, n_channels, height, width)`.
img_info (:class:`~chainer.Variable`): The input image info. It
contains :math:`(height, width)` and the batchsize should be 1.
So the shape should be :math:`(1, 2)`.
gt_boxes (:class:`~chainer.Variable`): The ground truth bounding
boxes and its class label array. The shape should be
:math:`(1, n_gt_boxes, 5)` and the batchsize should be 1.
"""
if self.type_check_enable:
self._check_data_type_forward(x, img_info, gt_boxes)
# Use the array module of the backend of trunk model
with cuda.get_device_from_array(x.data):
xp, feature_map = self.trunk.xp, self.trunk(x)
# RPN training mode
if self.rpn_train and gt_boxes is not None:
return self.RPN(feature_map, img_info, gt_boxes)
else:
proposals, probs = self.RPN(feature_map, img_info, gt_boxes)
self.rpn_proposals = proposals
self.rpn_probs = probs
# RCNN
batch_id = xp.zeros((len(proposals), 1), dtype=xp.float32)
brois = xp.concatenate((batch_id, proposals), axis=1)
pool5 = F.roi_pooling_2d(feature_map, brois, 7, 7,
self._spatial_scale)
fc6 = F.dropout(F.relu(self.fc6(pool5)), train=self.rcnn_train)
fc7 = F.dropout(F.relu(self.fc7(fc6)), train=self.rcnn_train)
# Per class probability
cls_score = self.cls_score(fc7)
# BBox predictions
bbox_pred = self.bbox_pred(fc7)
if self.rcnn_train and gt_boxes is not None:
# Create proposal target layer if not exsist
if not hasattr(self, 'proposal_target_layer'):
self.proposal_target_layer = ProposalTargetLayer(
self._feat_stride, self._anchor_ratios,
self._anchor_scales, self._num_classes)
use_gt_boxes, bbox_reg_targets, keep_inds = \
self.proposal_target_layer(proposals, gt_boxes)
# TODO(mitmul): Remove this re-sending below vars to GPU
xp = self.RPN.xp
if xp is cuda.cupy:
use_gt_boxes = xp.asarray(use_gt_boxes)
bbox_reg_targets = xp.asarray(bbox_reg_targets)
keep_inds = xp.asarray(keep_inds)
# Select predicted scores and calc loss
cls_score = cls_score[keep_inds]
cls_labels = use_gt_boxes[:, -1].astype(xp.int32)
loss_cls = F.softmax_cross_entropy(cls_score, cls_labels)
loss_cls = loss_cls.reshape(())
cls_acc = F.accuracy(cls_score, cls_labels, -1)
# Select predicted bbox transformations and calc loss
bbox_pred = bbox_pred[keep_inds]
loss_bbox = F.huber_loss(bbox_pred, bbox_reg_targets,
self._rcnn_delta)
loss_bbox = F.sum(loss_bbox) / loss_bbox.size
loss_bbox = loss_bbox.reshape(())
loss_rcnn = loss_cls + loss_bbox
reporter.report({'loss_cls': loss_cls,
'cls_accuracy': cls_acc,
'loss_bbox': loss_bbox,
'loss_rcnn': loss_rcnn}, self)
return loss_rcnn
pred_boxes = bbox_transform_inv(proposals, bbox_pred.data)
pred_boxes = clip_boxes(pred_boxes, img_info.data[0])
return F.softmax(cls_score), pred_boxes
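# Note on the three modes wired up above: with `rpn_train = True` (and gt_boxes given) __call__
# returns only the RPN loss; with `rcnn_train = True` it returns the RCNN classification + bbox
# loss computed on the RPN proposals; with both flags False it returns
# (class probabilities, predicted boxes) for inference.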
| 38.899441 | 79 | 0.590694 | [
"MIT"
] | Walter1218/Self_Driving_Car_ND | faster_rcnn_explorer/models/faster_rcnn.py | 6,963 | Python |
import tensorflow as tf
if tf.__version__ > '2':
import tensorflow.compat.v1 as tf
import model
def top_k_logits(logits, k):
if k == 0:
# no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
return tf.where(
logits < min_values,
tf.ones_like(logits, dtype=logits.dtype) * -1e10,
logits,
)
return tf.cond(
tf.equal(k, 0),
lambda: logits,
lambda: _top_k(),
)
def top_p_logits(logits, p):
"""Nucleus sampling"""
batch, _ = logits.shape.as_list()
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
indices = tf.stack([
tf.range(0, batch),
# number of indices to include
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=1):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
with tf.name_scope('sample_sequence'):
def body(past, prev, output):
next_outputs = step(hparams, prev, past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
logits = top_k_logits(logits, k=top_k)
logits = top_p_logits(logits, p=top_p)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
return [
next_outputs['presents'] if past is None else tf.concat([past, next_outputs['presents']], axis=-2),
samples,
tf.concat([output, samples], axis=1)
]
past, prev, output = body(None, context, context)
def cond(*args):
return True
_, _, tokens = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length - 1,
loop_vars=[
past,
prev,
output
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size, None]),
tf.TensorShape([batch_size, None]),
],
back_prop=False,
)
return tokens
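# Usage sketch (mirrors how the original gpt-2 scripts drive this function; session, encoder and
# checkpoint handling are omitted and the values are illustrative):
#
#   hparams = model.default_hparams()
#   context = tf.placeholder(tf.int32, [batch_size, None])
#   output = sample_sequence(hparams=hparams, length=40, context=context,
#                            batch_size=batch_size, temperature=1.0, top_k=40, top_p=1)
#   # then, inside a tf.Session with restored weights:
#   #   sess.run(output, feed_dict={context: encoded_prompt_batch})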
| 32.94898 | 122 | 0.578198 | [
"MIT"
] | gkswjdzz/gpt-2-model-generator | src/sample.py | 3,229 | Python |
# backend/server/apps/endpoints/serializers.py file
from rest_framework import serializers
from apps.endpoints.models import Endpoint
from apps.endpoints.models import MLAlgorithm
from apps.endpoints.models import MLAlgorithmStatus
from apps.endpoints.models import MLRequest
class EndpointSerializer(serializers.ModelSerializer):
class Meta:
model = Endpoint
read_only_fields = ("id", "name", "owner", "created_at")
fields = read_only_fields
class MLAlgorithmSerializer(serializers.ModelSerializer):
current_status = serializers.SerializerMethodField(read_only=True)
def get_current_status(self, mlalgorithm):
return MLAlgorithmStatus.objects.filter(parent_mlalgorithm=mlalgorithm).latest('created_at').status
class Meta:
model = MLAlgorithm
read_only_fields = ("id", "name", "description", "code",
"version", "owner", "created_at",
"parent_endpoint", "current_status")
fields = read_only_fields
class MLAlgorithmStatusSerializer(serializers.ModelSerializer):
class Meta:
model = MLAlgorithmStatus
read_only_fields = ("id", "active")
fields = ("id", "active", "status", "created_by", "created_at",
"parent_mlalgorithm")
class MLRequestSerializer(serializers.ModelSerializer):
class Meta:
model = MLRequest
read_only_fields = (
"id",
"input_data",
"full_response",
"response",
"created_at",
"parent_mlalgorithm",
)
fields = (
"id",
"input_data",
"full_response",
"response",
"feedback",
"created_at",
"parent_mlalgorithm",
) | 33.740741 | 107 | 0.622393 | [
"MIT"
] | netoferraz/tutorial_deploy_ml_service | backend/server/apps/endpoints/serializers.py | 1,822 | Python |
"""Utilities for generating synthetic segmentation datasets."""
import os
from typing import Tuple
from pathlib import Path
import numpy as np
from skimage.draw import random_shapes
from skimage.transform import rotate
from skimage.io import imsave
def gen_shape_image(im_size: Tuple[int, int], max_shapes: int=10, overlap: bool=False, rotation: bool=False):
# Generate an image with random shapes
img, shapes = random_shapes(im_size, max_shapes, min_size=25, max_size=150,
multichannel=False, allow_overlap=overlap)
# Find each shape and get the corresponding pixels for the label map
labels = np.zeros(im_size)
shape_map = {'circle': 1, 'rectangle': 2, 'triangle': 3}
for shape, coords in shapes:
rr, cc = coords
shape_img = img[rr[0]:rr[1], cc[0]:cc[1]]
colors = np.bincount(shape_img.ravel()).argsort()
shape_color = colors[-1] if colors[-1] != 255 else colors[-2]
shape_rr, shape_cc = np.where(shape_img == shape_color)
shape_rr += rr[0]
shape_cc += cc[0]
labels[shape_rr, shape_cc] = shape_map[shape]
# If we're rotating pick a random number between -180 and 180 and then rotate
if rotation:
angle = np.random.uniform(-180, 180)
img = rotate(img, angle, preserve_range=True, resize=True, cval=255).astype(np.int)
labels = rotate(labels, angle, preserve_range=True, resize=True).astype(np.int)
# Swap the background color to a random color to make things interesting
background = 255
while background in np.unique(img):
background = np.random.randint(0, 255)
img[img == 255] = background
return img.astype(np.int), labels.astype(np.int)
def generate_synthetic_dataset(path, num_samples: int, im_size=(256, 256),
max_shapes: int=10, overlap: bool=False, p_rotate: float=0):
path = Path(path)
img_path = path / 'images'
label_path = path / 'labels'
os.makedirs(img_path, exist_ok=True)
os.makedirs(label_path, exist_ok=True)
for i in range(num_samples):
rotation = bool(np.random.rand(1) < p_rotate)
img, labels = gen_shape_image(im_size, max_shapes, overlap, rotation)
img_name = f'{i}.png'
imsave(img_path / img_name, img)
imsave(label_path / img_name, labels)
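# A small usage sketch (the output path and parameter values are illustrative assumptions):
if __name__ == "__main__":
    # writes paired PNGs under data/shapes/images and data/shapes/labels;
    # label maps encode circle=1, rectangle=2, triangle=3 (background=0)
    generate_synthetic_dataset("data/shapes", num_samples=20, im_size=(256, 256),
                               max_shapes=8, overlap=False, p_rotate=0.5)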
| 39.847458 | 109 | 0.6661 | [
"MIT"
] | dylanv/unet | unet/data/synthetic_data.py | 2,351 | Python |
from .predict_binary_class import predict_binary_class
from .predict_multi_class import predict_multi_class
| 36 | 54 | 0.907407 | [
"MIT"
] | dvats/bnn_mcmc_examples | bnn_mcmc_examples/inference/__init__.py | 108 | Python |
#!/usr/bin/env python
import sys, json, yaml, requests, random, io
import pkg_resources
from termcolor import colored
from urllib import parse
# This line is for pyinstaller and the binary release
##VERSION_PARSE##
if 'version' not in vars():
version = pkg_resources.require("majime")[0].version
def getopts(argv):
opts = {}
while argv:
if argv[0][0] == '-':
try:
opts[argv[0]] = argv[1]
except:
opts[argv[0]] = ""
argv = argv[1:]
return opts
def output(content, type=""):
try:
print(str(content))
except:
output("ERROR: output contains binary or UTF-8 content that this terminal cannot render.")
sys.exit(0)
def exit_with_errror(message):
print(colored(message, "red"))
sys.exit(1)
def generate_test(url):
print ("Generate test suite from " + str(url))
try:
swagger=requests.get(url).text
data=json.loads(swagger)
title = data["info"]["title"]
host = data["host"]
basepath = data["basePath"]
scheme = data["schemes"][0]
except:
exit_with_errror("ERROR: cannot open or parse Swagger URL.")
# print ("Title: " + str(title))
print(colored("Title:", "yellow"), colored(title, "green"))
# print ("Host: " + str(host))
print(colored("Host:", "yellow"), colored(host, "green"))
# print ("Base Path: " + str(basepath))
print(colored("Base Path:", "yellow"), colored(basepath, "green"))
# print ("Scheme: " + str(scheme))
print(colored("Scheme:", "yellow"), colored(scheme, "green"))
base_url = str(scheme) + "://" + str(host) + str(basepath)
gen_file = str(title).replace(" ", "_") + "-" + str(random.randint(1000,9999)) + ".yaml"
with io.open(gen_file, encoding='utf-8', mode='w') as f:
f.write("Base: " + '"%s"' % base_url + "\n")
f.write("Tests:\n")
for api_path in data["paths"]:
#print ("Path: " + api_path)
print(colored("Path:", "yellow"), colored(api_path, "green"))
for path_method in data["paths"][api_path]:
method = str(path_method).upper()
if method == "PARAMETERS":
continue
try:
description = data["paths"][api_path][path_method]["description"]
except:
description = ""
try:
description += data["paths"][api_path][path_method]["summary"]
except:
dummy=1
try:
example = data["paths"][api_path][path_method]["parameters"][0]["schema"]["example"]
except:
example = {}
# We only want the first response
try:
response = list(data["paths"][api_path][path_method]["responses"].keys())[0]
except:
response = "200"
try:
params = data["paths"][api_path][path_method]["parameters"]
except:
params = ""
if response == "default":
response = "200"
query_parameters = []
params_str = ""
params_num = 0
for p in params:
if str(p["in"]).upper() == "QUERY":
query_parameters.append(str(p["name"]))
if params_num == 0:
params_str += str(p["name"]) + "=XXX"
else:
params_str += "&" + str(p["name"]) + "=XXX"
params_num += 1
# print ("\tMethod: " + method)
print("\t" + colored("Method:", "magenta"), colored(method, "cyan"))
# print ("\tDescription: " + description)
print("\t" + colored("Description:", "magenta"), colored(description, "cyan"))
# print ("\tQuery Parameters: " + str(query_parameters))
print("\t" + colored("Query Parameters:", "magenta"), colored(query_parameters, "cyan"))
# print ("\tExpected Response: " + str(response))
print("\t" + colored("Expected Response:", "magenta"), colored(response, "cyan"))
if params_str == "":
out_path = api_path
else:
out_path = api_path + "?" + params_str
if description == "":
f.write(' # %s\n' % method)
else:
f.write(' # %s - %s\n' % (method, description))
f.write(' - path: "%s"\n' % out_path)
f.write(' method: "%s"\n' % method)
f.write(' headers: ""\n')
if method == "POST" or method == "PUT":
f.write(' content-type: "application/json"\n')
f.write(' body: %s\n' % str(example).replace("'", "\""))
f.write(' expect-response: "%s"\n' % response)
if method == "GET":
f.write(' expect-body: "json"\n')
f.write(' \n')
f.close()
print(colored("\n%s created" % gen_file, "green"))
def perform_test(testfile, output_format, dryrun):
try:
with open(testfile, 'r') as stream:
test_data = yaml.safe_load(stream)
except:
exit_with_errror("ERROR: cannot open or parse file")
if dryrun == "yes":
output(testfile + " successfully loaded.")
majime_base = test_data["Base"]
majime_host = majime_base.split("//")[-1].split("/")[0].split('?')[0]
tests_run = 0
tests_successful = 0
tests_failed = 0
json_out = {}
json_out["tests"] = []
json_out["output"] = {}
for majime_test in test_data["Tests"]:
majime_url = majime_base + majime_test["path"]
majime_queryparams = dict(parse.parse_qsl(parse.urlsplit(majime_url).query))
majime_baseurl = majime_url.split('?')[0].split('#')[0]
majime_method = majime_test["method"]
try:
majime_ctype = majime_test["content-type"]
except:
majime_ctype = ""
try:
majime_payload1 = majime_test["body"]
majime_payload = json.dumps(majime_payload1)
except:
majime_payload = ""
majime_expect_response = majime_test["expect-response"]
headers = { 'User-Agent': "majime-%s" % version}
if majime_ctype != "":
headers["Content-Type"] = majime_ctype
try:
response = requests.request(majime_method, majime_baseurl, data=majime_payload, params=majime_queryparams, headers=headers)
except:
print ("Fatal error when running test. Is the endpoint reachable?")
sys.exit(1)
code = response.status_code
tests_run += 1
if str(majime_expect_response) == str(code):
tests_successful +=1
json_out["tests"].append(dict(method=majime_method, url=majime_url, http_response=str(code), result="OK"))
else:
tests_failed +=1
json_out["tests"].append(dict(method=majime_method, url=majime_url, http_response=str(code), http_expected_response=majime_expect_response, result="FAIL"))
if tests_failed == 0:
json_out["output"]["overall_result"] = "OK"
else:
json_out["output"]["overall_result"] = "FAIL"
json_out["output"]["successful-tests"] = str(tests_successful)
json_out["output"]["failed-tests"] = str(tests_failed)
if output_format == "json":
print(str(json_out).replace("'","\""))
else:
for t in json_out["tests"]:
print("%s %s" % (t["method"], t["url"] ))
if t["result"] == "OK":
print (colored("HTTP " + str(t["http_response"]), "green" ))
else:
print (colored("HTTP " + str(t["http_response"]) + " but expected " + str(t["http_expected_response"]), "red" ))
print("%s tests, %s successful and %s failed" % (tests_run, tests_successful, tests_failed))
if tests_failed == 0:
sys.exit(0)
else:
sys.exit(1)
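# Sketch of the YAML layout perform_test() consumes, matching what generate_test() writes above
# (the URL and values are illustrative):
#
#   Base: "https://api.example.com/v1"
#   Tests:
#     - path: "/status"
#       method: "GET"
#       headers: ""
#       expect-response: "200"
#       expect-body: "json"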
def print_help():
print ('''Majime - Dead Simple API Unit Tests
Usage:
-f Load and run tests from YAML file
Example: majime -f test.yaml
-g Generate test suite (YAML) from Swagger document
Example: majime -g http://api.example.com/swagger.json
Switches:
-j JSON output for test runs
-d Dry-Run, do not execute tests - good for testing your YAML file
''')
def main():
args = getopts(sys.argv)
if '-v' in args:
output("Majime version " + str(version))
sys.exit(0)
if '-h' in args or args == {}:
print_help()
sys.exit(0)
if '-j' in args:
output_format = "json"
else:
output_format = "standard"
if '-d' in args:
dryrun = "yes"
else:
dryrun = "no"
if '-g' in args:
swagger_url = args['-g']
generate_test(swagger_url)
if '-f' in args:
test_file = args['-f']
perform_test(test_file, output_format, dryrun=dryrun)
if __name__ == '__main__':
main()
| 32.750877 | 167 | 0.52357 | [
"MIT"
] | u1i/majime | majime/__main__.py | 9,334 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-11-21 09:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0058_auto_20171110_1230'),
]
operations = [
migrations.AlterModelOptions(
name='welcomecontent',
options={'verbose_name': 'Welcome'},
),
migrations.AddField(
model_name='logo',
name='link',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='link',
name='component',
field=models.CharField(blank=True, choices=[(b'page', 'Page'), (b'project', 'Project'), (b'task', 'Task'), (b'fundraiser', 'Fundraiser'), (b'results-page', 'Results Page'), (b'news', 'News')], max_length=50, null=True, verbose_name='Component'),
),
]
| 31.966667 | 257 | 0.591241 | [
"BSD-3-Clause"
] | jayvdb/bluebottle | bluebottle/cms/migrations/0059_auto_20171121_1022.py | 959 | Python |
import pyb
led = pyb.LED(4)
brightness = 0
fade_amount = 5
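# Ramp the LED intensity up and down by stepping it every 30 ms and reversing at the bounds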
while True:
led.intensity(brightness)
brightness += fade_amount
pyb.delay(30)
if brightness > 255 or brightness < 0:
fade_amount *= -1
| 15 | 41 | 0.685714 | [
"MIT"
] | Anthlis/MicroPython-Quickstart-Scripts | 01.LEDs/fade.py | 210 | Python |
import discord
from redbot.core import commands
import datetime
import aiohttp
import asyncio
import json
import re
from typing import Optional
class Conversions(getattr(commands, "Cog", object)):
"""
Gather information about various crypto currencies,
rare metals, stocks, and converts to different currencies
"""
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["bitcoin", "BTC"])
async def btc(self, ctx, ammount: Optional[float] = 1.0, currency="USD", full: bool = True):
"""
converts from BTC to a given currency.
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, "BTC", ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, "BTC", ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["ethereum", "ETH"])
async def eth(self, ctx, ammount: Optional[float] = 1.0, currency="USD", full: bool = True):
"""
converts from ETH to a given currency.
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, "ETH", ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, "ETH", ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["litecoin", "LTC"])
async def ltc(self, ctx, ammount: Optional[float] = 1.0, currency="USD", full: bool = True):
"""
converts from LTC to a given currency.
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, "LTC", ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, "LTC", ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["monero", "XMR"])
async def xmr(self, ctx, ammount: Optional[float] = 1.0, currency="USD", full: bool = True):
"""
converts from XMR to a given currency.
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, "XMR", ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, "XMR", ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["bitcoin-cash", "BCH"])
async def bch(self, ctx, ammount: Optional[float] = 1.0, currency="USD", full: bool = True):
"""
converts from BCH to a given currency.
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, "BCH", ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, "BCH", ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
async def checkcoins(self, base):
link = "https://api.coinmarketcap.com/v2/ticker/"
async with aiohttp.ClientSession() as session:
async with session.get(link) as resp:
data = await resp.json()
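                # Accept a match on either the ticker symbol or the full coin name (case-insensitive)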
for coin in data["data"]:
if (
base.upper() == data["data"][coin]["symbol"].upper()
or base.lower() == data["data"][coin]["name"].lower()
):
return data["data"][coin]
return None
@commands.command()
async def multicoin(self, ctx, *, coins=None):
"""
Gets the current USD value for a list of coins
`coins` must be a list of white space separated crypto coins
e.g. `[p]multicoin BTC BCH LTC ETH DASH XRP`
"""
coin_list = []
if coins is None:
async with aiohttp.ClientSession() as session:
async with session.get("https://api.coinmarketcap.com/v2/ticker/") as resp:
data = await resp.json()
for coin in data["data"]:
coin_list.append(data["data"][coin])
else:
            coins = re.split(r"\W+", coins)
for coin in coins:
coin_list.append(await self.checkcoins(coin))
embed = discord.Embed(title="Crypto coin comparison")
if ctx.channel.permissions_for(ctx.me).embed_links:
for coin in coin_list[:25]:
if coin is not None:
msg = "1 {0} is {1:,.2f} USD".format(
coin["symbol"], float(coin["quotes"]["USD"]["price"])
)
embed.add_field(name=coin["name"], value=msg)
await ctx.send(embed=embed)
else:
msg = ""
for coin in coin_list[:25]:
if coin is not None:
msg += "1 {0} is {1:,.2f} USD\n".format(
coin["symbol"], float(coin["quotes"]["USD"]["price"])
)
await ctx.send(msg)
@commands.command()
async def crypto(
self, ctx, coin, ammount: Optional[float] = 1.0, currency="USD", full: bool = True
):
"""
Displays the latest information about a specified crypto currency
`coin` must be the name or symbol of a crypto coin
`[ammount]` is any number to convert the value of defaults to 1 coin
`[currency]` is the desired currency you want to convert defaults to USD
`[full]` is a True/False value whether to display just the converted amount
or the full display for the currency
"""
if ammount == 1.0:
embed = await self.crypto_embed(ctx, coin, ammount, currency, full)
else:
embed = await self.crypto_embed(ctx, coin, ammount, currency, False)
if type(embed) is str:
await ctx.send(embed)
else:
await ctx.send(embed=embed)
async def crypto_embed(self, ctx, coin, ammount=1.0, currency="USD", full=True):
"""Creates the embed for the crypto currency"""
coin_data = await self.checkcoins(coin)
        if coin_data is None:
            # Return a plain string so the callers can send it as a normal message instead of an empty embed
            return "{} is not in my list of currencies!".format(coin)
coin_colour = {
"Bitcoin": discord.Colour.gold(),
"Bitcoin Cash": discord.Colour.orange(),
"Ethereum": discord.Colour.dark_grey(),
"Litecoin": discord.Colour.dark_grey(),
"Monero": discord.Colour.orange(),
}
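        # API quotes are in USD; convert below if another currency was requested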
price = float(coin_data["quotes"]["USD"]["price"]) * ammount
market_cap = float(coin_data["quotes"]["USD"]["market_cap"])
volume_24h = float(coin_data["quotes"]["USD"]["volume_24h"])
coin_image = "https://s2.coinmarketcap.com/static/img/coins/128x128/{}.png".format(
coin_data["id"]
)
coin_url = "https://coinmarketcap.com/currencies/{}".format(coin_data["id"])
if currency.upper() != "USD":
conversionrate = await self.conversionrate("USD", currency.upper())
price = conversionrate * price
market_cap = conversionrate * market_cap
volume_24h = conversionrate * volume_24h
msg = "{0} {3} is **{1:,.2f} {2}**".format(
ammount, price, currency.upper(), coin_data["symbol"]
)
embed = discord.Embed(description=msg, colour=discord.Colour.dark_grey())
if coin_data["name"] in coin_colour:
embed.colour = coin_colour[coin_data["name"]]
embed.set_footer(text="As of")
embed.set_author(name=coin_data["name"], url=coin_url, icon_url=coin_image)
embed.timestamp = datetime.datetime.utcfromtimestamp(int(coin_data["last_updated"]))
if full:
hour_1 = coin_data["quotes"]["USD"]["percent_change_1h"]
hour_24 = coin_data["quotes"]["USD"]["percent_change_24h"]
days_7 = coin_data["quotes"]["USD"]["percent_change_7d"]
hour_1_emoji = "🔼" if hour_1 >= 0 else "🔽"
hour_24_emoji = "🔼" if hour_24 >= 0 else "🔽"
days_7_emoji = "🔼" if days_7 >= 0 else "🔽"
available_supply = "{0:,.2f}".format(coin_data["circulating_supply"])
try:
max_supply = "{0:,.2f}".format(coin_data["max_supply"])
            except (TypeError, KeyError):
max_supply = "\N{INFINITY}"
total_supply = "{0:,.2f}".format(coin_data["total_supply"])
embed.set_thumbnail(url=coin_image)
embed.add_field(
name="Market Cap", value="{0:,.2f} {1}".format(market_cap, currency.upper())
)
embed.add_field(
name="24 Hour Volume", value="{0:,.2f} {1}".format(volume_24h, currency.upper())
)
embed.add_field(name="Available Supply", value=available_supply)
if max_supply is not None:
embed.add_field(name="Max Supply", value=max_supply)
embed.add_field(name="Total Supply ", value=total_supply)
embed.add_field(name="Change 1 hour " + hour_1_emoji, value="{}%".format(hour_1))
embed.add_field(name="Change 24 hours " + hour_24_emoji, value="{}%".format(hour_24))
embed.add_field(name="Change 7 days " + days_7_emoji, value="{}%".format(days_7))
if not ctx.channel.permissions_for(ctx.me).embed_links:
if full:
return (
f"{msg}\nMarket Cap: **{market_cap}**\n"
f"24 Hour Volume: **{volume_24h}**\nAvailable Supply: **{available_supply}**\n"
f"Max Supply: **{max_supply}**\nTotal Supply: **{total_supply}**\n"
f"Change 1 hour{hour_1_emoji}: **{hour_1}%**\n"
f"Change 24 hours{hour_24_emoji}: **{hour_24}%**\n"
f"Change 7 days{days_7_emoji}: **{days_7}%**\n"
)
else:
return msg
else:
return embed
@commands.command()
async def gold(self, ctx, ammount: Optional[int] = 1, currency="USD"):
"""
Converts gold in ounces to a given currency.
`ammount` must be a number of ounces to convert defaults to 1 ounce
`[currency]` must be a valid currency defaults to USD
"""
GOLD = "https://www.quandl.com/api/v3/datasets/WGC/GOLD_DAILY_{}.json?api_key=EKvr5W-sJUFVSevcpk4v"
async with aiohttp.ClientSession() as session:
async with session.get(GOLD.format(currency.upper())) as resp:
data = await resp.json()
price = (data["dataset"]["data"][0][1]) * ammount
msg = "{0} oz of Gold is {1:,.2f} {2}".format(ammount, price, currency.upper())
embed = discord.Embed(descirption="Gold", colour=discord.Colour.gold())
embed.add_field(name="Gold", value=msg)
embed.set_thumbnail(
url="https://upload.wikimedia.org/wikipedia/commons/d/d7/Gold-crystals.jpg"
)
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(msg)
else:
await ctx.send(embed=embed)
@commands.command()
async def silver(self, ctx, ammount: Optional[int] = 1, currency="USD"):
"""
Converts silver in ounces to a given currency.
`[ammount]` must be a number of ounces to convert defaults to 1 ounce
`[currency]` must be a valid currency defaults to USD
"""
SILVER = (
"https://www.quandl.com/api/v3/datasets/LBMA/SILVER.json?api_key=EKvr5W-sJUFVSevcpk4v"
)
async with aiohttp.ClientSession() as session:
async with session.get(SILVER) as resp:
data = await resp.json()
price = (data["dataset"]["data"][0][1]) * ammount
if currency != "USD":
price = await self.conversionrate("USD", currency.upper()) * price
msg = "{0} oz of Silver is {1:,.2f} {2}".format(ammount, price, currency.upper())
embed = discord.Embed(descirption="Silver", colour=discord.Colour.lighter_grey())
embed.add_field(name="Silver", value=msg)
embed.set_thumbnail(
url="https://upload.wikimedia.org/wikipedia/commons/5/55/Silver_crystal.jpg"
)
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(msg)
else:
await ctx.send(embed=embed)
@commands.command()
async def platinum(self, ctx, ammount: Optional[int] = 1, currency="USD"):
"""
Converts platinum in ounces to a given currency.
`[ammount]` must be a number of ounces to convert defaults to 1 ounce
`[currency]` must be a valid currency defaults to USD
"""
PLATINUM = "https://www.quandl.com/api/v3/datasets/JOHNMATT/PLAT.json?api_key=EKvr5W-sJUFVSevcpk4v"
async with aiohttp.ClientSession() as session:
async with session.get(PLATINUM) as resp:
data = await resp.json()
price = (data["dataset"]["data"][0][1]) * ammount
if currency != "USD":
price = await self.conversionrate("USD", currency.upper()) * price
msg = "{0} oz of Platinum is {1:,.2f} {2}".format(ammount, price, currency.upper())
embed = discord.Embed(descirption="Platinum", colour=discord.Colour.dark_grey())
embed.add_field(name="Platinum", value=msg)
embed.set_thumbnail(
url="https://upload.wikimedia.org/wikipedia/commons/6/68/Platinum_crystals.jpg"
)
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(msg)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["ticker"])
async def stock(self, ctx, ticker, currency="USD"):
"""
Gets current ticker symbol price.
`<ticker>` is the ticker symbol you want to look up
`[currency]` is the currency you want to convert to defaults to USD
"""
stock = "https://www.quandl.com/api/v3/datasets/WIKI/{}.json?api_key=EKvr5W-sJUFVSevcpk4v"
async with aiohttp.ClientSession() as session:
async with session.get(stock.format(ticker.upper())) as resp:
data = await resp.json()
convertrate = 1
if currency != "USD":
            convertrate = await self.conversionrate("USD", currency.upper())
price = (data["dataset"]["data"][0][1]) * convertrate
msg = "{0} is {1:,.2f} {2}".format(ticker.upper(), price, currency.upper())
embed = discord.Embed(descirption="Stock Price", colour=discord.Colour.lighter_grey())
embed.add_field(name=ticker.upper(), value=msg)
if not ctx.channel.permissions_for(ctx.me).embed_links:
await ctx.send(msg)
else:
await ctx.send(embed=embed)
@commands.command(aliases=["currency"])
async def convertcurrency(
self, ctx, ammount: Optional[float] = 1.0, currency1="USD", currency2="GBP"
):
"""
Converts a value between 2 different currencies
`[ammount]` is the ammount you want to convert default is 1
`[currency1]` is the currency you have default is USD
`[currency2]` is the currency you want to convert to default is GBP
"""
currency1 = currency1.upper()
currency2 = currency2.upper()
conversion = await self.conversionrate(currency1, currency2)
if not conversion:
return await ctx.send("The currencies provided are not valid!")
conversion = conversion * ammount
await ctx.send("{0} {1} is {2:,.2f} {3}".format(ammount, currency1, conversion, currency2))
async def conversionrate(self, currency1, currency2):
"""Function to convert different currencies"""
params = {"base": currency1, "symbols": currency2}
CONVERSIONRATES = "https://api.exchangeratesapi.io/latest"
try:
async with aiohttp.ClientSession() as session:
async with session.get(CONVERSIONRATES, params=params) as resp:
data = await resp.json()
conversion = data["rates"][currency2]
return conversion
except Exception as e:
print(e)
return None
| 45.423174 | 107 | 0.580769 | [
"MIT"
] | Jintaku/Trusty-cogs | conversions/conversions.py | 18,051 | Python |
from pythonosc.udp_client import SimpleUDPClient
ip = "127.0.0.1"
port = 53000
client = SimpleUDPClient(ip, port)
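# These OSC addresses match QLab's API (53000 is QLab's default port): create a text cue,
# then set the selected cue's text colour to yellow (RGBA)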
client.send_message("/new", "text")
client.send_message("/cue/selected/text/format/color", [1, 1, 0, 1])
| 20.272727 | 68 | 0.721973 | [
"MIT"
] | bozi6/hello-world | osctests/testingosc.py | 223 | Python |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.core.models import Page
from wagtail.admin.edit_handlers import FieldPanel
from wagtailtextanalysis.text_analysis import (
TextAnalysis,
KeyPhrasesField,
SentimentField,
)
class ArticlePage(Page, TextAnalysis):
wysiwyg = models.TextField(blank=True, null=True, verbose_name=_("Wysiwyg"))
key_phrases = models.TextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel("wysiwyg"),
FieldPanel("key_phrases"),
]
text_analysis_fields = [KeyPhrasesField("title"), KeyPhrasesField("wysiwyg")]
def update_key_phrases(self, phrases):
self.key_phrases = " ".join(phrases)
class Comment(models.Model, TextAnalysis):
title = models.CharField(max_length=255)
content = models.TextField()
sentiment = models.DecimalField(max_digits=7, decimal_places=6, default=0)
    text_analysis_fields = [SentimentField("title"), SentimentField("content")]
def update_sentiment(self, sentiment):
self.sentiment = sentiment
| 29.837838 | 81 | 0.735507 | [
"MIT"
] | marteinn/wagtail-text-analysis | wagtailtextanalysis/tests/demosite/models.py | 1,104 | Python |
"""Analysis of a repository for needed Python updates."""
from __future__ import annotations
import logging
import subprocess
from typing import TYPE_CHECKING
from git import Repo
from neophile.analysis.base import BaseAnalyzer
from neophile.exceptions import UncommittedChangesError
from neophile.update.python import PythonFrozenUpdate
if TYPE_CHECKING:
from pathlib import Path
from typing import List, Optional
from neophile.update.base import Update
from neophile.virtualenv import VirtualEnv
__all__ = ["PythonAnalyzer"]
class PythonAnalyzer(BaseAnalyzer):
"""Analyze a tree for needed Python frozen dependency updates.
Parameters
----------
root : `pathlib.Path`
Root of the directory tree to analyze.
virtualenv : `neophile.virtualenv.VirtualEnv`, optional
Virtual environment manager.
"""
def __init__(
self, root: Path, virtualenv: Optional[VirtualEnv] = None
) -> None:
self._root = root
self._virtualenv = virtualenv
async def analyze(self, update: bool = False) -> List[Update]:
"""Analyze a tree and return needed Python frozen dependency updates.
Parameters
----------
update : `bool`, optional
If set to `True`, leave the update applied. This avoids having
to run ``make update-deps`` twice, once to see if an update is
needed and again to apply it properly.
Returns
-------
results : List[`neophile.update.base.Update`]
Will contain either no elements (no updates needed) or a single
element (an update needed).
Raises
------
neophile.exceptions.UncommittedChangesError
The repository being analyzed has uncommitted changes and
therefore cannot be checked for updates.
subprocess.CalledProcessError
Running ``make update-deps`` failed.
"""
for name in ("Makefile", "requirements/main.in"):
if not (self._root / name).exists():
return []
repo = Repo(str(self._root))
if repo.is_dirty():
msg = "Working tree contains uncommitted changes"
raise UncommittedChangesError(msg)
try:
if self._virtualenv:
self._virtualenv.run(
["make", "update-deps"],
cwd=str(self._root),
check=True,
capture_output=True,
)
else:
subprocess.run(
["make", "update-deps"],
cwd=str(self._root),
check=True,
capture_output=True,
)
except subprocess.CalledProcessError as e:
logging.error("make update-deps failed: %s%s", e.stdout, e.stderr)
return []
if not repo.is_dirty():
return []
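        # Unless the caller asked to keep the update applied, revert the changes made by make update-deps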
if not update:
repo.git.restore(".")
return [
PythonFrozenUpdate(
path=self._root / "requirements",
applied=update,
virtualenv=self._virtualenv,
)
]
def name(self) -> str:
return "python"
| 30.009174 | 78 | 0.578722 | [
"MIT"
] | lsst-sqre/neophile | src/neophile/analysis/python.py | 3,271 | Python |
if 0:
for i in range(1,11):
print(i) | 16.666667 | 26 | 0.46 | [
"MIT"
] | ZhouBo20171229/- | test.py | 50 | Python |
import unittest
import zserio
from testutils import getZserioApi
class UnionConstraintsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "constraints.zs").union_constraints
def testReadCorrectConstraints(self):
value8 = self.VALUE8_CORRECT_CONSTRAINT
writer = zserio.BitStreamWriter()
self._writeValue8(writer, value8)
reader = zserio.BitStreamReader(writer.getByteArray())
unionConstraints = self.api.UnionConstraints()
unionConstraints.read(reader)
self.assertEqual(self.api.UnionConstraints.CHOICE_value8, unionConstraints.choiceTag())
self.assertEqual(value8, unionConstraints.getValue8())
def testReadWrongValue8Constraint(self):
value8 = self.VALUE8_WRONG_CONSTRAINT
writer = zserio.BitStreamWriter()
self._writeValue8(writer, value8)
reader = zserio.BitStreamReader(writer.getByteArray())
unionConstraints = self.api.UnionConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
unionConstraints.read(reader)
def testReadWrongValue16Constraint(self):
value16 = self.VALUE16_WRONG_CONSTRAINT
writer = zserio.BitStreamWriter()
self._writeValue16(writer, value16)
reader = zserio.BitStreamReader(writer.getByteArray())
unionConstraints = self.api.UnionConstraints()
with self.assertRaises(zserio.PythonRuntimeException):
unionConstraints.read(reader)
def testWriteCorrectConstraints(self):
value16 = self.VALUE16_CORRECT_CONSTRAINT
unionConstraints = self.api.UnionConstraints()
unionConstraints.setValue16(value16)
writer = zserio.BitStreamWriter()
unionConstraints.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readUnionConstraints = self.api.UnionConstraints.fromReader(reader)
self.assertEqual(self.api.UnionConstraints.CHOICE_value16, readUnionConstraints.choiceTag())
self.assertEqual(value16, readUnionConstraints.getValue16())
self.assertEqual(unionConstraints, readUnionConstraints)
def testWriteWrongValue8Constraint(self):
value8 = self.VALUE8_WRONG_CONSTRAINT
unionConstraints = self.api.UnionConstraints()
unionConstraints.setValue8(value8)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
unionConstraints.write(writer)
def testWriteWrongValue16Constraint(self):
value16 = self.VALUE16_WRONG_CONSTRAINT
unionConstraints = self.api.UnionConstraints()
unionConstraints.setValue16(value16)
writer = zserio.BitStreamWriter()
with self.assertRaises(zserio.PythonRuntimeException):
unionConstraints.write(writer)
def _writeValue8(self, writer, value8):
writer.writeVarSize(self.api.UnionConstraints.CHOICE_value8)
writer.writeBits(value8, 8)
def _writeValue16(self, writer, value16):
writer.writeVarSize(self.api.UnionConstraints.CHOICE_value16)
        writer.writeBits(value16, 16)
VALUE8_CORRECT_CONSTRAINT = 1
VALUE8_WRONG_CONSTRAINT = 0
VALUE16_CORRECT_CONSTRAINT = 256
VALUE16_WRONG_CONSTRAINT = 255
| 38.465116 | 100 | 0.726421 | [
"BSD-3-Clause"
] | chenpeihua/zserio | test/language/constraints/python/UnionConstraintsTest.py | 3,308 | Python |
import numpy as np
import sys
path = sys.argv[1]
points = []
curves = []
polygons = []
with open(path, 'r') as f:
line = f.readline()
while line:
line = line.strip()
if len(line) <= 0:
line = f.readline()
continue
tokens = line.split(' ')
if tokens[0] == 'p':
points.append([
float(tokens[1]),
float(tokens[2])])
elif tokens[0] == 'c':
tmp = []
for i in range(1, len(tokens)):
tmp.append(int(tokens[i]))
curves.append(tmp)
elif tokens[0] == 'poly':
tmp = []
for i in range(1, len(tokens)):
tmp.append(int(tokens[i]))
polygons.append(tmp)
else:
assert(False)
line = f.readline()
points = np.array(points)
mmin = np.min(points, axis=0)
mmax = np.max(points, axis=0)
with open("test.eps", 'w') as f:
f.write("%!PS-Adobe-3.0 EPSF-3.0\n")
f.write("%%BoundingBox: {} {} {} {}\n\n".format(mmin[0], mmin[1], mmax[0], mmax[1]))
f.write("%%Pages: 1\n")
f.write("%%Page: 1 1\n")
f.write("/show-ctr {\ndup stringwidth pop\n -2 div 0\n rmoveto show\n} def\n\n 2 setlinejoin\n\n")
f.write("255 0 0 setrgbcolor\n")
f.write("1 setlinewidth\n\n")
for poly in polygons:
first = True
for curve_idx in poly:
curve = curves[curve_idx]
if first:
f.write("{} {} moveto\n".format(points[curve[0], 0], points[curve[0], 1]))
first = False
if len(curve) == 4:
f.write("{} {} {} {} {} {} curveto\n".format(
points[curve[1], 0], points[curve[1], 1],
points[curve[2], 0], points[curve[2], 1],
points[curve[3], 0], points[curve[3], 1]))
elif len(curve) == 2:
f.write("{} {} lineto\n".format(
points[curve[1], 0], points[curve[1], 1]))
else:
print(curve)
assert(False)
f.write("stroke\n\n\n")
| 28.662162 | 102 | 0.466289 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Yixin-Hu/TriWild | python/toeps.py | 2,121 | Python |
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
from streamlink.stream import HTTPStream, HDSStream, RTMPStream
MEDIA_URL = "http://www.ardmediathek.de/play/media/{0}"
SWF_URL = "http://www.ardmediathek.de/ard/static/player/base/flash/PluginFlash.swf"
HDCORE_PARAMETER = "?hdcore=3.3.0"
QUALITY_MAP = {
"auto": "auto",
3: "544p",
2: "360p",
1: "288p",
0: "144p"
}
_url_re = re.compile(r"http(s)?://(\w+\.)?ardmediathek.de/tv")
_media_id_re = re.compile(r"/play/config/(\d+)")
_media_schema = validate.Schema({
"_mediaArray": [{
"_mediaStreamArray": [{
validate.optional("_server"): validate.text,
"_stream": validate.any(validate.text, [validate.text]),
"_quality": validate.any(int, validate.text)
}]
}]
})
_smil_schema = validate.Schema(
validate.union({
"base": validate.all(
validate.xml_find("head/meta"),
validate.get("base"),
validate.url(scheme="http")
),
"videos": validate.all(
validate.xml_findall("body/seq/video"),
[validate.get("src")]
)
})
)
class ard_mediathek(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_http_streams(self, info):
name = QUALITY_MAP.get(info["_quality"], "vod")
urls = info["_stream"]
if not isinstance(info["_stream"], list):
urls = [urls]
for url in urls:
stream = HTTPStream(self.session, url)
yield name, stream
def _get_hds_streams(self, info):
# Needs the hdcore parameter added
url = info["_stream"] + HDCORE_PARAMETER
return HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL).items()
def _get_rtmp_streams(self, info):
name = QUALITY_MAP.get(info["_quality"], "live")
params = {
"rtmp": info["_server"].strip(),
"playpath": info["_stream"],
"pageUrl": self.url,
"swfVfy": SWF_URL,
"live": True
}
stream = RTMPStream(self.session, params)
yield name, stream
def _get_smil_streams(self, info):
res = http.get(info["_stream"])
smil = http.xml(res, "SMIL config", schema=_smil_schema)
for video in smil["videos"]:
url = "{0}/{1}{2}".format(smil["base"], video, HDCORE_PARAMETER)
streams = HDSStream.parse_manifest(self.session, url, pvswf=SWF_URL)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in streams.items():
yield stream
def _get_streams(self):
res = http.get(self.url)
match = _media_id_re.search(res.text)
if match:
media_id = match.group(1)
else:
return
res = http.get(MEDIA_URL.format(media_id))
media = http.json(res, schema=_media_schema)
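        # Each media entry may expose several stream variants (RTMP, HDS, SMIL or plain HTTP)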
for media in media["_mediaArray"]:
for stream in media["_mediaStreamArray"]:
server = stream.get("_server", "").strip()
stream_ = stream["_stream"]
if isinstance(stream_, list):
if not stream_:
continue
stream_ = stream_[0]
stream_ = stream_.strip()
if server.startswith("rtmp://"):
parser = self._get_rtmp_streams
parser_name = "RTMP"
elif stream_.endswith(".f4m"):
parser = self._get_hds_streams
parser_name = "HDS"
elif stream_.endswith(".smil"):
parser = self._get_smil_streams
parser_name = "SMIL"
elif stream_.startswith("http"):
parser = self._get_http_streams
parser_name = "HTTP"
try:
# TODO: Replace with "yield from" when dropping Python 2.
for stream in parser(stream):
yield stream
except IOError as err:
self.logger.error("Failed to extract {0} streams: {1}",
parser_name, err)
__plugin__ = ard_mediathek
| 32.893939 | 83 | 0.546983 | [
"BSD-2-Clause"
] | nxkbd/streamlink | src/streamlink/plugins/ard_mediathek.py | 4,342 | Python |
label = 'spss'
def add_steps(steps: list, pipeline_id: str, config: dict) -> list:
steps.append(('spss.add_spss', {
'source': config['url']
}))
return steps
| 18 | 67 | 0.594444 | [
"MIT"
] | frictionlessdata/pilot-ukds | datapackage_pipelines_ukds/pipeline_steps/spss.py | 180 | Python |
import warnings
import numpy as np
from scipy import signal
from scipy import stats
import matplotlib.pylab as plt
class SpikeCalcsGeneric(object):
"""
Deals with the processing and analysis of spike data.
Parameters
----------
spike_times : array_like
The times of 'spikes' in the trial
Should be the same length as the cluster identity vector _spk_clusters
waveforms : np.array, optional
An nSpikes x nSamples array (nSamples usually 50)
Notes
-----
Units for time are provided as per the sample rate but converted
internally to milliseconds
"""
@staticmethod
def getParam(waveforms, param='Amp', t=200, fet=1):
"""
Returns the requested parameter from a spike train as a numpy array
Parameters
-------------------
waveforms - numpy array
Shape of array can be an nSpikes x nSamples
OR
a nSpikes x nElectrodes x nSamples
param - str
Valid values are:
'Amp' - peak-to-trough amplitude (default)
'P' - height of peak
'T' - depth of trough
'Vt' height at time t
'tP' - time of peak (in seconds)
'tT' - time of trough (in seconds)
'PCA' - first n fet principal components (defaults to 1)
t - int
The time used for Vt
fet - int
The number of principal components (used with param 'PCA')
"""
from scipy import interpolate
from sklearn.decomposition import PCA
if param == 'Amp':
return np.ptp(waveforms, axis=-1)
elif param == 'P':
return np.max(waveforms, axis=-1)
elif param == 'T':
return np.min(waveforms, axis=-1)
elif param == 'Vt':
times = np.arange(0, 1000, 20)
f = interpolate.interp1d(times, range(50), 'nearest')
if waveforms.ndim == 2:
return waveforms[:, int(f(t))]
elif waveforms.ndim == 3:
return waveforms[:, :, int(f(t))]
elif param == 'tP':
idx = np.argmax(waveforms, axis=-1)
m = interpolate.interp1d([0, waveforms.shape[-1]-1], [0, 1/1000.])
return m(idx)
elif param == 'tT':
idx = np.argmin(waveforms, axis=-1)
m = interpolate.interp1d([0, waveforms.shape[-1]-1], [0, 1/1000.])
return m(idx)
elif param == 'PCA':
pca = PCA(n_components=fet)
if waveforms.ndim == 2:
return pca.fit(waveforms).transform(waveforms).squeeze()
elif waveforms.ndim == 3:
out = np.zeros((waveforms.shape[0], waveforms.shape[1] * fet))
st = np.arange(0, waveforms.shape[1] * fet, fet)
en = np.arange(fet, fet + (waveforms.shape[1] * fet), fet)
rng = np.vstack((st, en))
for i in range(waveforms.shape[1]):
if ~np.any(np.isnan(waveforms[:, i, :])):
A = np.squeeze(
pca.fit(waveforms[:, i, :].squeeze()).transform(
waveforms[:, i, :].squeeze()))
if A.ndim < 2:
out[:, rng[0, i]:rng[1, i]] = np.atleast_2d(A).T
else:
out[:, rng[0, i]:rng[1, i]] = A
return out
def __init__(self, spike_times, waveforms=None, **kwargs):
self.spike_times = spike_times # IN SECONDS
self.waveforms = waveforms
self._event_ts = None # the times that events occured IN SECONDS
# vector of cluster ids, same length as spike_times
self._spk_clusters = None
# window, in seconds, either side of the stimulus, to examine
self._event_window = np.array((-0.050, 0.100))
self._stim_width = None # the width, in ms, of the stimulus
# used to increase / decrease size of bins in psth
self._secs_per_bin = 0.001
self._sample_rate = 30000
self._duration = None
self._pre_spike_samples = 18
self._post_spike_samples = 32
@property
def sample_rate(self):
return self._sample_rate
@sample_rate.setter
def sample_rate(self, value):
self._sample_rate = value
@property
def pre_spike_samples(self):
return self._pre_spike_samples
@pre_spike_samples.setter
def pre_spike_samples(self, value):
        self._pre_spike_samples = int(value)
@property
def post_spike_samples(self):
return self._post_spike_samples
@post_spike_samples.setter
def post_spike_samples(self, value):
        self._post_spike_samples = int(value)
def n_spikes(self, cluster=None):
if cluster is None:
return len(self.spike_times)
else:
if self.spk_clusters is None:
warnings.warn("No clusters available, please load some.")
return
else:
return np.count_nonzero(self._spk_clusters == cluster)
@property
def event_ts(self):
return self._event_ts
@event_ts.setter
def event_ts(self, value):
self._event_ts = value
@property
def duration(self):
return self._duration
@duration.setter
def duration(self, value):
self._duration = value
@property
def spk_clusters(self):
return self._spk_clusters
@spk_clusters.setter
def spk_clusters(self, value):
self._spk_clusters = value
@property
def event_window(self):
return self._event_window
@event_window.setter
def event_window(self, value):
self._event_window = value
@property
def stim_width(self):
return self._stim_width
@stim_width.setter
def stim_width(self, value):
self._stim_width = value
@property
def _secs_per_bin(self):
return self.__secs_per_bin
@_secs_per_bin.setter
def _secs_per_bin(self, value):
self.__secs_per_bin = value
def trial_mean_fr(self, cluster: int) -> float:
# Returns the trial mean firing rate for the cluster
if self.duration is None:
raise IndexError("No duration provided, give me one!")
return self.n_spikes(cluster) / self.duration
def mean_isi_range(self, cluster: int, n: int) -> float:
"""
Calculates the mean of the autocorrelation from 0 to n milliseconds
Used to help classify a neurons type (principal, interneuron etc)
Parameters
----------
cluster : int
The cluster to analyse
n : int
The range in milliseconds to calculate the mean over
Returns
-------
mean_isi_range : float
The mean of the autocorrelogram between 0 and n milliseconds
"""
if cluster not in self.spk_clusters:
raise IndexError("Cluster not available")
bins = 201
trange = np.array((-500, 500))
t = self.spike_times[self.spk_clusters == cluster]
y = self.xcorr(t, Trange=trange)
y = y.astype(np.int64) # See xcorr docs
counts, bins = np.histogram(y[y != 0], bins=bins, range=trange)
mask = np.logical_and(bins > 0, bins < n)
return np.mean(counts[mask[1:]])
def xcorr(
self, x1: np.ndarray, x2=None,
Trange=None, **kwargs) -> np.ndarray:
"""
Calculates the histogram of the ISIs in x1 or x1 vs x2
Parameters
----------
x1, x2 : array_like
The times of the spikes emitted by the cluster(s)
NB must be signed int to accomodate negative times
Trange : array_like
Range of times to bin up. Defaults to [-500, +500] in ms
Returns
-------
y : np.ndarray
The time differences between spike times in x1 over the range
of times defined Trange
"""
if x2 is None:
x2 = x1.copy()
if Trange is None:
Trange = np.array([-500, 500])
if type(Trange) == list:
Trange = np.array(Trange)
y = []
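        # For every spike in x1, locate the spikes in x2 that fall within Trange and collect the time lags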
irange = x1[:, np.newaxis] + Trange[np.newaxis, :]
dts = np.searchsorted(x2, irange)
for i, t in enumerate(dts):
y.extend(x2[t[0]:t[1]] - x1[i])
y = np.array(y, dtype=float)
return y
def getClusterWaveforms(self, cluster_id: int, channel_id: int):
"""
NB Over-ride this in the individual specialisations below
Get the waveforms for a particular cluster on a given channel
Parameters
----------
cluster_id : int
Returns
-------
np.array : the waveforms
"""
if self.waveforms is not None:
return self.waveforms[
self.spk_clusters == cluster_id,
channel_id,
:]
else:
return None
def getMeanWaveform(self, cluster_id: int, channel_id: int):
"""
Returns the mean waveform and sem for a given spike train
on a particular channel
Parameters
----------
cluster_id: int
The cluster to get the mean waveform for
Returns
-------
mn_wvs: ndarray (floats) - usually 4x50 for tetrode recordings
the mean waveforms
std_wvs: ndarray (floats) - usually 4x50 for tetrode recordings
the standard deviations of the waveforms
"""
if self.spk_clusters is not None:
if cluster_id not in self.spk_clusters:
warnings.warn('Cluster not available. Try again!')
raise IndexError("cluster_id not available")
x = self.getClusterWaveforms(cluster_id, channel_id)
if x is not None:
return np.mean(x, axis=0), np.std(x, axis=0)
else:
return None
else:
return None
def calculatePSTH(self, cluster_id, **kwargs):
"""
Calculate the PSTH of event_ts against the spiking of a cell
Parameters
----------
cluster_id : int
The cluster for which to calculate the psth
Returns
-------
x, y : list
The list of time differences between the spikes of the cluster
and the events (x) and the trials (y)
"""
if self._event_ts is None:
raise Exception("Need some event timestamps! Aborting")
if self._spk_clusters is None:
raise Exception("Need cluster identities! Aborting")
event_ts = self.event_ts
event_ts.sort()
if type(event_ts) == list:
event_ts = np.array(event_ts)
spike_times = self.spike_times[self.spk_clusters == cluster_id]
irange = event_ts[:, np.newaxis] + self.event_window[np.newaxis, :]
dts = np.searchsorted(spike_times, irange)
x = []
y = []
for i, t in enumerate(dts):
tmp = spike_times[t[0]:t[1]] - event_ts[i]
x.extend(tmp)
y.extend(np.repeat(i, len(tmp)))
return x, y
def clusterQuality(self, cluster, fet=1):
"""
returns the L-ratio and Isolation Distance measures
calculated on the principal components of the energy in a spike matrix
"""
if self.waveforms is None:
return None
nSpikes, nElectrodes, _ = self.waveforms.shape
wvs = self.waveforms.copy()
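        # Energy of each spike on each electrode; electrodes with zero total energy are discarded below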
E = np.sqrt(np.nansum(self.waveforms ** 2, axis=2))
zeroIdx = np.sum(E, 0) == [0, 0, 0, 0]
E = E[:, ~zeroIdx]
wvs = wvs[:, ~zeroIdx, :]
normdWaves = (wvs.T / E.T).T
PCA_m = self.getParam(normdWaves, 'PCA', fet=fet)
badIdx = np.sum(PCA_m, axis=0) == 0
PCA_m = PCA_m[:, ~badIdx]
# get mahalanobis distance
idx = self.spk_clusters == cluster
nClustSpikes = np.count_nonzero(idx)
try:
d = self._mahal(PCA_m, PCA_m[idx, :])
# get the indices of the spikes not in the cluster
M_noise = d[~idx]
df = np.prod((fet, nElectrodes))
from scipy import stats
L = np.sum(1 - stats.chi2.cdf(M_noise, df))
L_ratio = L / nClustSpikes
# calculate isolation distance
if nClustSpikes < nSpikes / 2:
M_noise.sort()
isolation_dist = M_noise[nClustSpikes]
else:
isolation_dist = np.nan
except Exception:
isolation_dist = L_ratio = np.nan
return L_ratio, isolation_dist
def _mahal(self, u, v):
"""
gets the mahalanobis distance between two vectors u and v
a blatant copy of the Mathworks fcn as it doesn't require the
covariance matrix to be calculated which is a pain if there
are NaNs in the matrix
"""
u_sz = u.shape
v_sz = v.shape
if u_sz[1] != v_sz[1]:
warnings.warn(
'Input size mismatch: matrices must have same num of columns')
if v_sz[0] < v_sz[1]:
warnings.warn('Too few rows: v must have more rows than columns')
if np.any(np.imag(u)) or np.any(np.imag(v)):
warnings.warn('No complex inputs are allowed')
m = np.nanmean(v, axis=0)
M = np.tile(m, reps=(u_sz[0], 1))
C = v - np.tile(m, reps=(v_sz[0], 1))
_, R = np.linalg.qr(C)
ri = np.linalg.solve(R.T, (u-M).T)
d = np.sum(ri * ri, 0).T * (v_sz[0]-1)
return d
def thetaModIdx(self, x1):
"""
Calculates a theta modulation index of a spike train based on the cells
autocorrelogram
Parameters
----------
x1: np.array
The spike time-series
Returns
-------
thetaMod: float
The difference of the values at the first peak and trough of the
autocorrelogram
"""
y = self.xcorr(x1)
corr, _ = np.histogram(
y[y != 0], bins=201, range=np.array([-500, 500]))
# Take the fft of the spike train autocorr (from -500 to +500ms)
from scipy.signal import periodogram
freqs, power = periodogram(corr, fs=200, return_onesided=True)
# Smooth the power over +/- 1Hz
b = signal.boxcar(3)
h = signal.filtfilt(b, 3, power)
# Square the amplitude first
sqd_amp = h ** 2
# Then find the mean power in the +/-1Hz band either side of that
theta_band_max_idx = np.nonzero(
sqd_amp == np.max(
sqd_amp[np.logical_and(freqs > 6, freqs < 11)]))[0][0]
# Get the mean theta band power - mtbp
mtbp = np.mean(
sqd_amp[theta_band_max_idx-1:theta_band_max_idx+1])
# Find the mean amplitude in the 2-50Hz range
other_band_idx = np.logical_and(freqs > 2, freqs < 50)
# Get the mean in the other band - mobp
mobp = np.mean(sqd_amp[other_band_idx])
# Find the ratio of these two - this is the theta modulation index
return (mtbp - mobp) / (mtbp + mobp)
def thetaModIdxV2(self, x1):
"""
This is a simpler alternative to the thetaModIdx method in that it
calculates the difference between the normalized temporal
autocorrelogram at the trough between 50-70ms and the
peak between 100-140ms over their sum (data is binned into 5ms bins)
Measure used in Cacucci et al., 2004 and Kropff et al 2015
"""
y = self.xcorr(x1)
corr, bins = np.histogram(
y[y != 0], bins=201, range=np.array([-500, 500]))
# 'close' the right-hand bin
bins = bins[0:-1]
# normalise corr so max is 1.0
corr = corr/float(np.max(corr))
thetaAntiPhase = np.min(
corr[np.logical_and(bins > 50, bins < 70)])
thetaPhase = np.max(
corr[np.logical_and(bins > 100, bins < 140)])
return (thetaPhase-thetaAntiPhase) / (thetaPhase+thetaAntiPhase)
def thetaBandMaxFreq(self, x1):
"""
Calculates the frequency with the max power in the theta band (6-12Hz)
of a spike trains autocorrelogram. Partly to look for differences
in theta frequency in different running directions a la Blair
See Welday paper - https://doi.org/10.1523/jneurosci.0712-11.2011
"""
y = self.xcorr(x1)
corr, _ = np.histogram(
y[y != 0], bins=201, range=np.array([-500, 500]))
# Take the fft of the spike train autocorr (from -500 to +500ms)
from scipy.signal import periodogram
freqs, power = periodogram(corr, fs=200, return_onesided=True)
power_masked = np.ma.MaskedArray(
power, np.logical_or(freqs < 6, freqs > 12))
return freqs[np.argmax(power_masked)]
def smoothSpikePosCount(self, x1, npos, sigma=3.0, shuffle=None):
"""
Returns a spike train the same length as num pos samples that has been
smoothed in time with a gaussian kernel M in width and standard
deviation equal to sigma
Parameters
--------------
x1 : np.array
The pos indices the spikes occured at
npos : int
The number of position samples captured
sigma : float
the standard deviation of the gaussian used to smooth the spike
train
shuffle: int
The number of seconds to shift the spike train by. Default None
Returns
-----------
smoothed_spikes : np.array
The smoothed spike train
"""
spk_hist = np.bincount(x1, minlength=npos)
if shuffle is not None:
spk_hist = np.roll(spk_hist, int(shuffle * 50))
# smooth the spk_hist (which is a temporal histogram) with a 250ms
# gaussian as with Kropff et al., 2015
h = signal.gaussian(13, sigma)
h = h / float(np.sum(h))
return signal.filtfilt(h.ravel(), 1, spk_hist)
class SpikeCalcsTetrode(SpikeCalcsGeneric):
"""
Encapsulates methods specific to the geometry inherent in tetrode-based
recordings
"""
def __init__(self, spike_times, waveforms=None, **kwargs):
super().__init__(spike_times, waveforms, ** kwargs)
def ifr_sp_corr(self, x1, speed, minSpeed=2.0, maxSpeed=40.0, sigma=3,
shuffle=False, nShuffles=100, minTime=30, plot=False):
"""
x1 : np.array
The indices of pos at which the cluster fired
speed: np.array (1 x nSamples)
instantaneous speed
minSpeed: int
speeds below this value are ignored - defaults to 2cm/s as with
Kropff et al., 2015
"""
speed = speed.ravel()
posSampRate = 50
nSamples = len(speed)
# position is sampled at 50Hz and so is 'automatically' binned into
# 20ms bins
spk_hist = np.bincount(x1, minlength=nSamples)
# smooth the spk_hist (which is a temporal histogram) with a 250ms
# gaussian as with Kropff et al., 2015
h = signal.gaussian(13, sigma)
h = h / float(np.sum(h))
# filter for low speeds
lowSpeedIdx = speed < minSpeed
highSpeedIdx = speed > maxSpeed
speed_filt = speed[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
spk_hist_filt = spk_hist[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
spk_sm = signal.filtfilt(h.ravel(), 1, spk_hist_filt)
sm_spk_rate = spk_sm * posSampRate
res = stats.pearsonr(sm_spk_rate, speed_filt)
if plot:
# do some fancy plotting stuff
_, sp_bin_edges = np.histogram(speed_filt, bins=50)
sp_dig = np.digitize(speed_filt, sp_bin_edges, right=True)
spks_per_sp_bin = [spk_hist_filt[
sp_dig == i] for i in range(len(sp_bin_edges))]
rate_per_sp_bin = []
for x in spks_per_sp_bin:
rate_per_sp_bin.append(np.mean(x) * posSampRate)
rate_filter = signal.gaussian(5, 1.0)
rate_filter = rate_filter / np.sum(rate_filter)
binned_spk_rate = signal.filtfilt(rate_filter, 1, rate_per_sp_bin)
# instead of plotting a scatter plot of the firing rate at each
# speed bin, plot a log normalised heatmap and overlay results
spk_binning_edges = np.linspace(
np.min(sm_spk_rate), np.max(sm_spk_rate),
len(sp_bin_edges))
speed_mesh, spk_mesh = np.meshgrid(sp_bin_edges, spk_binning_edges)
binned_rate, _, _ = np.histogram2d(
speed_filt, sm_spk_rate, bins=[
sp_bin_edges, spk_binning_edges])
# blur the binned rate a bit to make it look nicer
from ephysiopy.common.utils import blurImage
sm_binned_rate = blurImage(binned_rate, 5)
fig = plt.figure()
ax = fig.add_subplot(111)
from matplotlib.colors import LogNorm
speed_mesh = speed_mesh[:-1, :-1]
spk_mesh = spk_mesh[:-1, :-1]
ax.pcolormesh(
speed_mesh, spk_mesh, sm_binned_rate,
norm=LogNorm(), alpha=0.5, shading='nearest', edgecolors='None')
# overlay the smoothed binned rate against speed
ax.plot(sp_bin_edges, binned_spk_rate, 'r')
# do the linear regression and plot the fit too
# TODO: linear regression is broken ie not regressing the correct
# variables
lr = stats.linregress(speed_filt, sm_spk_rate)
end_point = lr.intercept + (
(sp_bin_edges[-1] - sp_bin_edges[0]) * lr.slope)
ax.plot(
[np.min(sp_bin_edges), np.max(sp_bin_edges)],
[lr.intercept, end_point], 'r--')
ax.set_xlim(np.min(sp_bin_edges), np.max(sp_bin_edges[-2]))
ax.set_ylim(0, np.nanmax(binned_spk_rate) * 1.1)
ax.set_ylabel('Firing rate(Hz)')
ax.set_xlabel('Running speed(cm/s)')
ax.set_title(
'Intercept: {0:.3f} Slope: {1:.5f}\nPearson: {2:.5f}'.format(
lr.intercept, lr.slope, lr.rvalue))
# do some shuffling of the data to see if the result is signficant
if shuffle:
# shift spikes by at least 30 seconds after trial start and
# 30 seconds before trial end
timeSteps = np.random.randint(
30 * posSampRate, nSamples - (30 * posSampRate),
nShuffles)
shuffled_results = []
for t in timeSteps:
spk_count = np.roll(spk_hist, t)
spk_count_filt = spk_count[~lowSpeedIdx]
spk_count_sm = signal.filtfilt(h.ravel(), 1, spk_count_filt)
shuffled_results.append(stats.pearsonr(
spk_count_sm, speed_filt)[0])
if plot:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist(np.abs(shuffled_results), 20)
ylims = ax.get_ylim()
ax.vlines(res, ylims[0], ylims[1], 'r')
        if plot and isinstance(fig, plt.Figure):
return fig
class SpikeCalcsAxona(SpikeCalcsGeneric):
"""
Replaces SpikeCalcs from ephysiopy.dacq2py.spikecalcs
"""
def half_amp_dur(self, waveforms):
"""
Half amplitude duration of a spike
Parameters
----------
A: ndarray
An nSpikes x nElectrodes x nSamples array
Returns
-------
had: float
The half-amplitude duration for the channel (electrode) that has
the strongest (highest amplitude) signal. Units are ms
"""
from scipy import optimize
best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
mn_wvs = np.mean(waveforms, 0)
wvs = mn_wvs[best_chan, :]
half_amp = np.max(wvs) / 2
half_amp = np.zeros_like(wvs) + half_amp
t = np.linspace(0, 1/1000., 50)
# create functions from the data using PiecewisePolynomial
from scipy.interpolate import BPoly
p1 = BPoly.from_derivatives(t, wvs[:, np.newaxis])
p2 = BPoly.from_derivatives(t, half_amp[:, np.newaxis])
xs = np.r_[t, t]
xs.sort()
x_min = xs.min()
x_max = xs.max()
x_mid = xs[:-1] + np.diff(xs) / 2
roots = set()
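        # Find where the waveform crosses the half-amplitude level by solving p1(x) == p2(x) near each midpoint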
for val in x_mid:
root, infodict, ier, mesg = optimize.fsolve(
lambda x: p1(x)-p2(x), val, full_output=True)
if ier == 1 and x_min < root < x_max:
roots.add(root[0])
roots = list(roots)
if len(roots) > 1:
r = np.abs(np.diff(roots[0:2]))[0]
else:
r = np.nan
return r
def p2t_time(self, waveforms):
"""
The peak to trough time of a spike in ms
Parameters
----------
cluster: int
the cluster whose waveforms are to be analysed
Returns
-------
p2t: float
The mean peak-to-trough time for the channel (electrode) that has
the strongest (highest amplitude) signal. Units are ms
"""
best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
tP = self.getParam(waveforms, param='tP')
tT = self.getParam(waveforms, param='tT')
mn_tP = np.mean(tP, 0)
mn_tT = np.mean(tT, 0)
p2t = np.abs(mn_tP[best_chan] - mn_tT[best_chan])
return p2t * 1000
def plotClusterSpace(
self, waveforms, param='Amp', clusts=None, bins=256, **kwargs):
"""
TODO: aspect of plot boxes in ImageGrid not right as scaled by range of
values now
"""
from ephysiopy.dacq2py.tintcolours import colours as tcols
import matplotlib.colors as colors
from itertools import combinations
from mpl_toolkits.axes_grid1 import ImageGrid
self.scaling = np.full(4, 15)
amps = self.getParam(waveforms, param=param)
bad_electrodes = np.setdiff1d(
np.array(range(4)), np.array(np.sum(amps, 0).nonzero())[0])
cmap = np.tile(tcols[0], (bins, 1))
cmap[0] = (1, 1, 1)
cmap = colors.ListedColormap(cmap)
cmap._init()
alpha_vals = np.ones(cmap.N+3)
alpha_vals[0] = 0
cmap._lut[:, -1] = alpha_vals
cmb = combinations(range(4), 2)
if 'fig' in kwargs:
fig = kwargs['fig']
else:
fig = plt.figure(figsize=(8, 6))
grid = ImageGrid(
fig, 111, nrows_ncols=(2, 3), axes_pad=0.1, aspect=False)
if 'Amp' in param:
myRange = np.vstack((self.scaling*0, self.scaling*2))
else:
myRange = None
clustCMap0 = np.tile(tcols[0], (bins, 1))
clustCMap0[0] = (1, 1, 1)
clustCMap0 = colors.ListedColormap(clustCMap0)
clustCMap0._init()
clustCMap0._lut[:, -1] = alpha_vals
for i, c in enumerate(cmb):
            if c[0] not in bad_electrodes and c[1] not in bad_electrodes:
h, ye, xe = np.histogram2d(
amps[:, c[0]], amps[:, c[1]],
range=myRange[:, c].T, bins=bins)
x, y = np.meshgrid(xe[0:-1], ye[0:-1])
grid[i].pcolormesh(x, y, h, cmap=clustCMap0, shading='nearest', edgecolors='face')
h, ye, xe = np.histogram2d(
amps[:, c[0]], amps[:, c[1]],
range=myRange[:, c].T, bins=bins)
clustCMap = np.tile(
tcols[1], (bins, 1))
clustCMap[0] = (1, 1, 1)
clustCMap = colors.ListedColormap(clustCMap)
clustCMap._init()
clustCMap._lut[:, -1] = alpha_vals
grid[i].pcolormesh(
x, y, h, cmap=clustCMap, shading='nearest', edgecolors='face')
s = str(c[0]+1) + ' v ' + str(c[1]+1)
grid[i].text(
0.05, 0.95, s, va='top', ha='left', size='small',
color='k', transform=grid[i].transAxes)
grid[i].set_xlim(xe.min(), xe.max())
grid[i].set_ylim(ye.min(), ye.max())
plt.setp([a.get_xticklabels() for a in grid], visible=False)
plt.setp([a.get_yticklabels() for a in grid], visible=False)
return fig
'''
class SpikeCalcsProbe(SpikeCalcsGeneric):
"""
Encapsulates methods specific to probe-based recordings
"""
def __init__(self):
pass
'''
| 36.713198 | 98 | 0.560733 | [
"MIT"
] | rhayman/ephysiopy | ephysiopy/common/spikecalcs.py | 28,930 | Python |
from .sql import SQL
from .sac import SAC
from .drsac import DRSAC
| 16.75 | 24 | 0.776119 | [
"MIT"
] | bandofstraycats/dr-sac | softlearning/algorithms/__init__.py | 67 | Python |
#!/Users/akshayiyer/Dev/GitHub/udacity-dend/udacity-dend-capstone-etl/bin/python3.7
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
| 26.741935 | 83 | 0.757539 | [
"MIT"
] | aiyer16/udacity-dend-capstone-etl | bin/rst2odt.py | 829 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from tempfile import NamedTemporaryFile
from typing import Optional, Union
import numpy as np
import pandas as pd
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.utils.decorators import apply_defaults
class MySQLToS3Operator(BaseOperator):
"""
Saves data from an specific MySQL query into a file in S3.
:param query: the sql query to be executed. If you want to execute a file, place the absolute path of it,
ending with .sql extension. (templated)
:type query: str
:param s3_bucket: bucket where the data will be stored. (templated)
:type s3_bucket: str
:param s3_key: desired key for the file. It includes the name of the file. (templated)
:type s3_key: str
:param mysql_conn_id: reference to a specific mysql database
:type mysql_conn_id: str
:param aws_conn_id: reference to a specific S3 connection
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
:param pd_csv_kwargs: arguments to include in pd.to_csv (header, index, columns...)
:type pd_csv_kwargs: dict
:param index: whether to have the index or not in the dataframe
:type index: str
:param header: whether to include header or not into the S3 file
:type header: bool
"""
template_fields = (
's3_bucket',
's3_key',
'query',
)
template_ext = ('.sql',)
@apply_defaults
def __init__(
self,
*,
query: str,
s3_bucket: str,
s3_key: str,
mysql_conn_id: str = 'mysql_default',
aws_conn_id: str = 'aws_default',
verify: Optional[Union[bool, str]] = None,
pd_csv_kwargs: Optional[dict] = None,
index: bool = False,
header: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query = query
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.mysql_conn_id = mysql_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.pd_csv_kwargs = pd_csv_kwargs or {}
if "path_or_buf" in self.pd_csv_kwargs:
raise AirflowException('The argument path_or_buf is not allowed, please remove it')
if "index" not in self.pd_csv_kwargs:
self.pd_csv_kwargs["index"] = index
if "header" not in self.pd_csv_kwargs:
self.pd_csv_kwargs["header"] = header
def _fix_int_dtypes(self, df: pd.DataFrame) -> None:
"""
        Mutate DataFrame to set dtypes for int columns containing NaN values.
"""
for col in df:
if "float" in df[col].dtype.name and df[col].hasnans:
# inspect values to determine if dtype of non-null values is int or float
notna_series = df[col].dropna().values
if np.isclose(notna_series, notna_series.astype(int)).all():
# set to dtype that retains integers and supports NaNs
df[col] = np.where(df[col].isnull(), None, df[col])
df[col] = df[col].astype(pd.Int64Dtype())
def execute(self, context) -> None:
mysql_hook = MySqlHook(mysql_conn_id=self.mysql_conn_id)
s3_conn = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
data_df = mysql_hook.get_pandas_df(self.query)
self.log.info("Data from MySQL obtained")
self._fix_int_dtypes(data_df)
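        # Dump the DataFrame to a temporary CSV file and upload it to the target S3 key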
with NamedTemporaryFile(mode='r+', suffix='.csv') as tmp_csv:
data_df.to_csv(tmp_csv.name, **self.pd_csv_kwargs)
s3_conn.load_file(filename=tmp_csv.name, key=self.s3_key, bucket_name=self.s3_bucket)
if s3_conn.check_for_key(self.s3_key, bucket_name=self.s3_bucket):
file_location = os.path.join(self.s3_bucket, self.s3_key)
self.log.info("File saved correctly in %s", file_location)
| 40.839695 | 109 | 0.67028 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | alphasights/airflow | airflow/providers/amazon/aws/transfers/mysql_to_s3.py | 5,350 | Python |
class Vec2:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def xy(self):
return self.x, self.y
| 15.888889 | 29 | 0.496503 | [
"MIT"
] | GitRenhl/PyxelControllerTest | src/vector2.py | 143 | Python |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Lint as: python3
"""Tests for tensorflow_probability.spinoffs.oryx.util.summary."""
from absl.testing import absltest
from jax import lax
import jax.numpy as jnp
import numpy as np
from oryx.internal import test_util
from oryx.util import summary
class SummaryTest(test_util.TestCase):
def test_can_pull_out_summarized_values_in_strict_mode(self):
def f(x):
return summary.summary(x, name='x')
_, summaries = summary.get_summaries(f)(1.)
self.assertDictEqual(dict(x=1.), summaries)
def test_can_pull_out_non_dependent_values(self):
def f(x):
summary.summary(x ** 2, name='y')
return x
_, summaries = summary.get_summaries(f)(2.)
self.assertDictEqual(dict(y=4.), summaries)
def test_duplicate_names_error_in_strict_mode(self):
def f(x):
summary.summary(x, name='x')
summary.summary(x, name='x')
return x
with self.assertRaisesRegex(ValueError, 'has already been reaped: x'):
summary.get_summaries(f)(2.)
def test_can_append_to_growing_list_with_summary(self):
def f(x):
summary.summary(x + 1., name='x', mode='append')
summary.summary(x + 2., name='x', mode='append')
return x
_, summaries = summary.get_summaries(f)(2.)
self.assertSetEqual(set(summaries.keys()), {'x'})
np.testing.assert_allclose(summaries['x'], np.array([3., 4.]))
def test_can_pull_summaries_out_of_scan_in_append_mode(self):
def f(x):
def body(x, _):
summary.summary(x, name='x', mode='append')
return x + 1, ()
return lax.scan(body, x, jnp.arange(10.))[0]
value, summaries = summary.get_summaries(f)(0.)
self.assertEqual(value, 10.)
np.testing.assert_allclose(summaries['x'], np.arange(10.))
if __name__ == '__main__':
absltest.main()
| 33.726027 | 78 | 0.680747 | [
"Apache-2.0"
] | 8bitmp3/probability | spinoffs/oryx/oryx/util/summary_test.py | 2,462 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_allclose
from vispy.util.transforms import (translate, scale, rotate, ortho, frustum,
perspective)
from vispy.testing import run_tests_if_main, assert_equal
def test_transforms():
"""Test basic transforms"""
xfm = np.random.randn(4, 4).astype(np.float32)
# Do a series of rotations that should end up into the same orientation
# again, to ensure the order of computation is all correct
    # i.e. if rotate() returned the transposed matrix this would not work
    # out (the translation part would be incorrect)
new_xfm = xfm.dot(rotate(180, (1, 0, 0)).dot(rotate(-90, (0, 1, 0))))
new_xfm = new_xfm.dot(rotate(90, (0, 0, 1)).dot(rotate(90, (0, 1, 0))))
new_xfm = new_xfm.dot(rotate(90, (1, 0, 0)))
assert_allclose(xfm, new_xfm)
new_xfm = translate((1, -1, 1)).dot(translate((-1, 1, -1))).dot(xfm)
assert_allclose(xfm, new_xfm)
new_xfm = scale((1, 2, 3)).dot(scale((1, 1. / 2., 1. / 3.))).dot(xfm)
assert_allclose(xfm, new_xfm)
# These could be more complex...
xfm = ortho(-1, 1, -1, 1, -1, 1)
assert_equal(xfm.shape, (4, 4))
xfm = frustum(-1, 1, -1, 1, -1, 1)
assert_equal(xfm.shape, (4, 4))
xfm = perspective(1, 1, -1, 1)
assert_equal(xfm.shape, (4, 4))
run_tests_if_main()
| 34.813953 | 76 | 0.637275 | [
"BSD-3-Clause"
] | izaid/vispy | vispy/util/tests/test_transforms.py | 1,497 | Python |
###############################################################################
#
# Tests for libxlsxwriter.
#
# Copyright 2014-2019, John McNamara, [email protected]
#
import base_test_class
class TestCompareXLSXFiles(base_test_class.XLSXBaseTest):
"""
Test file created with libxlsxwriter against a file created by Excel.
"""
def test_chart_up_down_bars01(self):
self.run_exe_test('test_chart_up_down_bars01')
def test_chart_up_down_bars02(self):
self.run_exe_test('test_chart_up_down_bars02')
| 25.619048 | 79 | 0.637546 | [
"BSD-3-Clause"
] | White-116/xlsxd | libxlsxwriter/test/functional/test_chart_up_down_bars.py | 538 | Python |
import importlib.util
import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
"""Figure out if the target module is vendored."""
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
"""Return a module spec for vendored names."""
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname) else None
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = (
'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'nspektr',
)
VendorImporter(__name__, names, 'setuptools._vendor').install()
| 32.649351 | 84 | 0.604614 | [
"Unlicense"
] | MARTIN-OMOLLO/PITCH | virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py | 2,514 | Python |
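A short sketch of how the meta path importer above behaves once install() has run. It is purely illustrative: which copy wins depends on whether the vendored package or a top-level install is present in the environment.

import sys

# Importing through the extern namespace goes through VendorImporter:
# it tries 'setuptools._vendor.packaging' first and falls back to plain 'packaging'.
from setuptools.extern import packaging

# load_module registers the resolved module under the extern name as well,
# so both keys refer to the same module object.
assert sys.modules["setuptools.extern.packaging"] is packaging
print(packaging.__name__)   # e.g. 'setuptools._vendor.packaging' or 'packaging'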
from django.db import models
from django.contrib.auth import models as authmodels
from django.conf import settings
import os.path
# Models for file attachments uploaded to the site
# basically just a simple container for files
# but allowing for replacement of previously uploaded files
class Attachment(models.Model):
file = models.FileField(upload_to=settings.ATTACHMENT_LOCATION)
description = models.TextField(blank=True)
date = models.DateField(auto_now=True)
uploader = models.ForeignKey(authmodels.User)
def get_absolute_url(self):
return self.file.url
def get_filename(self):
return self.file.name
def __str__(self):
return self.file.name
| 26.185185 | 67 | 0.751061 | [
"BSD-3-Clause"
] | Signbank/Global-signbank | signbank/attachments/models.py | 707 | Python |
import pandas as pd
import numpy as np
import math
from pandasql import sqldf
def add_expression_column(table, new_cols, formulas, expr_type='sqlite'):
_table = table.copy()
print(locals())
for nc, f in zip(new_cols, formulas):
if expr_type == 'sqlite':
_table[nc] = sqldf('select {f} as new_val from _table'.format(f=f))
else:
_table[nc] = _table.eval(f, engine=expr_type)
return {'out_table':_table}
def add_expression_column_if(table, new_col, conditions, values, else_value, expr_type='sqlite'):
_table = table.copy()
_condition_size = min(len(conditions), len(values))
_conditions = conditions[:_condition_size]
_values = values[:_condition_size]
if expr_type == 'sqlite':
casted_else_value = '\'' + str(else_value) + '\'' if isinstance(else_value, str) else str(else_value)
casted_values = ['\'' + str(v) + '\'' if isinstance(v, str) else str(v) for v in values]
case_statement = 'case ' + ' '.join(['''when {c} then {v}'''.format(c=c, v=v) for c, v in zip(_conditions, casted_values)]) + ' else ' + casted_else_value + ' end'
_table[new_col] = sqldf('''select {case_statement} from _table'''.format(case_statement=case_statement))
else:
_eval_conditions = [_table.eval(c, engine=expr_type) for c in _conditions]
_new_col_data = [else_value] * len(_table)
for i in range(len(table)):
_assigned = False
for ci, ec in enumerate(_eval_conditions):
if ec[i] and not _assigned:
_new_col_data[i] = _values[ci]
_assigned = True
_table[new_col] = _new_col_data
return {'out_table':_table}
| 38.5 | 171 | 0.614342 | [
"Apache-2.0"
] | GSByeon/studio | function/python/brightics/function/extraction/add_expression_column.py | 1,771 | Python |
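A small usage sketch for the two helpers above, run with the pandas 'python' eval engine so the pandasql dependency is not exercised; the column names and thresholds are made up.

import pandas as pd

df = pd.DataFrame({'height_cm': [170.0, 185.0], 'weight_kg': [65.0, 90.0]})

# Derived column through DataFrame.eval instead of the default sqlite path.
out = add_expression_column(
    df, ['bmi'], ['weight_kg / (height_cm / 100) ** 2'], expr_type='python'
)['out_table']

# Conditional column: the first matching condition wins, otherwise else_value is used.
labelled = add_expression_column_if(
    out, 'bmi_class',
    conditions=['bmi < 18.5', 'bmi < 25'],
    values=['under', 'normal'],
    else_value='over',
    expr_type='python',
)['out_table']
print(labelled)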
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2017_tt_SL-HDAMPdown'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'crab_PSet_2017_tt_SL-HDAMPdown.py'
config.JobType.maxMemoryMB = 3000
config.JobType.maxJobRuntimeMin = 1800
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2017_tt_SL-HDAMPdown.sh'
config.JobType.inputFiles = ['crab_script_2017_tt_SL-HDAMPdown.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = [] #['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/TTToSemiLeptonic_hdampDOWN_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIIFall17NanoAODv7-PU2017_12Apr2018_Nano02Apr2020_102X_mc2017_realistic_v8-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
# config.Data.outLFNDirBase = '/store/user/{}/NoveCampaign/{}'.format(getUsernameFromCRIC(), "2017")
config.Data.outLFNDirBase = '/store/group/fourtop/NoveCampaign/{}'.format("2017")
config.Data.publication = True
config.Data.outputDatasetTag = 'NoveCampaign'
config.section_("Site")
config.Site.storageSite = 'T2_BE_IIHE'
| 42.5 | 186 | 0.778824 | [
"Apache-2.0"
] | NJManganelli/FourTopNAOD | Kai/crab/NANOv7_NoveCampaign/2017/crab_cfg_2017_tt_SL-HDAMPdown.py | 1,700 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test tools package alone which don't fit into other tests."""
#
# (C) Pywikibot team, 2016-2017
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, unicode_literals
import collections
import decimal
import inspect
import os.path
import subprocess
import tempfile
import warnings
try:
import mock
except ImportError as e:
mock = e
from pywikibot import tools
from pywikibot.tools import classproperty
from tests import join_xml_data_path
from tests.aspects import (
unittest, require_modules, DeprecationTestCase, TestCase, MetaTestCaseClass
)
from tests.utils import add_metaclass
class ContextManagerWrapperTestCase(TestCase):
"""Test that ContextManagerWrapper is working correctly."""
class DummyClass(object):
"""A dummy class which has some values and a close method."""
class_var = 42
def __init__(self):
"""Create instance with dummy values."""
self.instance_var = 1337
self.closed = False
def close(self):
"""Just store that it has been closed."""
self.closed = True
net = False
def test_wrapper(self):
"""Create a test instance and verify the wrapper redirects."""
obj = self.DummyClass()
wrapped = tools.ContextManagerWrapper(obj)
self.assertIs(wrapped.class_var, obj.class_var)
self.assertIs(wrapped.instance_var, obj.instance_var)
self.assertIs(wrapped._wrapped, obj)
self.assertFalse(obj.closed)
with wrapped as unwrapped:
self.assertFalse(obj.closed)
self.assertIs(unwrapped, obj)
unwrapped.class_var = 47
self.assertTrue(obj.closed)
self.assertEqual(wrapped.class_var, 47)
def test_exec_wrapper(self):
"""Check that the wrapper permits exceptions."""
wrapper = tools.ContextManagerWrapper(self.DummyClass())
self.assertFalse(wrapper.closed)
with self.assertRaisesRegex(ZeroDivisionError,
'(integer division or modulo by zero|division by zero)'):
with wrapper:
1 / 0
self.assertTrue(wrapper.closed)
class OpenArchiveTestCase(TestCase):
"""
Unit test class for tools.
    The tests for open_archive require that article-pyrus.xml* contain all
the same content after extraction. The content itself is not important.
The file article-pyrus.xml_invalid.7z is not a valid 7z file and
open_archive will fail extracting it using 7za.
"""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read().replace(b'\r\n', b'\n')
def _get_content(self, *args, **kwargs):
"""Use open_archive and return content using a with-statement."""
with tools.open_archive(*args, **kwargs) as f:
return f.read().replace(b'\r\n', b'\n')
def test_open_archive_normal(self):
"""Test open_archive with no compression in the standard library."""
self.assertEqual(self._get_content(self.base_file), self.original_content)
def test_open_archive_bz2(self):
"""Test open_archive with bz2 compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.bz2'), self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2', use_extension=False),
self.original_content)
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
"""Test open_archive when bz2file library."""
old_bz2 = tools.bz2
try:
tools.bz2 = __import__('bz2file')
self.assertEqual(self._get_content(self.base_file + '.bz2'),
self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2',
use_extension=False),
self.original_content)
finally:
tools.bz2 = old_bz2
def test_open_archive_without_bz2(self):
"""Test open_archive when bz2 and bz2file are not available."""
old_bz2 = tools.bz2
BZ2_IMPORT_ERROR = ('This is a fake exception message that is '
'used when bz2 and bz2file is not importable')
try:
tools.bz2 = ImportError(BZ2_IMPORT_ERROR)
self.assertRaisesRegex(ImportError,
BZ2_IMPORT_ERROR,
self._get_content,
self.base_file + '.bz2')
finally:
tools.bz2 = old_bz2
def test_open_archive_gz(self):
"""Test open_archive with gz compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.gz'), self.original_content)
def test_open_archive_7z(self):
"""Test open_archive with 7za if installed."""
FAILED_TO_OPEN_7ZA = 'Unexpected STDERR output from 7za '
try:
subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
except OSError:
raise unittest.SkipTest('7za not installed')
self.assertEqual(self._get_content(self.base_file + '.7z'), self.original_content)
self.assertRaisesRegex(OSError,
FAILED_TO_OPEN_7ZA,
self._get_content,
self.base_file + '_invalid.7z',
use_extension=True)
class OpenCompressedTestCase(OpenArchiveTestCase, DeprecationTestCase):
"""Test opening files with the deprecated open_compressed."""
net = False
def _get_content(self, *args, **kwargs):
"""Use open_compressed and return content using a with-statement."""
# open_archive default is True, so if it's False it's not the default
# so use the non-default of open_compressed (which is True)
if kwargs.get('use_extension') is False:
kwargs['use_extension'] = True
with tools.open_compressed(*args, **kwargs) as f:
content = f.read().replace(b'\r\n', b'\n')
self.assertOneDeprecation(self.INSTEAD)
return content
class OpenArchiveWriteTestCase(TestCase):
"""Test writing with open_archive."""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveWriteTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read().replace(b'\r\n', b'\n')
def _write_content(self, suffix):
try:
fh, fn = tempfile.mkstemp(suffix)
with tools.open_archive(fn, 'wb') as f:
f.write(self.original_content)
with tools.open_archive(fn, 'rb') as f:
self.assertEqual(f.read(), self.original_content)
with open(fn, 'rb') as f:
return f.read()
finally:
os.close(fh)
os.remove(fn)
def test_invalid_modes(self):
"""Test various invalid mode configurations."""
INVALID_MODE_RA = 'Invalid mode: "ra"'
INVALID_MODE_RT = 'Invalid mode: "rt"'
INVALID_MODE_BR = 'Invalid mode: "br"'
MN_DETECTION_ONLY = 'Magic number detection only when reading'
self.assertRaisesRegex(ValueError,
INVALID_MODE_RA,
tools.open_archive,
'/dev/null', 'ra') # two modes besides
self.assertRaisesRegex(ValueError,
INVALID_MODE_RT,
tools.open_archive,
'/dev/null', 'rt') # text mode
self.assertRaisesRegex(ValueError,
INVALID_MODE_BR,
tools.open_archive,
'/dev/null', 'br') # binary at front
self.assertRaisesRegex(ValueError,
MN_DETECTION_ONLY,
tools.open_archive,
'/dev/null', 'wb', False) # writing without extension
def test_binary_mode(self):
"""Test that it uses binary mode."""
with tools.open_archive(self.base_file, 'r') as f:
self.assertEqual(f.mode, 'rb')
self.assertIsInstance(f.read(), bytes)
def test_write_archive_bz2(self):
"""Test writing a bz2 archive."""
content = self._write_content('.bz2')
with open(self.base_file + '.bz2', 'rb') as f:
self.assertEqual(content, f.read())
def test_write_archive_gz(self):
"""Test writing a gz archive."""
content = self._write_content('.gz')
self.assertEqual(content[:3], b'\x1F\x8B\x08')
def test_write_archive_7z(self):
"""Test writing an archive as a 7z archive."""
FAILED_TO_WRITE_7Z = 'It is not possible to write a 7z file.'
self.assertRaisesRegex(NotImplementedError,
FAILED_TO_WRITE_7Z,
tools.open_archive,
'/dev/null.7z',
mode='wb')
class MergeUniqueDicts(TestCase):
"""Test merge_unique_dicts."""
net = False
dct1 = {'foo': 'bar', '42': 'answer'}
dct2 = {47: 'Star', 74: 'Trek'}
dct_both = dct1.copy()
dct_both.update(dct2)
def test_single(self):
"""Test that it returns the dict itself when there is only one."""
self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
def test_multiple(self):
"""Test that it actually merges dicts."""
self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
self.dct_both)
self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
self.dct_both)
def test_different_type(self):
"""Test that the keys can be different types."""
self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
{'1': 'str', 1: 'int'})
def test_conflict(self):
"""Test that it detects conflicts."""
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **{'42': 'bad'})
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, self.dct1)
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **self.dct1)
class TestIsSliceWithEllipsis(TestCase):
"""Test islice_with_ellipsis."""
net = False
it = ['a', 'b', 'c', 'd', 'f']
it_null = []
def test_show_default_marker(self):
"""Test marker is shown without kwargs."""
stop = 2
it = list(tools.islice_with_ellipsis(self.it, stop))
self.assertEqual(len(it), stop + 1) # +1 to consider marker.
self.assertEqual(it[:-1], self.it[:stop])
self.assertEqual(it[-1], '…')
def test_show_custom_marker(self):
"""Test correct marker is shown with kwargs.."""
stop = 2
it = list(tools.islice_with_ellipsis(self.it, stop, marker='new'))
self.assertEqual(len(it), stop + 1) # +1 to consider marker.
self.assertEqual(it[:-1], self.it[:stop])
self.assertNotEqual(it[-1], '…')
self.assertEqual(it[-1], 'new')
def test_show_marker_with_start_stop(self):
"""Test marker is shown with start and stop without kwargs."""
start = 1
stop = 3
it = list(tools.islice_with_ellipsis(self.it, start, stop))
self.assertEqual(len(it), stop - start + 1) # +1 to consider marker.
self.assertEqual(it[:-1], self.it[start:stop])
self.assertEqual(it[-1], '…')
def test_show_custom_marker_with_start_stop(self):
"""Test marker is shown with start and stop with kwargs."""
start = 1
stop = 3
it = list(tools.islice_with_ellipsis(self.it, start, stop, marker='new'))
self.assertEqual(len(it), stop - start + 1) # +1 to consider marker.
self.assertEqual(it[:-1], self.it[start:stop])
self.assertNotEqual(it[-1], '…')
self.assertEqual(it[-1], 'new')
def test_show_marker_with_stop_zero(self):
"""Test marker is shown with stop for non empty iterable."""
stop = 0
it = list(tools.islice_with_ellipsis(self.it, stop))
self.assertEqual(len(it), stop + 1) # +1 to consider marker.
self.assertEqual(it[-1], '…')
def test_do_not_show_marker_with_stop_zero(self):
"""Test marker is shown with stop for empty iterable."""
stop = 0
it = list(tools.islice_with_ellipsis(self.it_null, stop))
self.assertEqual(len(it), stop)
def test_do_not_show_marker(self):
"""Test marker is not shown when no marker is specified."""
import itertools
stop = 2
it_1 = list(tools.islice_with_ellipsis(self.it, stop, marker=None))
it_2 = list(itertools.islice(self.it, stop))
self.assertEqual(it_1, it_2) # same behavior as islice().
def test_do_not_show_marker_when_get_all(self):
"""Test marker is not shown when all elements are retrieved."""
stop = None
it = list(tools.islice_with_ellipsis(self.it, stop))
self.assertEqual(len(it), len(self.it))
self.assertEqual(it, self.it)
self.assertNotEqual(it[-1], '…')
def test_accept_only_keyword_marker(self):
"""Test that the only kwargs accepted is 'marker'."""
GENERATOR_NOT_CALLABLE = "'generator' object is not callable"
self.assertRaisesRegex(TypeError,
GENERATOR_NOT_CALLABLE,
tools.islice_with_ellipsis(self.it, 1, t=''))
def passthrough(x):
"""Return x."""
return x
class SkipList(set):
"""Container that ignores items."""
skip_list = [1, 3]
def __contains__(self, item):
"""Override to not process some items."""
if item in self.skip_list:
return True
else:
return super(SkipList, self).__contains__(item)
class ProcessAgainList(set):
"""Container that keeps processing certain items."""
process_again_list = [1, 3]
def add(self, item):
"""Override to not add some items."""
if item in self.process_again_list:
return
else:
return super(ProcessAgainList, self).add(item)
class ContainsStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def __contains__(self, item):
"""Override to stop on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
return super(ContainsStopList, self).__contains__(item)
class AddStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def add(self, item):
"""Override to not continue on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
super(AddStopList, self).add(item)
class TestFilterUnique(TestCase):
"""Test filter_unique."""
net = False
ints = [1, 3, 2, 1, 2, 1, 2, 4, 2]
strs = [str(i) for i in ints]
decs = [decimal.Decimal(i) for i in ints]
def _test_dedup_int(self, deduped, deduper, key=None):
"""Test filter_unique results for int."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), 1)
self.assertEqual(next(deduper), 3)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 3])
else:
self.assertEqual(deduped, set([1, 3]))
self.assertEqual(next(deduper), 2)
self.assertEqual(next(deduper), 4)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
else:
self.assertEqual(deduped, set([1, 2, 3, 4]))
self.assertRaises(StopIteration, next, deduper)
def _test_dedup_str(self, deduped, deduper, key=None):
"""Test filter_unique results for str."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), '1')
self.assertEqual(next(deduper), '3')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key('1'), key('3')])
else:
self.assertEqual(deduped, set([key('1'), key('3')]))
self.assertEqual(next(deduper), '2')
self.assertEqual(next(deduper), '4')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key(i) for i in self.strs])
else:
self.assertEqual(deduped, set(key(i) for i in self.strs))
self.assertRaises(StopIteration, next, deduper)
def test_set(self):
"""Test filter_unique with a set."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_dict(self):
"""Test filter_unique with a dict."""
deduped = {}
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_OrderedDict(self):
"""Test filter_unique with a OrderedDict."""
deduped = tools.OrderedDict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_int_hash(self):
"""Test filter_unique with ints using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_int_id(self):
"""Test filter_unique with ints using id as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=id)
self._test_dedup_int(deduped, deduper, id)
def test_obj(self):
"""Test filter_unique with objects."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_obj_hash(self):
"""Test filter_unique with objects using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_obj_id(self):
"""Test filter_unique with objects using id as key, which fails."""
        # Two objects which may be equal do not necessarily have the same id.
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=id)
self.assertEqual(len(deduped), 0)
for _ in self.decs:
self.assertEqual(id(next(deduper)), deduped.pop())
self.assertRaises(StopIteration, next, deduper)
# No. of Decimal with distinct ids != no. of Decimal with distinct value.
deduper_ids = list(tools.filter_unique(self.decs, key=id))
self.assertNotEqual(len(deduper_ids), len(set(deduper_ids)))
def test_str(self):
"""Test filter_unique with str."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped)
self._test_dedup_str(deduped, deduper)
def test_str_hash(self):
"""Test filter_unique with str using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=hash)
self._test_dedup_str(deduped, deduper, hash)
@unittest.skipIf(not tools.PY2,
'str in Py3 behave like objects and id as key fails')
def test_str_id(self):
"""Test str using id as key."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=id)
self._test_dedup_str(deduped, deduper, id)
def test_for_resumable(self):
"""Test filter_unique is resumable after a for loop."""
gen2 = tools.filter_unique(self.ints)
deduped = []
for item in gen2:
deduped.append(item)
if len(deduped) == 3:
break
self.assertEqual(deduped, [1, 3, 2])
last = next(gen2)
self.assertEqual(last, 4)
self.assertRaises(StopIteration, next, gen2)
def test_skip(self):
"""Test filter_unique with a container that skips items."""
deduped = SkipList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([2, 4]))
def test_process_again(self):
"""Test filter_unique with an ignoring container."""
deduped = ProcessAgainList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4])
self.assertEqual(deduped, set([2, 4]))
def test_stop(self):
"""Test filter_unique with an ignoring container."""
deduped = ContainsStopList()
deduped.stop_list = [2]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
deduped = AddStopList()
deduped.stop_list = [4]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 2, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
class MetaTestArgSpec(MetaTestCaseClass):
"""Metaclass to create dynamically the tests. Set the net flag to false."""
def __new__(cls, name, bases, dct):
"""Create a new test case class."""
def create_test(method):
def test_method(self):
"""Test getargspec."""
# all expect at least self and param
expected = method(1, 2)
returned = self.getargspec(method)
self.assertEqual(returned, expected)
self.assertIsInstance(returned, self.expected_class)
self.assertNoDeprecation()
return test_method
for attr, tested_method in list(dct.items()):
if attr.startswith('_method_test_'):
suffix = attr[len('_method_test_'):]
cls.add_method(dct, 'test_method_' + suffix,
create_test(tested_method),
doc_suffix='on {0}'.format(suffix))
dct['net'] = False
return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestArgSpec(DeprecationTestCase):
"""Test getargspec and ArgSpec from tools."""
__metaclass__ = MetaTestArgSpec
expected_class = tools.ArgSpec
def _method_test_args(self, param):
"""Test method with two positional arguments."""
return (['self', 'param'], None, None, None)
def _method_test_kwargs(self, param=42):
"""Test method with one positional and one keyword argument."""
return (['self', 'param'], None, None, (42,))
def _method_test_varargs(self, param, *var):
"""Test method with two positional arguments and var args."""
return (['self', 'param'], 'var', None, None)
def _method_test_varkwargs(self, param, **var):
"""Test method with two positional arguments and var kwargs."""
return (['self', 'param'], None, 'var', None)
def _method_test_vars(self, param, *args, **kwargs):
"""Test method with two positional arguments and both var args."""
return (['self', 'param'], 'args', 'kwargs', None)
def getargspec(self, method):
"""Call tested getargspec function."""
return tools.getargspec(method)
@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):
"""Test the same tests using Python's implementation."""
expected_class = inspect.ArgSpec
def getargspec(self, method):
"""Call inspect's getargspec function."""
with warnings.catch_warnings():
if tools.PYTHON_VERSION >= (3, 5):
warnings.simplefilter('ignore', DeprecationWarning)
return inspect.getargspec(method)
@require_modules('mock')
class TestFileModeChecker(TestCase):
"""Test parsing password files."""
net = False
def patch(self, name):
"""Patch up <name> in self.setUp."""
patcher = mock.patch(name)
self.addCleanup(patcher.stop)
return patcher.start()
def setUp(self):
"""Patch a variety of dependencies."""
super(TestFileModeChecker, self).setUp()
self.stat = self.patch('os.stat')
self.chmod = self.patch('os.chmod')
self.file = '~FakeFile'
def test_auto_chmod_for_dir(self):
"""Do not chmod files that have mode private_files_permission."""
self.stat.return_value.st_mode = 0o040600 # dir
tools.file_mode_checker(self.file, mode=0o600)
self.stat.assert_called_with(self.file)
self.assertFalse(self.chmod.called)
def test_auto_chmod_OK(self):
"""Do not chmod files that have mode private_files_permission."""
self.stat.return_value.st_mode = 0o100600 # regular file
tools.file_mode_checker(self.file, mode=0o600)
self.stat.assert_called_with(self.file)
self.assertFalse(self.chmod.called)
def test_auto_chmod_not_OK(self):
"""Chmod files that do not have mode private_files_permission."""
self.stat.return_value.st_mode = 0o100644 # regular file
tools.file_mode_checker(self.file, mode=0o600)
self.stat.assert_called_with(self.file)
self.chmod.assert_called_once_with(self.file, 0o600)
class TestFileShaCalculator(TestCase):
r"""Test calculator of sha of a file.
There are two possible hash values for each test. The second one is for
files with windows line endings (\r\n).
"""
net = False
filename = join_xml_data_path('article-pear-0.10.xml')
def setUp(self):
"""Setup tests."""
super(TestFileShaCalculator, self).setUp()
def test_md5_complete_calculation(self):
"""Test md5 of complete file."""
res = tools.compute_file_hash(self.filename, sha='md5')
self.assertIn(res, (
'5d7265e290e6733e1e2020630262a6f3',
'2c941f2fa7e6e629d165708eb02b67f7',
))
def test_md5_partial_calculation(self):
"""Test md5 of partial file (1024 bytes)."""
res = tools.compute_file_hash(self.filename, sha='md5',
bytes_to_read=1024)
self.assertIn(res, (
'edf6e1accead082b6b831a0a600704bc',
'be0227b6d490baa49e6d7e131c7f596b',
))
def test_sha1_complete_calculation(self):
"""Test sha1 of complete file."""
res = tools.compute_file_hash(self.filename, sha='sha1')
self.assertIn(res, (
'1c12696e1119493a625aa818a35c41916ce32d0c',
'146121e6d0461916c9a0fab00dc718acdb6a6b14',
))
def test_sha1_partial_calculation(self):
"""Test sha1 of partial file (1024 bytes)."""
res = tools.compute_file_hash(self.filename, sha='sha1',
bytes_to_read=1024)
self.assertIn(res, (
'e56fa7bd5cfdf6bb7e2d8649dd9216c03e7271e6',
'617ce7d539848885b52355ed597a042dae1e726f',
))
def test_sha224_complete_calculation(self):
"""Test sha224 of complete file."""
res = tools.compute_file_hash(self.filename, sha='sha224')
self.assertIn(res, (
'3d350d9d9eca074bd299cb5ffe1b325a9f589b2bcd7ba1c033ab4d33',
'4a2cf33b7da01f7b0530b2cc624e1180c8651b20198e9387aee0c767',
))
def test_sha224_partial_calculation(self):
"""Test sha224 of partial file (1024 bytes)."""
res = tools.compute_file_hash(self.filename, sha='sha224',
bytes_to_read=1024)
self.assertIn(res, (
'affa8cb79656a9b6244a079f8af91c9271e382aa9d5aa412b599e169',
'486467144e683aefd420d576250c4cc984e6d7bf10c85d36e3d249d2',
))
class Foo(object):
"""Test class to verify classproperty decorator."""
_bar = 'baz'
@classproperty
def bar(cls): # flake8: disable=N805
"""Class property method."""
return cls._bar
class TestClassProperty(TestCase):
"""Test classproperty decorator."""
net = False
def test_classproperty(self):
"""Test for classproperty decorator."""
self.assertEqual(Foo.bar, 'baz')
self.assertEqual(Foo.bar, Foo._bar)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| 35.620164 | 93 | 0.611671 | [
"MIT"
] | nasqueron/pywikibot | tests/tools_tests.py | 30,396 | Python |
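A minimal sketch of the filter_unique container contract that the tests above exercise, assuming pywikibot is importable; the input list is arbitrary.

from pywikibot import tools

seen = set()
gen = tools.filter_unique([1, 3, 2, 1, 2, 4], container=seen)

# Duplicates are dropped lazily as the generator is consumed, and the
# container doubles as the record of keys that have already been seen.
print(list(gen))   # [1, 3, 2, 4]
print(seen)        # {1, 2, 3, 4}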
import logging
logging.basicConfig(filename='mywarninglog.txt',level=logging.WARNING)
print("Displaying Warning level demo: ")
logging.debug('Debug message')
logging.info('Info message')
logging.warning('Warning message')
logging.error('Error message')
logging.critical('Critical message')
| 30 | 71 | 0.773333 | [
"MIT"
] | bpbpublications/Programming-Techniques-using-Python | Chapter 05/Chap05_Example5.11.py | 300 | Python |
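A quick follow-up check for the snippet above: with the root level set to WARNING, only the last three records should reach the file, in the default basicConfig format (e.g. 'WARNING:root:Warning message').

# Read back what the handler wrote; the DEBUG and INFO lines should be absent.
with open('mywarninglog.txt') as f:
    print(f.read())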
import os
import posixpath
import re
from poetry.packages.constraints.constraint import Constraint
from poetry.packages.constraints.multi_constraint import MultiConstraint
from poetry.packages.constraints.union_constraint import UnionConstraint
from poetry.semver import Version
from poetry.semver import VersionUnion
from poetry.version.markers import MarkerUnion
from poetry.version.markers import MultiMarker
from poetry.version.markers import SingleMarker
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
import urllib.request as urllib2
except ImportError:
import urllib2
BZ2_EXTENSIONS = (".tar.bz2", ".tbz")
XZ_EXTENSIONS = (".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma")
ZIP_EXTENSIONS = (".zip", ".whl")
TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar")
ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
pass
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
pass
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urlparse.urljoin("file:", urllib2.pathname2url(path))
return url
def is_url(name):
if ":" not in name:
return False
scheme = name.split(":", 1)[0].lower()
return scheme in [
"http",
"https",
"file",
"ftp",
"ssh",
"git",
"hg",
"bzr",
"sftp",
"svn" "ssh",
]
def strip_extras(path):
m = re.match(r"^(.+)(\[[^\]]+\])$", path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, "setup.py")
if os.path.isfile(setup_py):
return True
return False
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith(".tar"):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def group_markers(markers, or_=False):
groups = [[]]
for marker in markers:
if or_:
groups.append([])
if isinstance(marker, (MultiMarker, MarkerUnion)):
groups[-1].append(
group_markers(marker.markers, isinstance(marker, MarkerUnion))
)
elif isinstance(marker, SingleMarker):
lhs, op, rhs = marker.name, marker.operator, marker.value
groups[-1].append((lhs, op, rhs))
return groups
def convert_markers(marker):
groups = group_markers([marker])
requirements = {}
def _group(_groups, or_=False):
for group in _groups:
if isinstance(group, tuple):
variable, op, value = group
group_name = str(variable)
if group_name not in requirements:
requirements[group_name] = [[]]
elif or_:
requirements[group_name].append([])
or_ = False
requirements[group_name][-1].append((str(op), str(value)))
else:
_group(group, or_=True)
_group(groups)
return requirements
def create_nested_marker(name, constraint):
if constraint.is_any():
return ""
if isinstance(constraint, (MultiConstraint, UnionConstraint)):
parts = []
for c in constraint.constraints:
multi = False
if isinstance(c, (MultiConstraint, UnionConstraint)):
multi = True
parts.append((multi, create_nested_marker(name, c)))
glue = " and "
if isinstance(constraint, UnionConstraint):
parts = ["({})".format(part[1]) if part[0] else part[1] for part in parts]
glue = " or "
else:
parts = [part[1] for part in parts]
marker = glue.join(parts)
elif isinstance(constraint, Constraint):
marker = '{} {} "{}"'.format(name, constraint.operator, constraint.version)
elif isinstance(constraint, VersionUnion):
parts = []
for c in constraint.ranges:
parts.append(create_nested_marker(name, c))
glue = " or "
parts = ["({})".format(part) for part in parts]
marker = glue.join(parts)
elif isinstance(constraint, Version):
marker = '{} == "{}"'.format(name, constraint.text)
else:
if constraint.min is not None:
op = ">="
if not constraint.include_min:
op = ">"
version = constraint.min.text
if constraint.max is not None:
text = '{} {} "{}"'.format(name, op, version)
op = "<="
if not constraint.include_max:
op = "<"
version = constraint.max
text += ' and {} {} "{}"'.format(name, op, version)
return text
elif constraint.max is not None:
op = "<="
if not constraint.include_max:
op = "<"
version = constraint.max
else:
return ""
marker = '{} {} "{}"'.format(name, op, version)
return marker
| 25.947137 | 86 | 0.582343 | [
"MIT"
] | jancespivo/poetry | poetry/packages/utils/utils.py | 5,890 | Python |
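A short sketch of the archive-name helpers defined above (pure string handling, no filesystem access; the file names are made up and the import path mirrors the module path shown above).

from poetry.packages.utils.utils import splitext, is_archive_file

print(splitext("demo-1.0.tar.gz"))          # ('demo-1.0', '.tar.gz') - '.tar' stays with the extension
print(splitext("demo-1.0.whl"))             # ('demo-1.0', '.whl')
print(is_archive_file("demo-1.0.tar.bz2"))  # True: '.tar.bz2' is in ARCHIVE_EXTENSIONS
print(is_archive_file("demo-1.0.txt"))      # False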
'''
Using AWS Fargate to run fmriprep. Uses our own Docker image, which contains a wrapper to download the data from S3 and push the results back again.
Rhodri Cusack TCIN 2021-06, [email protected]
'''
from ecs_control import register_task, run_task, wait_for_completion
import boto3
import msgpack
import msgpack_numpy as m
from os import path
def run_subjects(subjlist, input_bucket, do_wait=True):
response=[]
for subj in subjlist:
response.append(run_task(client, command = ['/usr/local/miniconda/bin/fmriprep-cusacklab.bash', input_bucket, subj, 'bids', 'deriv-2_topup']))
if do_wait:
wait_for_completion(client, response)
return response
if __name__=='__main__':
input_bucket='foundcog-adult-pilot'
session = boto3.session.Session()
client = session.client('ecs', region_name='eu-west-1')
response = register_task(client)
print(response)
subjlist = ['sub-06','sub-17','sub-03'] # subjects with small affine shifts between fMRI runs
#subjlist =['sub-04','sub-02','sub-05','sub-07','sub-08','sub-09','sub-10','sub-11','sub-12','sub-13','sub-14','sub-15','sub-16']
response = run_subjects(subjlist, input_bucket=input_bucket)
| 36.606061 | 150 | 0.70447 | [
"MIT"
] | rhodricusack/docker-foundcog-adult-pilot | fmriprep-cusacklab-queue-subjects.py | 1,208 | Python |
from scipy.sparse.linalg import LinearOperator,onenormest,aslinearoperator
from .expm_multiply_parallel_wrapper import (_wrapper_expm_multiply,
_wrapper_csr_trace,_wrapper_csr_1_norm)
from scipy.sparse.construct import eye
from scipy.sparse.linalg._expm_multiply import _fragment_3_1,_exact_1_norm
import scipy.sparse as _sp
import numpy as _np
class expm_multiply_parallel(object):
"""Implements `scipy.sparse.linalg.expm_multiply()` for *openmp*.
Notes
-----
* this is a wrapper over custom c++ code.
* the `dtype` input need not be the same dtype as `A` or `a`; however, it must be possible to cast the result of `a*A` to this `dtype`.
* consider the special case of real-time evolution with a purely-imaginary Hamiltonian, in which case `a=-1j*time` and `A` are both complex-valued, while the resulting matrix exponential is real-valued: in such cases, one can use either one of
    >>> expm_multiply_parallel( (1j*H.tocsr()).astype(np.float64), a=-1.0, dtype=np.float64)
and
>>> expm_multiply_parallel( H.tocsr(), a=-1.0j, dtype=np.complex128)
The more efficient way to compute the matrix exponential in this case is to use a real-valued `dtype`.
Examples
--------
This example shows how to construct the `expm_multiply_parallel` object.
Further code snippets can be found in the examples for the function methods of the class.
The code snippet below initiates the class, and is required to run the example codes for the function methods.
.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
:linenos:
:language: python
:lines: 7-30
"""
def __init__(self,A,a=1.0,dtype=None,copy=False):
"""Initializes `expm_multiply_parallel`.
Parameters
-----------
A : {array_like, scipy.sparse matrix}
The operator (matrix) whose exponential is to be calculated.
a : scalar, optional
scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`.
dtype : numpy.dtype, optional
data type specified for the total operator :math:`\\mathrm{e}^{aA}`. Default is: `numpy.result_type(A.dtype,min_scalar_type(a),float64)`.
copy : bool, optional
if `True` the matrix is copied otherwise the matrix is stored by reference.
"""
if _np.array(a).ndim == 0:
self._a = a
else:
raise ValueError("a must be scalar value.")
self._A = _sp.csr_matrix(A,copy=copy)
if A.shape[0] != A.shape[1]:
raise ValueError("A must be a square matrix.")
a_dtype_min = _np.min_scalar_type(self._a)
# use double precision by default.
if dtype is None:
self._dtype = _np.result_type(A.dtype,a_dtype_min,_np.float64)
else:
min_dtype = _np.result_type(A.dtype,a_dtype_min,_np.float32)
if not _np.can_cast(min_dtype,dtype):
raise ValueError("dtype not sufficient to represent a*A to at least float32 precision.")
self._dtype = dtype
tol = _np.finfo(self._dtype).eps/2
tol_dtype = _np.finfo(self._dtype).eps.dtype
self._tol = _np.array(tol,dtype=tol_dtype)
mu = _wrapper_csr_trace(self._A.indptr,self._A.indices,self._A.data)/self._A.shape[0]
self._mu = _np.array(mu,dtype=self._dtype)
self._A_1_norm = _wrapper_csr_1_norm(self._A.indptr,self._A.indices,self._A.data,self._mu)
self._calculate_partition()
# shift = eye(A.shape[0],format="csr",dtype=A.dtype)
# shift.data *= mu
# self._A = self._A - shift
@property
def a(self):
"""scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`"""
return self._a
@property
def A(self):
"""scipy.sparse.csr_matrix: csr_matrix to be exponentiated."""
return self._A
def set_a(self,a,dtype=None):
"""Sets the value of the property `a`.
Parameters
----------
a : scalar
new value of `a`.
dtype : numpy.dtype, optional
dtype specified for this operator. Default is: result_type(A.dtype,min_scalar_type(a),float64)
Examples
--------
.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
:linenos:
:language: python
:lines: 32-35
"""
if _np.array(a).ndim == 0:
self._a = a
a_dtype_min = _np.min_scalar_type(self._a)
# use double precision by default.
if dtype is None:
self._dtype = _np.result_type(self._A.dtype,a_dtype_min,_np.float64)
else:
                min_dtype = _np.result_type(self._A.dtype,a_dtype_min,_np.float32)
if not _np.can_cast(min_dtype,dtype):
raise ValueError("dtype not sufficient to represent a*A to at least float32 precision.")
self._dtype = dtype
tol = _np.finfo(self._dtype).eps/2
tol_dtype = _np.finfo(self._dtype).eps.dtype
self._tol = _np.array(tol,dtype=tol_dtype)
self._mu = _np.array(self._mu,dtype=self._dtype)
self._calculate_partition()
else:
raise ValueError("expecting 'a' to be scalar.")
def dot(self,v,work_array=None,overwrite_v=False):
"""Calculates the action of :math:`\\mathrm{e}^{aA}` on a vector :math:`v`.
Examples
--------
.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py
:linenos:
:language: python
:lines: 37-
Parameters
-----------
v : contiguous numpy.ndarray
array to apply :math:`\\mathrm{e}^{aA}` on.
work_array : contiguous numpy.ndarray, optional
            array of `shape = (2*len(v),)` which is used as workspace by the underlying C code. Passing it in saves an extra memory allocation per call.
overwrite_v : bool
if set to `True`, the data in `v` is overwritten by the function. This saves extra memory allocation for the results.
Returns
--------
numpy.ndarray
result of :math:`\\mathrm{e}^{aA}v`.
            If `overwrite_v = True` the function returns `v` with the data overwritten, otherwise the result is stored in a new array.
"""
v = _np.asarray(v)
if v.ndim != 1:
raise ValueError("array must have ndim of 1.")
if v.shape[0] != self._A.shape[1]:
raise ValueError("dimension mismatch {}, {}".format(self._A.shape,v.shape))
v_dtype = _np.result_type(self._dtype,v.dtype)
if overwrite_v:
if v_dtype != v.dtype:
raise ValueError("if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.")
if not v.flags["CARRAY"]:
raise TypeError("input array must a contiguous and writable.")
if v.ndim != 1:
raise ValueError("array must have ndim of 1.")
else:
v = v.astype(v_dtype,order="C",copy=True)
if work_array is None:
work_array = _np.zeros((2*self._A.shape[0],),dtype=v.dtype)
else:
work_array = _np.ascontiguousarray(work_array)
if work_array.shape != (2*self._A.shape[0],):
raise ValueError("work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.")
if work_array.dtype != v_dtype:
raise ValueError("work_array must be array of dtype which matches the result of the matrix-vector multiplication.")
a = _np.array(self._a,dtype=v_dtype)
mu = _np.array(self._mu,dtype=v_dtype)
tol = _np.array(self._tol,dtype=mu.real.dtype)
_wrapper_expm_multiply(self._A.indptr,self._A.indices,self._A.data,
self._s,self._m_star,a,tol,mu,v,work_array)
return v
def _calculate_partition(self):
if _np.abs(self._a)*self._A_1_norm == 0:
self._m_star, self._s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(self._A, self._A_1_norm, self._a, self._mu, self._dtype, ell=ell)
self._m_star, self._s = _fragment_3_1(norm_info, 1, self._tol, ell=ell)
##### code below is copied from scipy.sparse.linalg._expm_multiply_core and modified slightly.
def matvec_p(v,A,a,mu,p):
for i in range(p):
v = a * (A.dot(v) - mu*v)
return v
class LazyOperatorNormInfo:
"""
Information about an operator is lazily computed.
The information includes the exact 1-norm of the operator,
in addition to estimates of 1-norms of powers of the operator.
This uses the notation of Computing the Action (2011).
This class is specialized enough to probably not be of general interest
outside of this module.
"""
def __init__(self, A, A_1_norm, a, mu, dtype, ell=2):
"""
Provide the operator and some norm-related information.
Parameters
-----------
A : linear operator
The operator of interest.
A_1_norm : float
The exact 1-norm of A.
ell : int, optional
A technical parameter controlling norm estimation quality.
"""
self._A = A
self._a = a
self._mu = mu
self._dtype = dtype
self._A_1_norm = A_1_norm
self._ell = ell
self._d = {}
def onenorm(self):
"""
Compute the exact 1-norm.
"""
return _np.abs(self._a) * self._A_1_norm
def d(self, p):
"""
Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
"""
if p not in self._d:
matvec = lambda v: self._a * (self._A.dot(v) - self._mu*v)
rmatvec = lambda v: _np.conj(self._a) * (self._A.H.dot(v) - _np.conj(self._mu)*v)
LO = LinearOperator(self._A.shape,dtype=self._dtype,matvec=matvec,rmatvec=rmatvec)
est = onenormest(LO**p)
# est = onenormest((self._a * aslinearoperator(self._A))**p)
self._d[p] = est ** (1.0 / p)
return self._d[p]
def alpha(self, p):
"""
Lazily compute max(d(p), d(p+1)).
"""
return max(self.d(p), self.d(p+1))
| 35.486577 | 247 | 0.60104 | [
"BSD-3-Clause"
] | markusschmitt/QuSpin | quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py | 10,575 | Python |
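A minimal usage sketch for the class above. The 2x2 matrix, prefactors and start vector are made up, and the import path is an assumption based on how QuSpin exposes the class in its documentation.

import numpy as np
import scipy.sparse as sp
from quspin.tools.evolution import expm_multiply_parallel  # import path assumed

# Small Hermitian generator and a start vector (values are arbitrary).
H = sp.csr_matrix(np.array([[1.0, 0.5], [0.5, -1.0]]))
v = np.array([1.0, 0.0], dtype=np.complex128)

U = expm_multiply_parallel(H, a=-0.1j)   # represents exp(-0.1j*H) acting on vectors
v = U.dot(v)                             # one step; pass overwrite_v=True to reuse v's buffer

U.set_a(-0.2j)                           # same matrix, new prefactor; the partition is recomputed
v = U.dot(v)
print(v)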
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similarly to pandas DataFrame.
"""
from collections import defaultdict, namedtuple
from collections.abc import Mapping
import re
import warnings
import inspect
import json
import types
from functools import partial, reduce
import sys
from itertools import zip_longest, chain
from types import TracebackType
from typing import (
Any,
Callable,
Dict,
Generic,
IO,
Iterable,
Iterator,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
no_type_check,
TYPE_CHECKING,
)
import datetime
import numpy as np
import pandas as pd
from pandas.api.types import ( # type: ignore[attr-defined]
is_bool_dtype,
is_list_like,
is_dict_like,
is_scalar,
)
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
from pandas.io.formats.style import Styler
from pandas.core.dtypes.common import infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import StorageLevel
from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
ArrayType,
BooleanType,
DataType,
DoubleType,
NumericType,
Row,
StringType,
StructField,
StructType,
DecimalType,
TimestampType,
TimestampNTZType,
)
from pyspark.sql.window import Window
from pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.
from pyspark.pandas._typing import Axis, DataFrameOrSeries, Dtype, Label, Name, Scalar, T
from pyspark.pandas.accessors import PandasOnSparkFrameMethods
from pyspark.pandas.config import option_context, get_option
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods
from pyspark.pandas.utils import (
align_diff_frames,
column_labels_level,
combine_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
is_testing,
name_like_string,
same_anchor,
scol_for,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
validate_how,
validate_mode,
verify_temp_column_name,
log_advice,
)
from pyspark.pandas.generic import Frame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
SPARK_INDEX_NAME_PATTERN,
)
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.ml import corr
from pyspark.pandas.typedef.typehints import (
as_spark_type,
infer_return_type,
pandas_on_spark_type,
spark_type_to_pandas_dtype,
DataFrameType,
SeriesType,
ScalarType,
create_tuple_for_frame_type,
)
from pyspark.pandas.plot import PandasOnSparkPlotAccessor
if TYPE_CHECKING:
from pyspark.sql._typing import OptionalPrimitiveType
from pyspark.pandas.groupby import DataFrameGroupBy
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series
# These regular expression patterns are complied and defined here to avoid to compile the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas'
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$"
)
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ps.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(df)
angles degrees
circle 0 720
triangle 6 360
rectangle 8 720
>>> df + df + df
angles degrees
circle 0 1080
triangle 9 540
rectangle 12 1080
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.floordiv(10)
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.rfloordiv(10) # doctest: +SKIP
angles degrees
circle inf 0.0
triangle 3.0 0.0
rectangle 2.0 0.0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
class DataFrame(Frame, Generic[T]):
"""
pandas-on-Spark DataFrame that corresponds to pandas DataFrame logically. This holds Spark
DataFrame internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \
or pandas-on-Spark Series
Dict can contain Series, arrays, constants, or list-like objects
    Note that if `data` is a pandas DataFrame, a Spark DataFrame, or a pandas-on-Spark Series,
    other arguments should not be used.
index : Index or array-like
        Index to use for the resulting frame. Will default to RangeIndex if
        no indexing information is part of the input data and no index is provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ps.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from pandas DataFrame
>>> df = ps.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ps.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ps.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__( # type: ignore[no-untyped-def]
self, data=None, index=None, columns=None, dtype=None, copy=False
):
if isinstance(data, InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
internal = data
elif isinstance(data, SparkDataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
internal = InternalFrame(spark_frame=data, index_spark_columns=None)
elif isinstance(data, ps.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_frame()
internal = data._internal
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
internal = InternalFrame.from_pandas(pdf)
object.__setattr__(self, "_internal_frame", internal)
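    # The branches above accept, in order: an existing InternalFrame (internal use only), a Spark
    # DataFrame, a pandas-on-Spark Series, a pandas DataFrame, or anything the pandas DataFrame
    # constructor itself can consume. A small illustrative sketch (assuming a running
    # SparkSession named `spark`; not a doctest):
    #
    #     sdf = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "val"])
    #     psdf_from_spark = DataFrame(sdf)                      # wraps the Spark DataFrame
    #     psdf_from_pandas = DataFrame(pd.DataFrame({"id": [1, 2]}))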
@property
def _pssers(self) -> Dict[Label, "Series"]:
"""Return a dict of column label -> Series which anchors `self`."""
from pyspark.pandas.series import Series
if not hasattr(self, "_psseries"):
object.__setattr__(
self,
"_psseries",
{label: Series(data=self, index=label) for label in self._internal.column_labels},
)
else:
psseries = cast(Dict[Label, Series], self._psseries) # type: ignore[has-type]
assert len(self._internal.column_labels) == len(psseries), (
len(self._internal.column_labels),
len(psseries),
)
if any(self is not psser._psdf for psser in psseries.values()):
# Refresh the dict to contain only Series anchoring `self`.
self._psseries = {
label: (
psseries[label]
if self is psseries[label]._psdf
else Series(data=self, index=label)
)
for label in self._internal.column_labels
}
return self._psseries
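    # Anchoring sketch: column access returns Series objects cached in `_psseries`, so repeated
    # access to the same label should yield the same object as long as the anchor is unchanged.
    # Illustrative only, not a doctest:
    #
    #     psdf = ps.DataFrame({"a": [1, 2]})
    #     psdf["a"] is psdf["a"]   # expected to be True - both come from the cached dict above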
@property
def _internal(self) -> InternalFrame:
return cast(InternalFrame, self._internal_frame) # type: ignore[has-type]
def _update_internal_frame(
self, internal: InternalFrame, requires_same_anchor: bool = True
) -> None:
"""
Update InternalFrame with the given one.
If the column_label is changed or the new InternalFrame is not the same `anchor`,
disconnect the link to the Series and create a new one.
        If `requires_same_anchor` is `False`, the anchor check is skipped and the InternalFrame is
        forcibly updated, e.g., when replacing the internal with the resolved_copy, or when
        updating the underlying Spark DataFrame which needs to be combined with a different Spark
        DataFrame.
:param internal: the new InternalFrame
:param requires_same_anchor: whether checking the same anchor
"""
from pyspark.pandas.series import Series
if hasattr(self, "_psseries"):
psseries = {}
for old_label, new_label in zip_longest(
self._internal.column_labels, internal.column_labels
):
if old_label is not None:
psser = self._pssers[old_label]
renamed = old_label != new_label
not_same_anchor = requires_same_anchor and not same_anchor(internal, psser)
if renamed or not_same_anchor:
psdf: DataFrame = DataFrame(self._internal.select_column(old_label))
psser._update_anchor(psdf)
psser = None
else:
psser = None
if new_label is not None:
if psser is None:
psser = Series(data=self, index=new_label)
psseries[new_label] = psser
self._psseries = psseries
self._internal_frame = internal
if hasattr(self, "_repr_pandas_cache"):
del self._repr_pandas_cache
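    # In short: after `_update_internal_frame`, every cached Series either keeps anchoring this
    # DataFrame (same label, same anchor) or is detached onto a one-column DataFrame built from
    # the old internal, so user-held Series references keep their old data instead of silently
    # changing.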
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
        Return 2 for DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', None],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
NaN 7 8
>>> df.ndim
2
"""
return 2
@property
def axes(self) -> List:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]
"""
return [self.index, self.columns]
def _reduce_for_stat_function(
self,
sfun: Callable[["Series"], Column],
name: str,
axis: Optional[Axis] = None,
numeric_only: bool = True,
**kwargs: Any,
) -> "Series":
"""
        Applies sfun to each column and returns a Series where the number of rows equals the
        number of columns.
        Parameters
        ----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
            a 2-arg function that takes a Column and its DataType and returns a Column.
        name : original pandas API name.
        axis : axis to apply. 0 or 1, or 'index' or 'columns'.
numeric_only : bool, default True
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter
currently.
"""
from pyspark.pandas.series import Series, first_series
axis = validate_axis(axis)
if axis == 0:
min_count = kwargs.get("min_count", 0)
exprs = [SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
new_column_labels = []
for label in self._internal.column_labels:
psser = self._psser_for(label)
is_numeric_or_boolean = isinstance(
psser.spark.data_type, (NumericType, BooleanType)
)
keep_column = not numeric_only or is_numeric_or_boolean
if keep_column:
scol = sfun(psser)
if min_count > 0:
scol = F.when(Frame._count_expr(psser) >= min_count, scol)
exprs.append(scol.alias(name_like_string(label)))
new_column_labels.append(label)
if len(exprs) == 1:
return Series([])
sdf = self._internal.spark_frame.select(*exprs)
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.max_rows", 1):
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=new_column_labels,
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal).transpose())
else:
            # Here we execute with the first 1000 rows to get the return type.
            # If there are fewer than 1000 records, the pandas API is used directly as a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type)) # type: ignore[call-overload]
def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
return getattr(pd.concat(cols, axis=1), name)(
axis=axis, numeric_only=numeric_only, **kwargs
)
column_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__calculate_columns_axis__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
+ [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=self._internal.index_fields,
)
return first_series(DataFrame(internal)).rename(pser.name)
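    # Illustrative call for the helper above (the real reductions such as `sum` and `max` are
    # defined on the shared Frame base class; the lambda below is only a sketch):
    #
    #     psdf._reduce_for_stat_function(
    #         lambda psser: F.sum(psser.spark.column), name="sum", axis=0
    #     )
    #
    # With axis=0 the reduction is pushed down to Spark; with axis=1 a pandas UDF
    # (`calculate_columns_axis`) computes the per-row results.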
def _psser_for(self, label: Label) -> "Series":
"""
Create Series with a proper column label.
The given label must be verified to exist in `InternalFrame.column_labels`.
For example, in some method, self is like:
>>> self = ps.range(3)
`self._psser_for(label)` can be used with `InternalFrame.column_labels`:
>>> self._psser_for(self._internal.column_labels[0])
0 0
1 1
2 2
Name: id, dtype: int64
`self._psser_for(label)` must not be used directly with user inputs.
In that case, `self[label]` should be used instead, which checks the label exists or not:
>>> self['id']
0 0
1 1
2 2
Name: id, dtype: int64
"""
return self._pssers[label]
def _apply_series_op(
self, op: Callable[["Series"], Union["Series", Column]], should_resolve: bool = False
) -> "DataFrame":
applied = []
for label in self._internal.column_labels:
applied.append(op(self._psser_for(label)))
internal = self._internal.with_new_columns(applied)
if should_resolve:
internal = internal.resolved_copy
return DataFrame(internal)
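    # `_apply_series_op` is the common path for column-wise methods in this class; for example,
    # `__abs__` below is simply `self._apply_series_op(lambda psser: abs(psser))`. Passing
    # `should_resolve=True` swaps in a resolved copy of the internal frame, which some callers
    # use to avoid piling further operations onto a long lazily-chained plan.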
# Arithmetic Operators
def _map_series_op(self, op: str, other: Any) -> "DataFrame":
from pyspark.pandas.base import IndexOpsMixin
if not isinstance(other, DataFrame) and (
isinstance(other, IndexOpsMixin) or is_sequence(other)
):
raise TypeError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other).__name__)
)
if isinstance(other, DataFrame):
if self._internal.column_labels_level != other._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
if not same_anchor(self, other):
# Different DataFrames
def apply_op(
psdf: DataFrame,
this_column_labels: List[Label],
that_column_labels: List[Label],
) -> Iterator[Tuple["Series", Label]]:
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (
getattr(psdf._psser_for(this_label), op)(
psdf._psser_for(that_label)
).rename(this_label),
this_label,
)
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
else:
applied = []
column_labels = []
for label in self._internal.column_labels:
if label in other._internal.column_labels:
applied.append(getattr(self._psser_for(label), op)(other._psser_for(label)))
else:
applied.append(
SF.lit(None)
.cast(self._internal.spark_type_for(label))
.alias(name_like_string(label))
)
column_labels.append(label)
for label in other._internal.column_labels:
if label not in column_labels:
applied.append(
SF.lit(None)
.cast(other._internal.spark_type_for(label))
.alias(name_like_string(label))
)
column_labels.append(label)
internal = self._internal.with_new_columns(applied, column_labels=column_labels)
return DataFrame(internal)
else:
return self._apply_series_op(lambda psser: getattr(psser, op)(other))
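    # Alignment sketch: when both operands are DataFrames, columns present in only one of them
    # are padded with nulls (see the SF.lit(None) casts above), so the result carries the union
    # of the column labels, mirroring pandas alignment. Illustrative only, output not asserted:
    #
    #     left = ps.DataFrame({"a": [1, 2]})
    #     right = ps.DataFrame({"b": [10, 20]})
    #     left + right   # columns 'a' and 'b', NaN wherever either operand lacks the column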
def __add__(self, other: Any) -> "DataFrame":
return self._map_series_op("add", other)
def __radd__(self, other: Any) -> "DataFrame":
return self._map_series_op("radd", other)
def __truediv__(self, other: Any) -> "DataFrame":
return self._map_series_op("truediv", other)
def __rtruediv__(self, other: Any) -> "DataFrame":
return self._map_series_op("rtruediv", other)
def __mul__(self, other: Any) -> "DataFrame":
return self._map_series_op("mul", other)
def __rmul__(self, other: Any) -> "DataFrame":
return self._map_series_op("rmul", other)
def __sub__(self, other: Any) -> "DataFrame":
return self._map_series_op("sub", other)
def __rsub__(self, other: Any) -> "DataFrame":
return self._map_series_op("rsub", other)
def __pow__(self, other: Any) -> "DataFrame":
return self._map_series_op("pow", other)
def __rpow__(self, other: Any) -> "DataFrame":
return self._map_series_op("rpow", other)
def __mod__(self, other: Any) -> "DataFrame":
return self._map_series_op("mod", other)
def __rmod__(self, other: Any) -> "DataFrame":
return self._map_series_op("rmod", other)
def __floordiv__(self, other: Any) -> "DataFrame":
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other: Any) -> "DataFrame":
return self._map_series_op("rfloordiv", other)
def __abs__(self) -> "DataFrame":
return self._apply_series_op(lambda psser: abs(psser))
def __neg__(self) -> "DataFrame":
return self._apply_series_op(lambda psser: -psser)
def add(self, other: Any) -> "DataFrame":
return self + other
# create accessor for plot
plot = CachedAccessor("plot", PandasOnSparkPlotAccessor)
# create accessor for Spark related methods.
spark = CachedAccessor("spark", SparkFrameMethods)
# create accessor for pandas-on-Spark specific methods.
pandas_on_spark = CachedAccessor("pandas_on_spark", PandasOnSparkFrameMethods)
# keep the name "koalas" for backward compatibility.
koalas = CachedAccessor("koalas", PandasOnSparkFrameMethods)
@no_type_check
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = PandasOnSparkPlotAccessor.hist.__doc__
@no_type_check
def kde(self, bw_method=None, ind=None, **kwds):
return self.plot.kde(bw_method, ind, **kwds)
kde.__doc__ = PandasOnSparkPlotAccessor.kde.__doc__
add.__doc__ = _flex_doc_FRAME.format(
desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd"
)
def radd(self, other: Any) -> "DataFrame":
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc="Addition", op_name="+", equiv="other + dataframe", reverse="add"
)
def div(self, other: Any) -> "DataFrame":
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv"
)
divide = div
def rdiv(self, other: Any) -> "DataFrame":
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div"
)
def truediv(self, other: Any) -> "DataFrame":
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv"
)
def rtruediv(self, other: Any) -> "DataFrame":
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv"
)
def mul(self, other: Any) -> "DataFrame":
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul"
)
multiply = mul
def rmul(self, other: Any) -> "DataFrame":
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul"
)
def sub(self, other: Any) -> "DataFrame":
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub"
)
subtract = sub
def rsub(self, other: Any) -> "DataFrame":
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub"
)
def mod(self, other: Any) -> "DataFrame":
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod"
)
def rmod(self, other: Any) -> "DataFrame":
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod"
)
def pow(self, other: Any) -> "DataFrame":
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow"
)
def rpow(self, other: Any) -> "DataFrame":
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow"
)
def floordiv(self, other: Any) -> "DataFrame":
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv"
)
def rfloordiv(self, other: Any) -> "DataFrame":
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv"
)
# Comparison Operators
def __eq__(self, other: Any) -> "DataFrame": # type: ignore[override]
return self._map_series_op("eq", other)
def __ne__(self, other: Any) -> "DataFrame": # type: ignore[override]
return self._map_series_op("ne", other)
def __lt__(self, other: Any) -> "DataFrame":
return self._map_series_op("lt", other)
def __le__(self, other: Any) -> "DataFrame":
return self._map_series_op("le", other)
def __ge__(self, other: Any) -> "DataFrame":
return self._map_series_op("ge", other)
def __gt__(self, other: Any) -> "DataFrame":
return self._map_series_op("gt", other)
def eq(self, other: Any) -> "DataFrame":
"""
Compare if the current value is equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False False
c False True
d False False
"""
return self == other
equals = eq
def gt(self, other: Any) -> "DataFrame":
"""
Compare if the current value is greater than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False False
c True False
d True False
"""
return self > other
def ge(self, other: Any) -> "DataFrame":
"""
Compare if the current value is greater than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True False
c True True
d True False
"""
return self >= other
def lt(self, other: Any) -> "DataFrame":
"""
Compare if the current value is less than the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False False
c False False
d False False
"""
return self < other
def le(self, other: Any) -> "DataFrame":
"""
Compare if the current value is less than or equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True False
c False True
d False False
"""
return self <= other
def ne(self, other: Any) -> "DataFrame":
"""
Compare if the current value is not equal to the other.
>>> df = ps.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True True
c True False
d True True
"""
return self != other
def applymap(self, func: Callable[[Any], Any]) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
        pandas-on-Spark uses the return type hint and does not try to infer the type.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
You can omit the type hint and let pandas-on-Spark infer its type.
>>> df.applymap(lambda x: x ** 2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# TODO: We can implement shortcut theoretically since it creates new DataFrame
# anyway and we don't have to worry about operations on different DataFrames.
return self._apply_series_op(lambda psser: psser.apply(func))
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(self, func: Union[List[str], Dict[Name, List[str]]]) -> "DataFrame":
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Invoke function on DataFrame.
DataFrame.transform : Only perform transforming type operations.
DataFrame.groupby : Perform operations over groups.
Series.aggregate : The equivalent function for Series.
Examples
--------
>>> df = ps.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index()
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index()
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
For multi-index columns:
>>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
>>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index()
X Y
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
>>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']})
>>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE
X
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from pyspark.pandas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([(column, func) for column in self.columns])
else:
raise ValueError(
"If the given function is a list, it "
"should only contains function names as strings."
)
if not isinstance(func, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or (isinstance(value, list) and all(isinstance(v, str) for v in value))
)
for key, value in func.items()
):
raise ValueError(
"aggs must be a dict mapping from column name to aggregate "
"functions (string or list of strings)."
)
with option_context("compute.default_index_type", "distributed"):
psdf: DataFrame = DataFrame(GroupBy._spark_groupby(self, func))
        # The code below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
        # The aggregated output is usually fairly small.
return psdf.stack().droplevel(0)[list(func.keys())]
agg = aggregate
def corr(self, method: str = "pearson") -> "DataFrame":
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between pandas-on-Spark and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. pandas-on-Spark will return an error.
* pandas-on-Spark doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return cast(DataFrame, ps.from_pandas(corr(self, method)))
def iteritems(self) -> Iterator[Tuple[Name, "Series"]]:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ps.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
return (
(label if len(label) > 1 else label[0], self._psser_for(label))
for label in self._internal.column_labels
)
def iterrows(self) -> Iterator[Tuple[Name, pd.Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : pandas.Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = ps.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
internal_index_columns = self._internal.index_spark_column_names
internal_data_columns = self._internal.data_spark_column_names
def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:
k = (
row[internal_index_columns[0]]
if len(internal_index_columns) == 1
else tuple(row[c] for c in internal_index_columns)
)
v = [row[c] for c in internal_data_columns]
return k, v
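        # Rows are streamed to the driver one partition at a time via toLocalIterator(), so the
        # whole DataFrame does not have to fit in driver memory at once; iterating a large frame
        # this way is nevertheless slow.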
for k, v in map(
extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
):
s = pd.Series(v, index=columns, name=k)
yield k, s
def itertuples(
self, index: bool = True, name: Optional[str] = "PandasOnSpark"
) -> Iterator[Tuple]:
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "PandasOnSpark"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = ps.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
PandasOnSpark(Index='dog', num_legs=4, num_wings=0)
PandasOnSpark(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
PandasOnSpark(num_legs=4, num_wings=0)
PandasOnSpark(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
fields = list(self.columns)
if index:
fields.insert(0, "Index")
index_spark_column_names = self._internal.index_spark_column_names
data_spark_column_names = self._internal.data_spark_column_names
def extract_kv_from_spark_row(row: Row) -> Tuple[Name, Any]:
k = (
row[index_spark_column_names[0]]
if len(index_spark_column_names) == 1
else tuple(row[c] for c in index_spark_column_names)
)
v = [row[c] for c in data_spark_column_names]
return k, v
can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255
if name is not None and can_return_named_tuples:
itertuple = namedtuple(name, fields, rename=True) # type: ignore[misc]
for k, v in map(
extract_kv_from_spark_row,
self._internal.resolved_copy.spark_frame.toLocalIterator(),
):
yield itertuple._make(([k] if index else []) + list(v))
else:
for k, v in map(
extract_kv_from_spark_row,
self._internal.resolved_copy.spark_frame.toLocalIterator(),
):
yield tuple(([k] if index else []) + list(v))
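    # Note on the branch above: namedtuple is limited to 255 fields on Python < 3.7, hence the
    # `can_return_named_tuples` guard; very wide frames on old interpreters fall back to plain
    # tuples, matching the behavior described in the docstring.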
def items(self) -> Iterator[Tuple[Name, "Series"]]:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def to_clipboard(self, excel: bool = True, sep: Optional[str] = None, **kwargs: Any) -> None:
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ps.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to False.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ps.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args
)
def to_html(
self,
buf: Optional[IO[str]] = None,
columns: Optional[Sequence[Name]] = None,
col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
bold_rows: bool = True,
classes: Optional[Union[str, list, tuple]] = None,
escape: bool = True,
notebook: bool = False,
border: Optional[int] = None,
table_id: Optional[str] = None,
render_links: bool = False,
) -> Optional[str]:
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
psdf = self.head(max_rows)
else:
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args
)
def to_string(
self,
buf: Optional[IO[str]] = None,
columns: Optional[Sequence[Name]] = None,
col_space: Optional[Union[str, int, Dict[Name, Union[str, int]]]] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
psdf = self.head(max_rows)
else:
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args
)
def to_dict(self, orient: str = "dict", into: Type = dict) -> Union[List, Mapping]:
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args
)
def to_latex(
self,
buf: Optional[IO[str]] = None,
columns: Optional[List[Name]] = None,
col_space: Optional[int] = None,
header: bool = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[
Union[List[Callable[[Any], str]], Dict[Name, Callable[[Any], str]]]
] = None,
float_format: Optional[Callable[[float], str]] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
bold_rows: bool = False,
column_format: Optional[str] = None,
longtable: Optional[bool] = None,
escape: Optional[bool] = None,
encoding: Optional[str] = None,
decimal: str = ".",
multicolumn: Optional[bool] = None,
multicolumn_format: Optional[str] = None,
multirow: Optional[bool] = None,
) -> Optional[str]:
r"""
        Render an object to a LaTeX tabular environment table. You can splice this into a LaTeX
        document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
            If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ps.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
<BLANKLINE>
"""
args = locals()
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args
)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self) -> "DataFrame":
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
            then group twice - it is a huge operation. To prevent misuse, this method
            limits the input length by the 'compute.max_rows' option and raises a ValueError.
>>> from pyspark.pandas.config import option_context
>>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE
... ps.DataFrame({'a': range(1001)}).transpose()
Traceback (most recent call last):
...
        ValueError: Current DataFrame has more than the given limit 1000 rows.
        Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
        to retrieve more than 1000 rows. Note that, before changing the
        'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
        In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ps.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ps.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more then the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option' "
"to retrieve to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
pairs = F.explode(
F.array(
*[
F.struct(
*[
SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))
for i, col in enumerate(label)
],
*[self._internal.spark_column_for(label).alias("value")],
)
for label in self._internal.column_labels
]
)
)
exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select(
[
F.to_json(
F.struct(
F.array(*[scol for scol in self._internal.index_spark_columns]).alias("a")
)
).alias("index"),
F.col("pairs.*"),
]
)
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index")
transposed_df = pivoted_df.agg(F.first(F.col("value")))
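        # F.first merely picks the single value in each (index, pivot key) cell; the pivot yields
        # at most one value per cell because the original index values are required to be unique
        # (they become the new column names).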
new_data_columns = list(
filter(lambda x: x not in internal_index_columns, transposed_df.columns)
)
column_labels = [
None if len(label) == 1 and label[0] is None else label
for label in (tuple(json.loads(col)["a"]) for col in new_data_columns)
]
internal = InternalFrame(
spark_frame=transposed_df,
index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns],
index_names=self._internal.column_label_names,
column_labels=column_labels,
data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns],
column_label_names=self._internal.index_names,
)
return DataFrame(internal)
T = property(transpose)
def apply(
self, func: Callable, axis: Axis = 0, args: Sequence[Any] = (), **kwds: Any
) -> Union["Series", "DataFrame", "Index"]:
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``).
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
.. note:: when `axis` is 0 or 'index', the `func` is unable to access
            the whole input series. pandas-on-Spark internally splits the input series into
multiple batches and calls `func` with each batch multiple times. Therefore, operations
such as global aggregations are impossible. See the example below.
        >>> # This case does not return the length of the whole series but of the batch internally
... # used.
... def length(s) -> int:
... return len(s)
...
>>> df = ps.DataFrame({'A': range(1000)})
>>> df.apply(length, axis=0) # doctest: +SKIP
0 83
1 83
2 83
...
10 83
11 83
dtype: int32
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify the return type as `Series` or scalar value in ``func``,
for instance, as below:
>>> def square(s) -> ps.Series[np.int32]:
... return s ** 2
        pandas-on-Spark uses the return type hint and does not try to infer the type.
In case when axis is 1, it requires to specify `DataFrame` or scalar value
with type hints as below:
>>> def plus_one(x) -> ps.DataFrame[int, [float, float]]:
... return x + 1
If the return type is specified as `DataFrame`, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a pandas friendly style as below:
>>> def plus_one(x) -> ps.DataFrame[("index", int), [("a", float), ("b", float)]]:
... return x + 1
>>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
>>> def plus_one(x) -> ps.DataFrame[
... (pdf.index.name, pdf.index.dtype), zip(pdf.dtypes, pdf.columns)]:
... return x + 1
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap : For elementwise operations.
DataFrame.aggregate : Only perform aggregating type operations.
DataFrame.transform : Only perform transforming type operations.
Series.apply : The equivalent function for Series.
Examples
--------
>>> df = ps.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> def sqrt(x) -> ps.Series[float]:
... return np.sqrt(x)
...
>>> df.apply(sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
You can omit the type hint and let pandas-on-Spark infer its type.
>>> df.apply(np.sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
When `axis` is 1 or 'columns', it applies the function for each row.
>>> def summation(x) -> np.int64:
... return np.sum(x)
...
>>> df.apply(summation, axis=1)
0 13
1 13
2 13
dtype: int64
Likewise, you can omit the type hint and let pandas-on-Spark infer its type.
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
>>> df.apply(max, axis=1)
0 9
1 9
2 9
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
In order to specify the types when `axis` is '1', it should use DataFrame[...]
annotation. In this case, the column names are automatically generated.
>>> def identify(x) -> ps.DataFrame[('index', int), [('A', np.int64), ('B', np.int64)]]:
... return x
...
>>> df.apply(identify, axis=1) # doctest: +NORMALIZE_WHITESPACE
A B
index
0 4 9
1 4 9
2 4 9
You can also specify extra arguments.
>>> def plus_two(a, b, c) -> ps.DataFrame[np.int64, [np.int64, np.int64]]:
... return a + b + c
...
>>> df.apply(plus_two, axis=1, args=(1,), c=3)
c0 c1
0 8 13
1 8 13
2 8 13
"""
from pyspark.pandas.groupby import GroupBy
from pyspark.pandas.series import first_series
if not isinstance(func, types.FunctionType):
assert callable(func), "the first argument should be a callable function."
f = func
# Note that the return type hint specified here affects actual return
# type in Spark (e.g., infer_return_type). And, MyPy does not allow
# redefinition of a function.
func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731
axis = validate_axis(axis)
should_return_series = False
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
should_retain_index = should_infer_schema
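# `apply_func` runs `func` on each pandas batch; a Series result is converted into a
# single-column frame so that the Spark schema handling below stays uniform.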
def apply_func(pdf: pd.DataFrame) -> pd.DataFrame:
pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]
if isinstance(pdf_or_pser, pd.Series):
return pdf_or_pser.to_frame()
else:
return pdf_or_pser
self_applied: DataFrame = DataFrame(self._internal.resolved_copy)
column_labels: Optional[List[Label]] = None
if should_infer_schema:
# Here we execute with the first 1000 records to get the return type.
# If there were fewer than 1000 records, the pandas API is used directly as a shortcut.
log_advice(
"If the type hints is not specified for `apply`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
pdf = self_applied.head(limit + 1)._to_internal_pandas()
applied = pdf.apply(func, axis=axis, args=args, **kwds) # type: ignore[arg-type]
psser_or_psdf = ps.from_pandas(applied)
if len(pdf) <= limit:
return psser_or_psdf
psdf = psser_or_psdf
if isinstance(psser_or_psdf, ps.Series):
should_return_series = True
psdf = psser_or_psdf._psdf
index_fields = [field.normalize_spark_type() for field in psdf._internal.index_fields]
data_fields = [field.normalize_spark_type() for field in psdf._internal.data_fields]
return_schema = StructType([field.struct_field for field in index_fields + data_fields])
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, apply_func, return_schema, retain_index=should_retain_index
)
sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
# If schema is inferred, we can restore indexes too.
internal = psdf._internal.with_new_sdf(
spark_frame=sdf, index_fields=index_fields, data_fields=data_fields
)
else:
return_type = infer_return_type(func)
require_index_axis = isinstance(return_type, SeriesType)
require_column_axis = isinstance(return_type, DataFrameType)
index_fields = None
if require_index_axis:
if axis != 0:
raise TypeError(
"The given function should specify a scalar or a series as its type "
"hints when axis is 0 or 'index'; however, the return type "
"was %s" % return_sig
)
dtype = cast(SeriesType, return_type).dtype
spark_type = cast(SeriesType, return_type).spark_type
data_fields = [
InternalField(
dtype=dtype, struct_field=StructField(name=name, dataType=spark_type)
)
for name in self_applied.columns
]
return_schema = StructType([field.struct_field for field in data_fields])
elif require_column_axis:
if axis != 1:
raise TypeError(
"The given function should specify a scalar or a frame as its type "
"hints when axis is 1 or 'column'; however, the return type "
"was %s" % return_sig
)
index_fields = cast(DataFrameType, return_type).index_fields
should_retain_index = len(index_fields) > 0
data_fields = cast(DataFrameType, return_type).data_fields
return_schema = cast(DataFrameType, return_type).spark_type
else:
# any axis is fine.
should_return_series = True
spark_type = cast(ScalarType, return_type).spark_type
dtype = cast(ScalarType, return_type).dtype
data_fields = [
InternalField(
dtype=dtype,
struct_field=StructField(
name=SPARK_DEFAULT_SERIES_NAME, dataType=spark_type
),
)
]
return_schema = StructType([field.struct_field for field in data_fields])
column_labels = [None]
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, apply_func, return_schema, retain_index=should_retain_index
)
sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
index_spark_columns = None
index_names: Optional[List[Optional[Tuple[Any, ...]]]] = None
if should_retain_index:
index_spark_columns = [
scol_for(sdf, index_field.struct_field.name) for index_field in index_fields
]
if not any(
[
SPARK_INDEX_NAME_PATTERN.match(index_field.struct_field.name)
for index_field in index_fields
]
):
index_names = [(index_field.struct_field.name,) for index_field in index_fields]
internal = InternalFrame(
spark_frame=sdf,
index_names=index_names,
index_spark_columns=index_spark_columns,
index_fields=index_fields,
data_fields=data_fields,
column_labels=column_labels,
)
result: DataFrame = DataFrame(internal)
if should_return_series:
return first_series(result)
else:
return result
def transform(
self, func: Callable[..., "Series"], axis: Axis = 0, *args: Any, **kwargs: Any
) -> "DataFrame":
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify the return type in ``func``, for instance, as below:
>>> def square(x) -> ps.Series[np.int32]:
... return x ** 2
pandas-on-Spark uses the return type hint and does not try to infer the type.
.. note:: the series within ``func`` is actually multiple pandas series as the
segments of the whole pandas-on-Spark series; therefore, the length of each series
is not guaranteed. As an example, an aggregation against each series
does not work as a global aggregation but as an aggregation of each segment. See
below:
>>> def func(x) -> ps.Series[np.int32]:
... return x + sum(x)
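For instance, with a long column each internal batch is increased by the sum of
that batch rather than by the global sum (illustrative sketch; the actual numbers
depend on the internal batching, hence the skip directive):
>>> ps.DataFrame({'A': range(1000)}).transform(func)  # doctest: +SKIP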
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
See Also
--------
DataFrame.aggregate : Only perform aggregating type operations.
DataFrame.apply : Invoke function on DataFrame.
Series.transform : The equivalent function for Series.
Examples
--------
>>> df = ps.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ps.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
You can omit the type hint and let pandas-on-Spark infer its type.
>>> df.transform(lambda x: x ** 2)
A B
0 0 1
1 1 4
2 4 9
For multi-index columns:
>>> df.columns = [('X', 'A'), ('X', 'B')]
>>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
>>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 2
2 2 3
You can also specify extra arguments.
>>> def calculation(x, y, z) -> ps.Series[int]:
... return x ** y + z
>>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 20 21
1 21 1044
2 1044 59069
"""
if not isinstance(func, types.FunctionType):
assert callable(func), "the first argument should be a callable function."
f = func
# Note that the return type hint specified here affects actual return
# type in Spark (e.g., infer_return_type). And, MyPy does not allow
# redefinition of a function.
func = lambda *args, **kwargs: f(*args, **kwargs) # noqa: E731
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first 1000 records to get the return type.
# If there were fewer than 1000 records, the pandas API is used directly as a shortcut.
log_advice(
"If the type hints is not specified for `transform`, "
"it is expensive to infer the data type internally."
)
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
transformed = pdf.transform(func, axis, *args, **kwargs) # type: ignore[arg-type]
psdf: DataFrame = DataFrame(transformed)
if len(pdf) <= limit:
return psdf
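# Reuse the schema inferred from the sampled result: transform each column with
# `_transform_batch`, pinning its return type to the corresponding inferred field.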
applied = []
data_fields = []
for input_label, output_label in zip(
self._internal.column_labels, psdf._internal.column_labels
):
psser = self._psser_for(input_label)
field = psdf._internal.field_for(output_label).normalize_spark_type()
data_fields.append(field)
return_schema = field.spark_type
applied.append(
psser.pandas_on_spark._transform_batch(
func=lambda c: func(c, *args, **kwargs),
return_type=SeriesType(field.dtype, return_schema),
)
)
internal = self._internal.with_new_columns(applied, data_fields=data_fields)
return DataFrame(internal)
else:
return self._apply_series_op(
lambda psser: psser.pandas_on_spark.transform_batch(func, *args, **kwargs)
)
def pop(self, item: Name) -> "DataFrame":
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._update_internal_frame(self.drop(columns=item)._internal)
return result
# TODO: make the axis parameter work when it is 1 or 'columns'
def xs(self, key: Name, axis: Axis = 0, level: Optional[int] = None) -> DataFrameOrSeries:
"""
Return cross-section from the DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : 0 or 'index', default 0
Axis to retrieve cross-section on.
Currently only supports 0 or 'index'.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
DataFrame or Series
Cross-section from the original DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = ps.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
locomotion
walks 4 0
>>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE
num_legs 4
num_wings 0
Name: (mammal, dog, walks), dtype: int64
Get values at specified index and level
>>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class locomotion
mammal walks 4 0
"""
from pyspark.pandas.series import first_series
if not is_name_like_value(key):
raise TypeError("'key' should be a scalar value or tuple that contains scalar values")
if level is not None and is_name_like_tuple(key):
raise KeyError(key)
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if not is_name_like_tuple(key):
key = (key,)
if len(key) > self._internal.index_level:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(key), self._internal.index_level
)
)
if level is None:
level = 0
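# Build one equality predicate per key element against the corresponding index level
# (starting at `level`), then AND them together to filter the cross-section.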
rows = [
self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level)
]
internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows))
if len(key) == self._internal.index_level:
psdf: DataFrame = DataFrame(internal)
pdf = psdf.head(2)._to_internal_pandas()
if len(pdf) == 0:
raise KeyError(key)
elif len(pdf) > 1:
return psdf
else:
return first_series(DataFrame(pdf.transpose()))
else:
index_spark_columns = (
internal.index_spark_columns[:level]
+ internal.index_spark_columns[level + len(key) :]
)
index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
index_fields = internal.index_fields[:level] + internal.index_fields[level + len(key) :]
internal = internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_fields=index_fields,
).resolved_copy
return DataFrame(internal)
def between_time(
self,
start_time: Union[datetime.time, str],
end_time: Union[datetime.time, str],
include_start: bool = True,
include_end: bool = True,
axis: Axis = 0,
) -> "DataFrame":
"""
Select values between particular times of the day (example: 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
>>> psdf
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> psdf.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> psdf.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("between_time currently only works for axis=0")
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
psdf = self.copy()
psdf.index.name = verify_temp_column_name(psdf, "__index_name__")
return_types = [psdf.index.dtype] + list(psdf.dtypes)
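# `pandas_between_time` moves the original index into the first column via
# `reset_index()`; `return_types` above (index dtype followed by the data dtypes)
# describes that layout so the index can be restored afterwards.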
def pandas_between_time( # type: ignore[no-untyped-def]
pdf,
) -> ps.DataFrame[return_types]: # type: ignore[valid-type]
return pdf.between_time(start_time, end_time, include_start, include_end).reset_index()
# apply_batch will remove the index of the pandas-on-Spark DataFrame and attach a
# default index, which will never be used. So use "distributed" index as a dummy to
# avoid overhead.
with option_context("compute.default_index_type", "distributed"):
psdf = psdf.pandas_on_spark.apply_batch(pandas_between_time)
return DataFrame(
self._internal.copy(
spark_frame=psdf._internal.spark_frame,
index_spark_columns=psdf._internal.data_spark_columns[:1],
index_fields=psdf._internal.data_fields[:1],
data_spark_columns=psdf._internal.data_spark_columns[1:],
data_fields=psdf._internal.data_fields[1:],
)
)
# TODO: implement axis=1
def at_time(
self, time: Union[datetime.time, str], asof: bool = False, axis: Axis = 0
) -> "DataFrame":
"""
Select values at particular time of day (example: 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
>>> psdf
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> psdf.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if asof:
raise NotImplementedError("'asof' argument is not supported")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("at_time currently only works for axis=0")
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
psdf = self.copy()
psdf.index.name = verify_temp_column_name(psdf, "__index_name__")
return_types = [psdf.index.dtype] + list(psdf.dtypes)
def pandas_at_time( # type: ignore[no-untyped-def]
pdf,
) -> ps.DataFrame[return_types]: # type: ignore[valid-type]
return pdf.at_time(time, asof, axis).reset_index()
# apply_batch will remove the index of the pandas-on-Spark DataFrame and attach
# a default index, which will never be used. So use "distributed" index as a dummy
# to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
psdf = psdf.pandas_on_spark.apply_batch(pandas_at_time)
return DataFrame(
self._internal.copy(
spark_frame=psdf._internal.spark_frame,
index_spark_columns=psdf._internal.data_spark_columns[:1],
index_fields=psdf._internal.data_fields[:1],
data_spark_columns=psdf._internal.data_spark_columns[1:],
data_fields=psdf._internal.data_fields[1:],
)
)
def where(
self,
cond: DataFrameOrSeries,
other: Union[DataFrameOrSeries, Any] = np.nan,
axis: Axis = None,
) -> "DataFrame":
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean DataFrame
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is False are replaced with corresponding value from other.
axis : int, default None
Can only be set to 0 at the moment for compatibility with pandas.
Returns
-------
DataFrame
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.where(df1 > 0).sort_index()
A B
0 NaN 100.0
1 1.0 200.0
2 2.0 300.0
3 3.0 400.0
4 4.0 500.0
>>> df1.where(df1 > 1, 10).sort_index()
A B
0 10 100
1 10 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df1 + 100).sort_index()
A B
0 100 100
1 101 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df2).sort_index()
A B
0 0 100
1 -1 200
2 2 300
3 3 400
4 4 500
When the column names of cond differ from those of self, all values are treated as False
>>> cond = ps.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0
>>> cond
C D
0 True False
1 False True
2 False False
3 True False
4 False True
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
When the type of cond is Series, only its boolean values are checked, regardless of column name
>>> cond = ps.Series([1, 2]) > 1
>>> cond
0 False
1 True
dtype: bool
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 1.0 200.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> reset_option("compute.ops_on_diff_frames")
"""
from pyspark.pandas.series import Series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
tmp_cond_col_name = "__tmp_cond_col_{}__".format
tmp_other_col_name = "__tmp_other_col_{}__".format
psdf = self.copy()
tmp_cond_col_names = [
tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels
]
if isinstance(cond, DataFrame):
cond = cond[
[
(
cond._internal.spark_column_for(label)
if label in cond._internal.column_labels
else SF.lit(False)
).alias(name)
for label, name in zip(self._internal.column_labels, tmp_cond_col_names)
]
]
psdf[tmp_cond_col_names] = cond
elif isinstance(cond, Series):
cond = cond.to_frame()
cond = cond[
[cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]
]
psdf[tmp_cond_col_names] = cond
else:
raise TypeError("type of cond must be a DataFrame or Series")
tmp_other_col_names = [
tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels
]
if isinstance(other, DataFrame):
other = other[
[
(
other._internal.spark_column_for(label)
if label in other._internal.column_labels
else SF.lit(np.nan)
).alias(name)
for label, name in zip(self._internal.column_labels, tmp_other_col_names)
]
]
psdf[tmp_other_col_names] = other
elif isinstance(other, Series):
other = other.to_frame()
other = other[
[other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]
]
psdf[tmp_other_col_names] = other
else:
for label in self._internal.column_labels:
psdf[tmp_other_col_name(name_like_string(label))] = other
# The logic above makes the Spark DataFrame look like below:
# +-----------------+---+---+------------------+-------------------+------------------+--...
# |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...
# +-----------------+---+---+------------------+-------------------+------------------+--...
# | 0| 0|100| true| 0| false| ...
# | 1| 1|200| false| -1| false| ...
# | 3| 3|400| true| -3| false| ...
# | 2| 2|300| false| -2| true| ...
# | 4| 4|500| false| -4| false| ...
# +-----------------+---+---+------------------+-------------------+------------------+--...
data_spark_columns = []
for label in self._internal.column_labels:
data_spark_columns.append(
F.when(
psdf[tmp_cond_col_name(name_like_string(label))].spark.column,
psdf._internal.spark_column_for(label),
)
.otherwise(psdf[tmp_other_col_name(name_like_string(label))].spark.column)
.alias(psdf._internal.spark_column_name_for(label))
)
return DataFrame(
psdf._internal.with_new_columns(
data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes?
)
)
def mask(
self, cond: DataFrameOrSeries, other: Union[DataFrameOrSeries, Any] = np.nan
) -> "DataFrame":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean DataFrame
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ps.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.mask(df1 > 0).sort_index()
A B
0 0.0 NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> df1.mask(df1 > 1, 10).sort_index()
A B
0 0 10
1 1 10
2 10 10
3 10 10
4 10 10
>>> df1.mask(df1 > 1, df1 + 100).sort_index()
A B
0 0 200
1 1 300
2 102 400
3 103 500
4 104 600
>>> df1.mask(df1 > 1, df2).sort_index()
A B
0 0 -100
1 1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> reset_option("compute.ops_on_diff_frames")
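``mask`` is the inverse of ``DataFrame.where``: internally it negates ``cond`` and
delegates to ``where``, so the two calls below give the same result (illustrative
sketch, outputs skipped):
>>> df1.mask(df1 > 1, 10).sort_index()  # doctest: +SKIP
>>> df1.where(df1 <= 1, 10).sort_index()  # doctest: +SKIP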
"""
from pyspark.pandas.series import Series
if not isinstance(cond, (DataFrame, Series)):
raise TypeError("type of cond must be a DataFrame or Series")
cond_inversed = cond._apply_series_op(lambda psser: ~psser)
return self.where(cond_inversed, other)
@property
def index(self) -> "Index":
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
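Examples
--------
A minimal sketch (the exact ``Index`` repr can vary across versions, hence the skip):
>>> df = ps.DataFrame({'a': [1, 2, 3]}, index=[10, 20, 30])
>>> df.index  # doctest: +SKIP
Int64Index([10, 20, 30], dtype='int64')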
"""
from pyspark.pandas.indexes.base import Index
return Index._new_instance(self)
@property
def empty(self) -> bool:
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ps.range(10).empty
False
>>> ps.range(0).empty
True
>>> ps.DataFrame({}, index=list('abc')).empty
True
"""
return (
len(self._internal.column_labels) == 0
or self._internal.resolved_copy.spark_frame.rdd.isEmpty()
)
@property
def style(self) -> "Styler":
"""
Property returning a Styler object containing methods for
building a styled HTML representation for the DataFrame.
.. note:: currently it collects the top 1000 rows and returns its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ps.range(1001).style # doctest: +SKIP
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option("compute.max_rows")
pdf = self.head(max_results + 1)._to_internal_pandas()
if len(pdf) > max_results:
warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(
self,
keys: Union[Name, List[Name]],
drop: bool = True,
append: bool = False,
inplace: bool = False,
) -> Optional["DataFrame"]:
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ps.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
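The existing index can be kept as well by passing ``append=True`` (illustrative
sketch, output omitted):
>>> df.set_index('year', append=True)  # doctest: +SKIP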
"""
inplace = validate_bool_kwarg(inplace, "inplace")
key_list: List[Label]
if is_name_like_tuple(keys):
key_list = [cast(Label, keys)]
elif is_name_like_value(keys):
key_list = [(keys,)]
else:
key_list = [key if is_name_like_tuple(key) else (key,) for key in keys]
columns = set(self._internal.column_labels)
for key in key_list:
if key not in columns:
raise KeyError(name_like_string(key))
if drop:
column_labels = [
label for label in self._internal.column_labels if label not in key_list
]
else:
column_labels = self._internal.column_labels
if append:
index_spark_columns = self._internal.index_spark_columns + [
self._internal.spark_column_for(label) for label in key_list
]
index_names = self._internal.index_names + key_list
index_fields = self._internal.index_fields + [
self._internal.field_for(label) for label in key_list
]
else:
index_spark_columns = [self._internal.spark_column_for(label) for label in key_list]
index_names = key_list
index_fields = [self._internal.field_for(label) for label in key_list]
internal = self._internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels],
data_fields=[self._internal.field_for(label) for label in column_labels],
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def reset_index(
self,
level: Optional[Union[int, Name, Sequence[Union[int, Name]]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: int = 0,
col_fill: str = "",
) -> Optional["DataFrame"]:
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ps.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, pandas-on-Spark
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ps.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
multi_index = self._internal.index_level > 1
def rename(index: int) -> Label:
if multi_index:
return ("level_{}".format(index),)
else:
if ("index",) not in self._internal.column_labels:
return ("index",)
else:
return ("level_{}".format(index),)
if level is None:
new_column_labels = [
name if name is not None else rename(i)
for i, name in enumerate(self._internal.index_names)
]
new_data_spark_columns = [
scol.alias(name_like_string(label))
for scol, label in zip(self._internal.index_spark_columns, new_column_labels)
]
new_data_fields = self._internal.index_fields
index_spark_columns = []
index_names = []
index_fields = []
else:
if is_list_like(level):
level = list(cast(Sequence[Union[int, Name]], level))
if isinstance(level, int) or is_name_like_tuple(level):
level_list = [cast(Union[int, Label], level)]
elif is_name_like_value(level):
level_list = [(level,)]
else:
level_list = [
lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,)
for lvl in level
]
if all(isinstance(lvl, int) for lvl in level_list):
int_level_list = cast(List[int], level_list)
for lev in int_level_list:
if lev >= self._internal.index_level:
raise IndexError(
"Too many levels: Index has only {} level, not {}".format(
self._internal.index_level, lev + 1
)
)
idx = int_level_list
elif all(is_name_like_tuple(lev) for lev in level_list):
idx = []
for label in cast(List[Label], level_list):
try:
i = self._internal.index_names.index(label)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError("Level unknown not found")
else:
raise KeyError(
"Level unknown must be same as name ({})".format(
name_like_string(self._internal.index_names[0])
)
)
else:
raise ValueError("Level should be all int or all string.")
idx.sort()
new_column_labels = []
new_data_spark_columns = []
new_data_fields = []
index_spark_columns = self._internal.index_spark_columns.copy()
index_names = self._internal.index_names.copy()
index_fields = self._internal.index_fields.copy()
for i in idx[::-1]:
name = index_names.pop(i)
new_column_labels.insert(0, name if name is not None else rename(i))
scol = index_spark_columns.pop(i)
new_data_spark_columns.insert(0, scol.alias(name_like_string(name)))
new_data_fields.insert(0, index_fields.pop(i).copy(name=name_like_string(name)))
if drop:
new_data_spark_columns = []
new_column_labels = []
new_data_fields = []
for label in new_column_labels:
if label in self._internal.column_labels:
raise ValueError("cannot insert {}, already exists".format(name_like_string(label)))
if self._internal.column_labels_level > 1:
column_depth = len(self._internal.column_labels[0])
if col_level >= column_depth:
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(
column_depth, col_level + 1
)
)
if any(col_level + len(label) > column_depth for label in new_column_labels):
raise ValueError("Item must have length equal to number of levels.")
new_column_labels = [
tuple(
([col_fill] * col_level)
+ list(label)
+ ([col_fill] * (column_depth - (len(label) + col_level)))
)
for label in new_column_labels
]
internal = self._internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_fields=index_fields,
column_labels=new_column_labels + self._internal.column_labels,
data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns,
data_fields=new_data_fields + self._internal.data_fields,
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def isnull(self) -> "DataFrame":
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ps.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
return self._apply_series_op(lambda psser: psser.isnull())
isna = isnull
def notnull(self) -> "DataFrame":
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ps.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
return self._apply_series_op(lambda psser: psser.notnull())
notna = notnull
def insert(
self,
loc: int,
column: Name,
value: Union[Scalar, "Series", Iterable],
allow_duplicates: bool = False,
) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
Examples
--------
>>> psdf = ps.DataFrame([1, 2, 3])
>>> psdf.sort_index()
0
0 1
1 2
2 3
>>> psdf.insert(0, 'x', 4)
>>> psdf.sort_index()
x 0
0 4 1
1 4 2
2 4 3
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> psdf.insert(1, 'y', [5, 6, 7])
>>> psdf.sort_index()
x y 0
0 4 5 1
1 4 6 2
2 4 7 3
>>> psdf.insert(2, 'z', ps.Series([8, 9, 10]))
>>> psdf.sort_index()
x y z 0
0 4 5 8 1
1 4 6 9 2
2 4 7 10 3
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(loc, int):
raise TypeError("loc must be int")
assert 0 <= loc <= len(self.columns)
assert allow_duplicates is False
if not is_name_like_value(column):
raise TypeError(
'"column" should be a scalar value or tuple that contains scalar values'
)
# TODO(SPARK-37723): Support tuple for non-MultiIndex column name.
if is_name_like_tuple(column):
if self._internal.column_labels_level > 1:
if len(column) != len(self.columns.levels): # type: ignore[attr-defined]
# To be consistent with pandas
raise ValueError('"column" must have length equal to number of column levels.')
else:
raise NotImplementedError(
"Assigning column name as tuple is only supported for MultiIndex columns "
"for now."
)
if column in self.columns:
raise ValueError("cannot insert %s, already exists" % str(column))
psdf = self.copy()
psdf[column] = value
columns = psdf.columns[:-1].insert(loc, psdf.columns[-1])
psdf = psdf[columns]
self._update_internal_frame(psdf._internal)
# TODO: add freq and axis parameters
def shift(self, periods: int = 1, fill_value: Optional[Any] = None) -> "DataFrame":
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This moves all the data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ps.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
return self._apply_series_op(
lambda psser: psser._shift(periods, fill_value), should_resolve=True
)
# TODO: axis should support 1 or 'columns' as well; only 0 or 'index' works at the moment
def diff(self, periods: int = 1, axis: Axis = 0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This moves all the data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._apply_series_op(lambda psser: psser._diff(periods), should_resolve=True)
# TODO: axis should support 1 or 'columns' as well; only 0 or 'index' works at the moment
def nunique(
self,
axis: Axis = 0,
dropna: bool = True,
approx: bool = False,
rsd: float = 0.05,
) -> "Series":
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to pandas-on-Spark and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to pandas-on-Spark.
Returns
-------
The number of unique values per column as a pandas-on-Spark Series.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
dtype: int64
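The estimation error of the approximate algorithm can be tuned with ``rsd``
(illustrative sketch; the approximate count may differ slightly from the exact one):
>>> df.nunique(approx=True, rsd=0.01)  # doctest: +SKIP
A    3
B    1
dtype: int64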
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(
[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
+ [
self._psser_for(label)._nunique(dropna, approx, rsd)
for label in self._internal.column_labels
]
)
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.max_rows", 1):
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
index_names=[None],
index_fields=[None],
data_spark_columns=[
scol_for(sdf, col) for col in self._internal.data_spark_column_names
],
data_fields=None,
)
return first_series(DataFrame(internal).transpose())
def round(self, decimals: Union[int, Dict[Name, int], "Series"] = 0) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
.. note:: If `decimals` is a Series, it is expected to be small,
as all the data is loaded into the driver's memory.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ps.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ps.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ps.Series):
decimals_dict = {
k if isinstance(k, tuple) else (k,): v
for k, v in decimals._to_internal_pandas().items()
}
elif isinstance(decimals, dict):
decimals_dict = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()}
elif isinstance(decimals, int):
decimals_dict = {k: decimals for k in self._internal.column_labels}
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
def op(psser: ps.Series) -> Union[ps.Series, Column]:
label = psser._column_label
if label in decimals_dict:
return F.round(psser.spark.column, decimals_dict[label])
else:
return psser
return self._apply_series_op(op)
def _mark_duplicates(
self,
subset: Optional[Union[Name, List[Name]]] = None,
keep: Union[bool, str] = "first",
) -> Tuple[SparkDataFrame, str]:
if subset is None:
subset_list = self._internal.column_labels
else:
if is_name_like_tuple(subset):
subset_list = [cast(Label, subset)]
elif is_name_like_value(subset):
subset_list = [(subset,)]
else:
subset_list = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset]
diff = set(subset_list).difference(set(self._internal.column_labels))
if len(diff) > 0:
raise KeyError(", ".join([name_like_string(d) for d in diff]))
group_cols = [self._internal.spark_column_name_for(label) for label in subset_list]
sdf = self._internal.resolved_copy.spark_frame
column = verify_temp_column_name(sdf, "__duplicated__")
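# Mark duplicates in a boolean helper column:
# - keep='first'/'last': within each group of `group_cols`, order by the natural
#   order column (ascending for 'first', descending for 'last') and flag every row
#   whose row_number is greater than 1.
# - keep=False: flag every row whose group contains more than one row.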
if keep == "first" or keep == "last":
if keep == "first":
ord_func = F.asc
else:
ord_func = F.desc
window = (
Window.partitionBy(*group_cols)
.orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME))
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
elif not keep:
window = Window.partitionBy(*group_cols).rowsBetween(
Window.unboundedPreceding, Window.unboundedFollowing
)
sdf = sdf.withColumn(column, F.count("*").over(window) > 1)
else:
raise ValueError("'keep' only supports 'first', 'last' and False")
return sdf, column
def duplicated(
self,
subset: Optional[Union[Name, List[Name]]] = None,
keep: Union[bool, str] = "first",
) -> "Series":
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ps.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
dtype: bool
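Only consider a subset of the columns when identifying duplicates (illustrative
sketch, output skipped):
>>> df.duplicated(subset=['a']).sort_index()  # doctest: +SKIP
0    False
1     True
2     True
3    False
dtype: bool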
"""
from pyspark.pandas.series import first_series
sdf, column = self._mark_duplicates(subset, keep)
sdf = sdf.select(
self._internal.index_spark_columns
+ [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]
)
return first_series(
DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=self._internal.index_fields,
column_labels=[None],
data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],
)
)
)
# TODO: support other as DataFrame or array-like
def dot(self, other: "Series") -> "Series":
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of another Series.
It can also be called using ``self @ other`` in Python >= 3.5.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misuse, this method
enforces the 'compute.max_rows' default limit on the input length and raises a ValueError.
>>> from pyspark.pandas.config import option_context
>>> with option_context(
... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True
... ): # doctest: +NORMALIZE_WHITESPACE
... psdf = ps.DataFrame({'a': range(1001)})
... psser = ps.Series([2], index=['a'])
... psdf.dot(psser)
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Parameters
----------
other : Series
The other object to compute the matrix product with.
Returns
-------
Series
Return the matrix product between self and other as a Series.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> psdf = ps.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> psser = ps.Series([1, 1, 2, 1])
>>> psdf.dot(psser)
0 -4
1 5
dtype: int64
Note how shuffling of the objects does not change the result.
>>> psser2 = psser.reindex([1, 0, 2, 3])
>>> psdf.dot(psser2)
0 -4
1 5
dtype: int64
>>> psdf @ psser2
0 -4
1 5
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, ps.Series):
raise TypeError("Unsupported type {}".format(type(other).__name__))
else:
return cast(ps.Series, other.dot(self.transpose())).rename(None)
def __matmul__(self, other: "Series") -> "Series":
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def to_table(
self,
name: str,
format: Optional[str] = None,
mode: str = "w",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> None:
if index_col is None:
log_advice(
"If `index_col` is not specified for `to_table`, "
"the existing index is lost when converting to table."
)
mode = validate_mode(mode)
return self.spark.to_table(name, format, mode, partition_cols, index_col, **options)
to_table.__doc__ = SparkFrameMethods.to_table.__doc__
def to_delta(
self,
path: str,
mode: str = "w",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str
Python write mode, default 'w'.
.. note:: mode can accept the strings for Spark writing mode,
such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.
- 'append' (equivalent to 'a'): Append the new data to existing data.
- 'overwrite' (equivalent to 'w'): Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') # doctest: +SKIP
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path,
... partition_cols=['date', 'country']) # doctest: +SKIP
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2012-01-01"') # doctest: +SKIP
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `to_delta`, "
"the existing index is lost when converting to Delta."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore[assignment]
mode = validate_mode(mode)
self.spark.to_spark_io(
path=path,
mode=mode,
format="delta",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_parquet(
self,
path: str,
mode: str = "w",
partition_cols: Optional[Union[str, List[str]]] = None,
compression: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any,
) -> None:
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str
Python write mode, default 'w'.
            .. note:: mode can accept the strings for Spark writing mode,
                such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.
- 'append' (equivalent to 'a'): Append the new data to existing data.
- 'overwrite' (equivalent to 'w'): Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `to_parquet`, "
"the existing index is lost when converting to Parquet."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options")
mode = validate_mode(mode)
builder = self.to_spark(index_col=index_col).write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
if compression is not None:
builder.option("compression", compression)
builder.options(**options).format("parquet").save(path)
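    # A rough equivalent of the writer chain above, expressed against the plain Spark
    # writer API (the `psdf` object, "idx", and the output path are hypothetical):
    #
    #     sdf = psdf.to_spark(index_col="idx")
    #     (sdf.write.mode("overwrite")
    #         .partitionBy("date")
    #         .option("compression", "snappy")
    #         .format("parquet")
    #         .save("/tmp/example_parquet"))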
def to_orc(
self,
path: str,
mode: str = "w",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""
        Write the DataFrame out as an ORC file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str
Python write mode, default 'w'.
            .. note:: mode can accept the strings for Spark writing mode,
                such as 'append', 'overwrite', 'ignore', 'error', 'errorifexists'.
- 'append' (equivalent to 'a'): Append the new data to existing data.
- 'overwrite' (equivalent to 'w'): Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent pandas-on-Spark's index. The index name
in pandas-on-Spark is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_orc
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ps.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')
>>> df.to_orc(
... '%s/to_orc/foo.orc' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if index_col is None:
log_advice(
"If `index_col` is not specified for `to_orc`, "
"the existing index is lost when converting to ORC."
)
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore[assignment]
mode = validate_mode(mode)
self.spark.to_spark_io(
path=path,
mode=mode,
format="orc",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: "OptionalPrimitiveType",
) -> None:
"""An alias for :func:`DataFrame.spark.to_spark_io`.
See :meth:`pyspark.pandas.spark.accessors.SparkFrameMethods.to_spark_io`.
.. deprecated:: 3.2.0
Use :func:`DataFrame.spark.to_spark_io` instead.
"""
warnings.warn("Deprecated in 3.2, Use DataFrame.spark.to_spark_io instead.", FutureWarning)
return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)
to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
if index_col is None:
log_advice(
"If `index_col` is not specified for `to_spark`, "
"the existing index is lost when converting to Spark DataFrame."
)
return self._to_spark(index_col)
    to_spark.__doc__ = SparkFrameMethods.to_spark.__doc__
def _to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
"""
        Same as `to_spark()`, without issuing the advice log when `index_col` is not specified
for internal usage.
"""
return self.spark.frame(index_col)
def to_pandas(self) -> pd.DataFrame:
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
log_advice(
"`to_pandas` loads all data into the driver's memory. "
"It should only be used if the resulting pandas DataFrame is expected to be small."
)
return self._to_pandas()
def _to_pandas(self) -> pd.DataFrame:
"""
        Same as `to_pandas()`, without issuing the advice log for internal usage.
"""
return self._internal.to_pandas_frame.copy()
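    # A minimal sketch of the difference between the two entry points above: `to_pandas`
    # logs an advice message before collecting, while `_to_pandas` is the silent
    # internal variant. Both return a copy of the driver-side pandas frame.
    #
    #     pdf = psdf.to_pandas()    # logs the "loads all data into the driver" advice
    #     pdf = psdf._to_pandas()   # same result, no advice log (internal use)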
def assign(self, **kwargs: Any) -> "DataFrame":
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable, Series or Index}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas-on-Spark doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ps.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15,
... temp_idx=df.index)
>>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]
temp_c temp_f temp_k temp_idx
Portland 17.0 62.6 290.15 Portland
Berkeley 25.0 77.0 298.15 Berkeley
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
pandas-on-Spark. In pandas-on-Spark, all items are computed first,
and then assigned.
"""
return self._assign(kwargs)
def _assign(self, kwargs: Any) -> "DataFrame":
assert isinstance(kwargs, dict)
from pyspark.pandas.indexes import MultiIndex
from pyspark.pandas.series import IndexOpsMixin
for k, v in kwargs.items():
is_invalid_assignee = (
not (isinstance(v, (IndexOpsMixin, Column)) or callable(v) or is_scalar(v))
) or isinstance(v, MultiIndex)
if is_invalid_assignee:
raise TypeError(
"Column assignment doesn't support type " "{0}".format(type(v).__name__)
)
if callable(v):
kwargs[k] = v(self)
pairs = {
(k if is_name_like_tuple(k) else (k,)): (
(v.spark.column, v._internal.data_fields[0])
if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex)
else (v, None)
if isinstance(v, Column)
else (SF.lit(v), None)
)
for k, v in kwargs.items()
}
scols = []
data_fields = []
for label in self._internal.column_labels:
for i in range(len(label)):
if label[: len(label) - i] in pairs:
scol, field = pairs[label[: len(label) - i]]
name = self._internal.spark_column_name_for(label)
scol = scol.alias(name)
if field is not None:
field = field.copy(name=name)
break
else:
scol = self._internal.spark_column_for(label)
field = self._internal.field_for(label)
scols.append(scol)
data_fields.append(field)
column_labels = self._internal.column_labels.copy()
for label, (scol, field) in pairs.items():
if label not in set(i[: len(label)] for i in self._internal.column_labels):
name = name_like_string(label)
scols.append(scol.alias(name))
if field is not None:
field = field.copy(name=name)
data_fields.append(field)
column_labels.append(label)
level = self._internal.column_labels_level
column_labels = [
tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels
]
internal = self._internal.with_new_columns(
scols, column_labels=column_labels, data_fields=data_fields
)
return DataFrame(internal)
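    # A small illustration of how `_assign` resolves its inputs (hypothetical frame):
    # callables are evaluated against `self` first, then every value is normalized to
    # a (Spark column, field) pair before the internal frame is rebuilt.
    #
    #     psdf = ps.DataFrame({"temp_c": [17.0, 25.0]})
    #     psdf.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,   # callable -> evaluated Series
    #                 scale="celsius")                          # literal  -> SF.lit column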
@staticmethod
def from_records(
data: Union[np.ndarray, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.ndarray] = None,
exclude: list = None,
columns: list = None,
coerce_float: bool = False,
nrows: int = None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ps.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ps.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ps.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(
pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)
)
def to_records(
self,
index: bool = True,
column_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,
index_dtypes: Optional[Union[str, Dtype, Dict[Name, Union[str, Dtype]]]] = None,
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
psdf = self
return validate_arguments_and_invoke_function(
psdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args
)
def copy(self, deep: bool = True) -> "DataFrame":
"""
Make a copy of this object's indices and data.
Parameters
----------
deep : bool, default True
            This parameter is not supported; it is only a dummy parameter to match pandas.
Returns
-------
copy : DataFrame
Examples
--------
>>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df_copy = df.copy()
>>> df_copy
x y z w
0 1 3 5 7
1 2 4 6 8
"""
return DataFrame(self._internal)
def dropna(
self,
axis: Axis = 0,
how: str = "any",
thresh: Optional[int] = None,
subset: Optional[Union[Name, List[Name]]] = None,
inplace: bool = False,
) -> Optional["DataFrame"]:
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ps.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
axis = validate_axis(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
if thresh is None:
if how is None:
raise TypeError("must specify how or thresh")
elif how not in ("any", "all"):
raise ValueError("invalid how option: {h}".format(h=how))
labels: Optional[List[Label]]
if subset is not None:
if isinstance(subset, str):
labels = [(subset,)]
elif isinstance(subset, tuple):
labels = [subset]
else:
labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
else:
labels = None
if axis == 0:
if labels is not None:
invalids = [label for label in labels if label not in self._internal.column_labels]
if len(invalids) > 0:
raise KeyError(invalids)
else:
labels = self._internal.column_labels
cnt = reduce(
lambda x, y: x + y,
[
F.when(self._psser_for(label).notna().spark.column, 1).otherwise(0)
for label in labels
],
SF.lit(0),
)
if thresh is not None:
pred = cnt >= SF.lit(int(thresh))
elif how == "any":
pred = cnt == SF.lit(len(labels))
elif how == "all":
pred = cnt > SF.lit(0)
internal = self._internal.with_filter(pred)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
else:
assert axis == 1
internal = self._internal.resolved_copy
if labels is not None:
if any(len(lbl) != internal.index_level for lbl in labels):
raise ValueError(
"The length of each subset must be the same as the index size."
)
cond = reduce(
lambda x, y: x | y,
[
reduce(
lambda x, y: x & y,
[
scol == SF.lit(part)
for part, scol in zip(lbl, internal.index_spark_columns)
],
)
for lbl in labels
],
)
internal = internal.with_filter(cond)
psdf: DataFrame = DataFrame(internal)
null_counts = []
for label in internal.column_labels:
psser = psdf._psser_for(label)
cond = psser.isnull().spark.column
null_counts.append(
F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label))
)
counts = internal.spark_frame.select(null_counts + [F.count("*")]).head()
if thresh is not None:
column_labels = [
label
for label, cnt in zip(internal.column_labels, counts)
if (cnt or 0) >= int(thresh)
]
elif how == "any":
column_labels = [
label
for label, cnt in zip(internal.column_labels, counts)
if (cnt or 0) == counts[-1]
]
elif how == "all":
column_labels = [
label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0
]
psdf = self[column_labels]
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf
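    # A hedged summary of the row-wise (`axis=0`) predicate built above: per row the
    # code counts non-null cells among the inspected labels and keeps the row when
    #
    #     thresh given   ->  count >= thresh
    #     how == "any"   ->  count == len(labels)   (no nulls at all)
    #     how == "all"   ->  count > 0              (at least one non-null)
    #
    # e.g. `psdf.dropna(thresh=2)` keeps rows having at least two non-NA values.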
# TODO: add 'limit' when value parameter exists
def fillna(
self,
value: Optional[Union[Any, Dict[Name, Any]]] = None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit: Optional[int] = None,
) -> Optional["DataFrame"]:
"""Fill NA/NaN values.
.. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
            without specifying partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
            Value to use to fill holes. Alternately, a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series.
            pad / ffill: propagate the last valid observation forward to the next valid one.
            backfill / bfill: use the NEXT valid observation to fill the gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
            Must be greater than 0 if not None.
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ps.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is not None:
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value).__name__)
if limit is not None:
raise ValueError("limit parameter for value is not support now")
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v).__name__)
value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()}
def op(psser: ps.Series) -> ps.Series:
label = psser._column_label
for k, v in value.items():
if k == label[: len(k)]:
return psser._fillna(
value=value[k], method=method, axis=axis, limit=limit
)
else:
return psser
else:
def op(psser: ps.Series) -> ps.Series:
return psser._fillna(value=value, method=method, axis=axis, limit=limit)
elif method is not None:
def op(psser: ps.Series) -> ps.Series:
return psser._fillna(value=value, method=method, axis=axis, limit=limit)
else:
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
psdf = self._apply_series_op(op, should_resolve=(method is not None))
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._update_internal_frame(psdf._internal, requires_same_anchor=False)
return None
else:
return psdf
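    # A minimal usage sketch (hypothetical frame): a dict (or Series) value is mapped
    # to per-column fill values, each applied through `Series._fillna`.
    #
    #     psdf = ps.DataFrame({"A": [None, 3.0], "B": [2.0, None]})
    #     psdf.fillna({"A": 0, "B": 1})        # column-wise constants
    #     psdf.fillna(method="ffill")          # forward-fill; see the Window caveat above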
def replace(
self,
to_replace: Optional[Union[Any, List, Tuple, Dict]] = None,
value: Optional[Any] = None,
inplace: bool = False,
limit: Optional[int] = None,
regex: bool = False,
method: str = "pad",
) -> Optional["DataFrame"]:
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, list, tuple or dict
Value to be replaced.
value : int, float, string, list or tuple
Value to use to replace holes. The replacement value must be an int, float,
or string.
            If value is a list or tuple, value should be of the same length as to_replace.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ps.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
        Dicts can be used to specify different replacement values for different existing values.
        To use a dict in this way, the value parameter should be None.
>>> df.replace({'Mjolnir': 'Stormbuster'})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
        Dicts can specify that different values should be replaced in different columns.
        The value parameter should not be None in this case.
>>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Nested dictionaries
The value parameter should be None to use a nested dict in this way
>>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
"""
if method != "pad":
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
inplace = validate_bool_kwarg(inplace, "inplace")
if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)):
raise TypeError("Unsupported type {}".format(type(value).__name__))
if to_replace is not None and not isinstance(
to_replace, (int, float, str, list, tuple, dict)
):
raise TypeError("Unsupported type {}".format(type(to_replace).__name__))
if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)):
if len(value) != len(to_replace):
raise ValueError("Length of to_replace and value must be same")
if isinstance(to_replace, dict) and (
value is not None or all(isinstance(i, dict) for i in to_replace.values())
):
to_replace_dict = to_replace
def op(psser: ps.Series) -> ps.Series:
if psser.name in to_replace_dict:
return psser.replace(
to_replace=to_replace_dict[psser.name], value=value, regex=regex
)
else:
return psser
else:
def op(psser: ps.Series) -> ps.Series:
return psser.replace(to_replace=to_replace, value=value, regex=regex)
psdf = self._apply_series_op(op)
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf
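    # A hedged usage sketch (hypothetical frame): a dict whose values are themselves
    # dicts is treated as per-column replacement maps, handled by the first `op`
    # branch above; other inputs are applied to every column via `Series.replace`.
    #
    #     psdf.replace({"weapon": {"Mjolnir": "Stormbuster"}})   # per-column map
    #     psdf.replace("Ironman", "War-Machine")                 # applied to all columns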
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ps.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ps.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise TypeError(
"List-like value are not supported for 'lower' and 'upper' at the " + "moment"
)
if lower is None and upper is None:
return self
return self._apply_series_op(lambda psser: psser.clip(lower=lower, upper=upper))
def head(self, n: int = 5) -> "DataFrame":
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
if n < 0:
n = len(self) + n
if n <= 0:
return DataFrame(self._internal.with_filter(SF.lit(False)))
else:
sdf = self._internal.resolved_copy.spark_frame
if get_option("compute.ordered_head"):
sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)
return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))
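    # A brief note on determinism, sketched as usage (the option name is real, the
    # frame is hypothetical): `head` is backed by Spark's `limit`, so row order is
    # only guaranteed when "compute.ordered_head" is enabled.
    #
    #     with ps.option_context("compute.ordered_head", True):
    #         psdf.head(3)   # rows come back in natural (insertion) order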
def last(self, offset: Union[str, DateOffset]) -> "DataFrame":
"""
Select final periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the last few rows based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
>>> psdf
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> psdf.last('3D')
A
2018-04-13 3
2018-04-15 4
        Notice that the data for the last 3 calendar days were returned, not the last
        3 observed days in the dataset, and therefore data for 2018-04-11 was
        not returned.
"""
        # Check that the index is a DatetimeIndex
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex")
offset_: Optional[DateOffset] = to_offset(offset)
assert offset_ is not None
from_date = cast(datetime.datetime, self.index.max()) - offset_ # type: ignore[operator]
return cast(DataFrame, self.loc[from_date:])
def first(self, offset: Union[str, DateOffset]) -> "DataFrame":
"""
Select first periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the first few rows based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the first 3 days.
Returns
-------
DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> psdf = ps.DataFrame({'A': [1, 2, 3, 4]}, index=index)
>>> psdf
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> psdf.first('3D')
A
2018-04-09 1
2018-04-11 2
        Notice that the data for the first 3 calendar days were returned, not the first
        3 observed days in the dataset, and therefore data for 2018-04-13 was
        not returned.
"""
        # Check that the index is a DatetimeIndex
if not isinstance(self.index, ps.DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex")
offset_: Optional[DateOffset] = to_offset(offset)
assert offset_ is not None
to_date = cast(datetime.datetime, self.index.min()) + offset_ # type: ignore[operator]
return cast(DataFrame, self.loc[:to_date]) # type: ignore[misc]
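    # A minimal sketch of the offset arithmetic shared by `first` and `last`
    # (hypothetical frame with a DatetimeIndex):
    #
    #     offset_ = to_offset("3D")
    #     psdf.loc[: psdf.index.min() + offset_]    # what first("3D") selects
    #     psdf.loc[psdf.index.max() - offset_ :]    # what last("3D") selects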
def pivot_table(
self,
values: Optional[Union[Name, List[Name]]] = None,
index: Optional[List[Name]] = None,
columns: Optional[Name] = None,
aggfunc: Union[str, Dict[Name, str]] = "mean",
fill_value: Optional[Any] = None,
) -> "DataFrame":
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
            They should be either a list of fewer than three column names or a string.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
            The list should contain strings.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ps.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4.0 5
two 7.0 6
foo one 4.0 1
two NaN 6
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values=['D'], index =['C'],
... columns="A", aggfunc={'D': 'mean'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D
A bar foo
C
large 5.5 2.000000
small 5.5 2.333333
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
large 5.5 2.000000 15 9
small 5.5 2.333333 17 13
"""
if not is_name_like_value(columns):
raise TypeError("columns should be one column name.")
if not is_name_like_value(values) and not (
isinstance(values, list) and all(is_name_like_value(v) for v in values)
):
raise TypeError("values should be one column or list of columns.")
if not isinstance(aggfunc, str) and (
not isinstance(aggfunc, dict)
or not all(
is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items()
)
):
raise TypeError(
"aggfunc must be a dict mapping from column name "
"to aggregate functions (string)."
)
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError(
"pivot_table doesn't support aggfunc" " as dict and without index."
)
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if columns not in self.columns:
raise ValueError("Wrong columns {}.".format(name_like_string(columns)))
if not is_name_like_tuple(columns):
columns = (columns,)
if isinstance(values, list):
values = [col if is_name_like_tuple(col) else (col,) for col in values]
if not all(
isinstance(self._internal.spark_type_for(col), NumericType) for col in values
):
raise TypeError("values should be a numeric type.")
else:
values = values if is_name_like_tuple(values) else (values,)
if not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError("values should be a numeric type.")
if isinstance(aggfunc, str):
if isinstance(values, list):
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(
self._internal.spark_column_name_for(value), aggfunc
)
)
for value in values
]
else:
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(
self._internal.spark_column_name_for(values), aggfunc
)
)
]
elif isinstance(aggfunc, dict):
aggfunc = {
key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items()
}
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value)
)
for key, value in aggfunc.items()
]
agg_columns = [key for key, _ in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
sdf = self._internal.resolved_copy.spark_frame
if index is None:
sdf = (
sdf.groupBy()
.pivot(pivot_col=self._internal.spark_column_name_for(columns))
.agg(*agg_cols)
)
elif isinstance(index, list):
index = [label if is_name_like_tuple(label) else (label,) for label in index]
sdf = (
sdf.groupBy([self._internal.spark_column_name_for(label) for label in index])
.pivot(pivot_col=self._internal.spark_column_name_for(columns))
.agg(*agg_cols)
)
else:
raise TypeError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
psdf: DataFrame
if index is not None:
index_columns = [self._internal.spark_column_name_for(label) for label in index]
index_fields = [self._internal.field_for(label) for label in index]
if isinstance(values, list):
data_columns = [column for column in sdf.columns if column not in index_columns]
if len(values) > 1:
                    # If there is more than one value column, Spark returns columns
                    # named "<pivot value>_<value column>", where <pivot value> is a
                    # distinct value of the pivot column and <value column> comes from
                    # the values list passed to pivot_table().
                    # E.g. for pivot values 2 and 3 and values ['b', 'e'], the columns
                    # are ['2_b', '2_e', '3_b', '3_e'].
                    # We sort the Spark DataFrame columns by the value-column suffix.
data_columns.sort(key=lambda x: x.split("_", 1)[1])
sdf = sdf.select(index_columns + data_columns)
column_name_to_index = dict(
zip(self._internal.data_spark_column_names, self._internal.column_labels)
)
column_labels = [
tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]])
for name in data_columns
]
column_label_names = (
[cast(Optional[Name], None)] * column_labels_level(values)
) + [columns]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names,
)
psdf = DataFrame(internal)
else:
column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]
column_label_names = ([cast(Optional[Name], None)] * len(values[0])) + [columns]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names,
)
psdf = DataFrame(internal)
else:
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_fields=index_fields,
column_label_names=[columns],
)
psdf = DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
index_map: Dict[str, Optional[Label]] = {}
for i, index_value in enumerate(index_values):
colname = SPARK_INDEX_NAME_FORMAT(i)
sdf = sdf.withColumn(colname, SF.lit(index_value))
index_map[colname] = None
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()],
index_names=list(index_map.values()),
column_label_names=[columns],
)
psdf = DataFrame(internal)
psdf_columns = psdf.columns
if isinstance(psdf_columns, pd.MultiIndex):
psdf.columns = psdf_columns.set_levels(
psdf_columns.levels[-1].astype( # type: ignore[index]
spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)
),
level=-1,
)
else:
psdf.columns = psdf_columns.astype(
spark_type_to_pandas_dtype(self._psser_for(columns).spark.data_type)
)
return psdf
def pivot(
self,
index: Optional[Name] = None,
columns: Optional[Name] = None,
values: Optional[Name] = None,
) -> "DataFrame":
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ps.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
        Notice that, unlike pandas, which raises a ValueError when duplicated values are found,
        pandas-on-Spark's pivot keeps the first value it meets during the operation, because
        pivot is an expensive operation and it is preferable to execute permissively rather
        than fail fast when processing large data.
>>> df = ps.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
        It also supports a multi-index and multi-index columns.
>>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')])
>>> df = df.set_index(('a', 'bar'), append=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
foo baz
(a, bar)
0 A one 1
1 A one 2
2 B two 3
3 C two 4
>>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
('a', 'foo') one two
(a, bar)
0 A 1.0 NaN
1 A 2.0 NaN
2 B NaN 3.0
3 C NaN 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
df = self
index_labels = [index]
else:
# The index after `reset_index()` will never be used, so use "distributed" index
# as a dummy to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
df = self.reset_index()
index_labels = df._internal.column_labels[: self._internal.index_level]
df = df.pivot_table(index=index_labels, columns=columns, values=values, aggfunc="first")
if should_use_existing_index:
return df
else:
internal = df._internal.copy(index_names=self._internal.index_names)
return DataFrame(internal)
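    # A hedged sketch of the delegation above: `pivot` is implemented as a
    # `pivot_table` call with `aggfunc="first"`, which is why duplicated
    # index/column pairs keep the first encountered value instead of raising.
    #
    #     psdf.pivot(index="foo", columns="bar", values="baz")
    #     # ~ psdf.pivot_table(index=["foo"], columns="bar", values="baz", aggfunc="first")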
@property
def columns(self) -> pd.Index:
"""The column labels of the DataFrame."""
names = [
name if name is None or len(name) > 1 else name[0]
for name in self._internal.column_label_names
]
if self._internal.column_labels_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names)
else:
columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0])
return columns
@columns.setter
def columns(self, columns: Union[pd.Index, List[Name]]) -> None:
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = [
col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns
]
if len(self._internal.column_labels) != len(column_labels):
raise ValueError(
"Length mismatch: Expected axis has {} elements, "
"new values have {} elements".format(
len(self._internal.column_labels), len(column_labels)
)
)
column_label_names: Optional[List]
if isinstance(columns, pd.Index):
column_label_names = [
name if is_name_like_tuple(name) else (name,) for name in columns.names
]
else:
column_label_names = None
pssers = [
self._psser_for(label).rename(name)
for label, name in zip(self._internal.column_labels, column_labels)
]
self._update_internal_frame(
self._internal.with_new_columns(pssers, column_label_names=column_label_names)
)
@property
def dtypes(self) -> pd.Series:
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ps.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series(
[self._psser_for(label).dtype for label in self._internal.column_labels],
index=pd.Index(
[label if len(label) > 1 else label[0] for label in self._internal.column_labels]
),
)
def select_dtypes(
self,
include: Optional[Union[str, List[str]]] = None,
exclude: Optional[Union[str, List[str]]] = None,
) -> "DataFrame":
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = ps.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = ps.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
ValueError: include and exclude overlap on {'a'}
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
include_list: List[str]
if not is_list_like(include):
include_list = [cast(str, include)] if include is not None else []
else:
include_list = list(include)
exclude_list: List[str]
if not is_list_like(exclude):
exclude_list = [cast(str, exclude)] if exclude is not None else []
else:
exclude_list = list(exclude)
if not any((include_list, exclude_list)):
raise ValueError("at least one of include or exclude must be " "nonempty")
# can't both include AND exclude!
if set(include_list).intersection(set(exclude_list)):
raise ValueError(
"include and exclude overlap on {inc_ex}".format(
inc_ex=set(include_list).intersection(set(exclude_list))
)
)
# Handle Spark types
include_spark_type = []
for inc in include_list:
try:
include_spark_type.append(_parse_datatype_string(inc))
except BaseException:
pass
exclude_spark_type = []
for exc in exclude_list:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except BaseException:
pass
# Handle pandas types
include_numpy_type = []
for inc in include_list:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except BaseException:
pass
exclude_numpy_type = []
for exc in exclude_list:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except BaseException:
pass
column_labels = []
for label in self._internal.column_labels:
if len(include_list) > 0:
should_include = (
infer_dtype_from_object(self._psser_for(label).dtype.name) in include_numpy_type
or self._internal.spark_type_for(label) in include_spark_type
)
else:
should_include = not (
infer_dtype_from_object(self._psser_for(label).dtype.name) in exclude_numpy_type
or self._internal.spark_type_for(label) in exclude_spark_type
)
if should_include:
column_labels.append(label)
return DataFrame(
self._internal.with_new_columns([self._psser_for(label) for label in column_labels])
)
def droplevel(
self, level: Union[int, Name, List[Union[int, Name]]], axis: Axis = 0
) -> "DataFrame":
"""
Return DataFrame with requested index / column level(s) removed.
Parameters
----------
level: int, str, or list-like
            If a string is given, it must be the name of a level. If list-like, elements must
            be names or positional indexes of levels.
axis: {0 or ‘index’, 1 or ‘columns’}, default 0
Returns
-------
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = ps.DataFrame(
... [[3, 4], [7, 8], [11, 12]],
... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]),
... )
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df # doctest: +NORMALIZE_WHITESPACE
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
axis = validate_axis(axis)
if axis == 0:
            if not isinstance(level, (tuple, list)):  # normalize a single level into a list
level = [level]
names = self.index.names
nlevels = self._internal.index_level
int_level = set()
for n in level:
if isinstance(n, int):
if n < 0:
n = n + nlevels
if n < 0:
raise IndexError(
"Too many levels: Index has only {} levels, "
"{} is not a valid level number".format(nlevels, (n - nlevels))
)
if n >= nlevels:
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(
nlevels, (n + 1)
)
)
else:
if n not in names:
raise KeyError("Level {} not found".format(n))
n = names.index(n)
int_level.add(n)
if len(level) >= nlevels:
raise ValueError(
"Cannot remove {} levels from an index with {} levels: "
"at least one level must be left.".format(len(level), nlevels)
)
index_spark_columns, index_names, index_fields = zip(
*[
item
for i, item in enumerate(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_fields,
)
)
if i not in int_level
]
)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_fields=list(index_fields),
)
return DataFrame(internal)
else:
psdf = self.copy()
psdf.columns = psdf.columns.droplevel(level) # type: ignore[arg-type]
return psdf
def drop(
self,
labels: Optional[Union[Name, List[Name]]] = None,
axis: Optional[Axis] = 0,
index: Union[Name, List[Name]] = None,
columns: Union[Name, List[Name]] = None,
) -> "DataFrame":
"""
        Drop specified labels from rows or columns.
Remove rows and/or columns by specifying label names and corresponding axis,
or by specifying directly index and/or column names.
        Dropping rows of a MultiIndex DataFrame is not supported yet.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionchanged:: 3.3
Set dropping by index by default.
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
            is equivalent to ``index=labels``).
.. versionchanged:: 3.3
Added dropping rows by 'index'.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ps.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
>>> df.drop(index=[0, 1], columns='A')
B C D
2 9 10 11
        Also supports dropping columns for a MultiIndex
>>> df = ps.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop(labels='a', axis=1) # doctest: +NORMALIZE_WHITESPACE
b
z w
0 5 7
1 6 8
Notes
-----
Currently, dropping rows of a MultiIndex DataFrame is not supported yet.
"""
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = validate_axis(axis)
if axis == 1:
return self.drop(index=index, columns=labels)
else:
return self.drop(index=labels, columns=columns)
else:
if index is None and columns is None:
raise ValueError("Need to specify at least one of 'labels' or 'columns' or 'index'")
internal = self._internal
if index is not None:
if is_name_like_tuple(index) or is_name_like_value(index):
index = [index]
if len(index) > 0:
if internal.index_level == 1:
internal = internal.resolved_copy
if len(index) <= ps.get_option("compute.isin_limit"):
self_index_type = self.index.spark.data_type
cond = ~internal.index_spark_columns[0].isin(
[SF.lit(label).cast(self_index_type) for label in index]
)
internal = internal.with_filter(cond)
else:
index_sdf_col = "__index"
index_sdf = default_session().createDataFrame(
pd.DataFrame({index_sdf_col: index})
)
joined_sdf = internal.spark_frame.join(
other=F.broadcast(index_sdf),
on=(
internal.index_spark_columns[0]
== scol_for(index_sdf, index_sdf_col)
),
how="anti",
)
internal = internal.with_new_sdf(joined_sdf)
else:
raise NotImplementedError(
"Drop rows of MultiIndex DataFrame is not supported yet"
)
if columns is not None:
if is_name_like_tuple(columns):
columns = [columns]
elif is_name_like_value(columns):
columns = [(columns,)]
else:
columns = [col if is_name_like_tuple(col) else (col,) for col in columns]
if len(columns) > 0:
drop_column_labels = set(
label
for label in internal.column_labels
for col in columns
if label[: len(col)] == col
)
if len(drop_column_labels) == 0:
raise KeyError(columns)
keep_columns_and_labels = [
(column, label)
for column, label in zip(
self._internal.data_spark_column_names, self._internal.column_labels
)
if label not in drop_column_labels
]
cols, labels = (
zip(*keep_columns_and_labels)
if len(keep_columns_and_labels) > 0
else ([], [])
)
internal = internal.with_new_columns(
[self._psser_for(label) for label in labels]
)
return DataFrame(internal)
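    # A hedged note on the row-dropping strategy above (the option name is real): for a
    # single-level index, up to "compute.isin_limit" labels are filtered with an `isin`
    # predicate; longer label lists are removed through a broadcast anti-join.
    #
    #     with ps.option_context("compute.isin_limit", 80):
    #         psdf.drop(index=list(range(100)))   # 100 labels -> anti-join path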
def _prepare_sort_by_scols(self, by: Union[Name, List[Name]]) -> List[Column]:
if is_name_like_value(by):
by = [by]
else:
assert is_list_like(by), type(by)
new_by = []
for colname in by:
ser = self[colname]
if not isinstance(ser, ps.Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(colname)
)
new_by.append(ser.spark.column)
return new_by
def _sort(
self,
by: List[Column],
ascending: Union[bool, List[bool]],
na_position: str,
keep: str = "first",
) -> "DataFrame":
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError(
"Length of ascending ({}) != length of by ({})".format(len(ascending), len(by))
)
if na_position not in ("first", "last"):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
mapper = {
(True, "first"): Column.asc_nulls_first,
(True, "last"): Column.asc_nulls_last,
(False, "first"): Column.desc_nulls_first,
(False, "last"): Column.desc_nulls_last,
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
natural_order_scol = F.col(NATURAL_ORDER_COLUMN_NAME)
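        # NATURAL_ORDER_COLUMN_NAME records the incoming row order; appending it as the final
        # sort key makes ties deterministic, and reversing it lets keep="last" win ties instead.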
if keep == "last":
natural_order_scol = Column.desc(natural_order_scol)
elif keep == "all":
raise NotImplementedError("`keep`=all is not implemented yet.")
elif keep != "first":
raise ValueError('keep must be either "first", "last" or "all".')
sdf = self._internal.resolved_copy.spark_frame.sort(*by, natural_order_scol)
return DataFrame(self._internal.with_new_sdf(sdf))
def sort_values(
self,
by: Union[Name, List[Name]],
ascending: Union[bool, List[bool]] = True,
inplace: bool = False,
na_position: str = "last",
ignore_index: bool = False,
) -> Optional["DataFrame"]:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ps.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'],
... index=['a', 'b', 'c', 'd', 'e'])
>>> df
col1 col2 col3
a A 2 0
b B 9 9
c None 8 4
d D 7 2
e C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
a A 2 0
b B 9 9
e C 4 3
d D 7 2
c None 8 4
Ignore index for the resulting axis
>>> df.sort_values(by=['col1'], ignore_index=True)
col1 col2 col3
0 A 2 0
1 B 9 9
2 C 4 3
3 D 7 2
4 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
d D 7 2
e C 4 3
b B 9 9
a A 2 0
c None 8 4
Sort by multiple columns
>>> df = ps.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
new_by = self._prepare_sort_by_scols(by)
psdf = self._sort(by=new_by, ascending=ascending, na_position=na_position)
if inplace:
if ignore_index:
psdf.reset_index(drop=True, inplace=inplace)
self._update_internal_frame(psdf._internal)
return None
else:
return psdf.reset_index(drop=True) if ignore_index else psdf
def sort_index(
self,
axis: Axis = 0,
level: Optional[Union[int, List[int]]] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = None,
na_position: str = "last",
) -> Optional["DataFrame"]:
"""
Sort object by labels (along an axis)
Parameters
----------
        axis : {0 or 'index'}, default 0. The axis to sort along; currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
pandas-on-Spark does not allow specifying the sorting algorithm at the moment,
default None
        na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ps.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ps.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("No other axis than 0 are supported at the moment")
if kind is not None:
raise NotImplementedError(
"Specifying the sorting algorithm is not supported at the moment."
)
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore[arg-type]
by = self._internal.index_spark_columns
elif is_list_like(level):
by = [
self._internal.index_spark_columns[lvl] for lvl in level # type: ignore[union-attr]
]
else:
by = [self._internal.index_spark_columns[level]] # type: ignore[index]
psdf = self._sort(by=by, ascending=ascending, na_position=na_position)
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf
def swaplevel(
self, i: Union[int, Name] = -2, j: Union[int, Name] = -1, axis: Axis = 0
) -> "DataFrame":
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
DataFrame with levels swapped in MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays(
... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])
>>> midx # doctest: +SKIP
MultiIndex([( 'red', 1, 's'),
('blue', 2, 'm')],
names=['color', 'number', 'size'])
Swap levels in a MultiIndex on index.
>>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x y
color number size
red 1 s 5 5
blue 2 m 6 6
>>> psdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE
x y
color size number
red s 1 5 5
blue m 2 6 6
>>> psdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE
x y
number color size
1 red s 5 5
2 blue m 6 6
>>> psdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE
x y
color size number
red s 1 5 5
blue m 2 6 6
Swap levels in a MultiIndex on columns.
>>> psdf = ps.DataFrame({'x': [5, 6], 'y':[5, 6]})
>>> psdf.columns = midx
>>> psdf
color red blue
number 1 2
size s m
0 5 5
1 6 6
>>> psdf.swaplevel(axis=1)
color red blue
size s m
number 1 2
0 5 5
1 6 6
>>> psdf.swaplevel(0, 1, axis=1)
number 1 2
color red blue
size s m
0 5 5
1 6 6
>>> psdf.swaplevel('number', 'color', axis=1)
number 1 2
color red blue
size s m
0 5 5
1 6 6
"""
axis = validate_axis(axis)
if axis == 0:
internal = self._swaplevel_index(i, j)
else:
assert axis == 1
internal = self._swaplevel_columns(i, j)
return DataFrame(internal)
def swapaxes(self, i: Axis, j: Axis, copy: bool = True) -> "DataFrame":
"""
Interchange axes and swap values axes appropriately.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from pyspark.pandas.config import option_context
>>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE
... ps.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'pyspark.pandas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Parameters
----------
i: {0 or 'index', 1 or 'columns'}. The axis to swap.
j: {0 or 'index', 1 or 'columns'}. The axis to swap.
copy : bool, default True.
Returns
-------
DataFrame
Examples
--------
>>> psdf = ps.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']
... )
>>> psdf
a b c
x 1 2 3
y 4 5 6
z 7 8 9
>>> psdf.swapaxes(i=1, j=0)
x y z
a 1 4 7
b 2 5 8
c 3 6 9
>>> psdf.swapaxes(i=1, j=1)
a b c
x 1 2 3
y 4 5 6
z 7 8 9
"""
assert copy is True
i = validate_axis(i)
j = validate_axis(j)
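        # With only two axes, swapping distinct axes is a transpose; swapping an axis with
        # itself changes nothing, so return a plain copy.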
return self.copy() if i == j else self.transpose()
def _swaplevel_columns(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:
assert isinstance(self.columns, pd.MultiIndex)
for index in (i, j):
if not isinstance(index, int) and index not in self.columns.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.columns.names.index(i)
j = j if isinstance(j, int) else self.columns.names.index(j)
for index in (i, j):
            if index >= self.columns.nlevels or index < -self.columns.nlevels:
                raise IndexError(
                    "Too many levels: Columns have only %s levels, "
                    "%s is not a valid level number" % (self.columns.nlevels, index)
                )
column_label_names = self._internal.column_label_names.copy()
column_label_names[i], column_label_names[j], = (
column_label_names[j],
column_label_names[i],
)
column_labels = self._internal._column_labels
column_label_list = [list(label) for label in column_labels]
for label_list in column_label_list:
label_list[i], label_list[j] = label_list[j], label_list[i]
column_labels = [tuple(x) for x in column_label_list]
internal = self._internal.copy(
column_label_names=list(column_label_names), column_labels=list(column_labels)
)
return internal
def _swaplevel_index(self, i: Union[int, Name], j: Union[int, Name]) -> InternalFrame:
assert isinstance(self.index, ps.MultiIndex)
for index in (i, j):
if not isinstance(index, int) and index not in self.index.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.index.names.index(i)
j = j if isinstance(j, int) else self.index.names.index(j)
for index in (i, j):
if index >= self._internal.index_level or index < -self._internal.index_level:
raise IndexError(
"Too many levels: Index has only %s levels, "
"%s is not a valid level number" % (self._internal.index_level, index)
)
index_map = list(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_fields,
)
)
index_map[i], index_map[j] = index_map[j], index_map[i]
index_spark_columns, index_names, index_fields = zip(*index_map)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_fields=list(index_fields),
)
return internal
def nlargest(
self, n: int, columns: Union[Name, List[Name]], keep: str = "first"
) -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in pandas.
In pandas-on-Spark, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet.
Determines which duplicates (if any) to keep.
- ``first`` : Keep the first occurrence.
- ``last`` : Keep the last occurrence.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
To order by the largest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
The examples below show how ties are resolved, which is decided by `keep`.
>>> tied_df = ps.DataFrame({'X': [1, 2, 2, 3, 3]}, index=['a', 'b', 'c', 'd', 'e'])
>>> tied_df
X
a 1
b 2
c 2
d 3
e 3
When using keep='first' (by default), ties are resolved in order:
>>> tied_df.nlargest(3, 'X')
X
d 3
e 3
b 2
>>> tied_df.nlargest(3, 'X', keep='first')
X
d 3
e 3
b 2
When using keep='last', ties are resolved in reverse order:
>>> tied_df.nlargest(3, 'X', keep='last')
X
e 3
d 3
c 2
"""
by_scols = self._prepare_sort_by_scols(columns)
return self._sort(by=by_scols, ascending=False, na_position="last", keep=keep).head(n=n)
def nsmallest(
self, n: int, columns: Union[Name, List[Name]], keep: str = "first"
) -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In pandas-on-Spark, thanks to Spark's lazy execution and query
        optimizer, the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last'}, default 'first'. 'all' is not implemented yet.
Determines which duplicates (if any) to keep.
- ``first`` : Keep the first occurrence.
- ``last`` : Keep the last occurrence.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ps.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
The examples below show how ties are resolved, which is decided by `keep`.
>>> tied_df = ps.DataFrame({'X': [1, 1, 2, 2, 3]}, index=['a', 'b', 'c', 'd', 'e'])
>>> tied_df
X
a 1
b 1
c 2
d 2
e 3
When using keep='first' (by default), ties are resolved in order:
>>> tied_df.nsmallest(3, 'X')
X
a 1
b 1
c 2
>>> tied_df.nsmallest(3, 'X', keep='first')
X
a 1
b 1
c 2
When using keep='last', ties are resolved in reverse order:
>>> tied_df.nsmallest(3, 'X', keep='last')
X
b 1
a 1
d 2
"""
by_scols = self._prepare_sort_by_scols(columns)
return self._sort(by=by_scols, ascending=True, na_position="last", keep=keep).head(n=n)
def isin(self, values: Union[List, Dict]) -> "DataFrame":
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ps.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns))
)
data_spark_columns = []
if isinstance(values, dict):
for i, col in enumerate(self.columns):
if col in values:
item = values[col]
item = item.tolist() if isinstance(item, np.ndarray) else list(item)
scol = self._internal.spark_column_for(self._internal.column_labels[i]).isin(
[SF.lit(v) for v in item]
)
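                    # `isin` evaluates to NULL for NULL inputs; coalesce to False so missing
                    # values are reported as "not contained", matching pandas.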
scol = F.coalesce(scol, F.lit(False))
else:
scol = SF.lit(False)
data_spark_columns.append(scol.alias(self._internal.data_spark_column_names[i]))
elif is_list_like(values):
values = (
cast(np.ndarray, values).tolist()
if isinstance(values, np.ndarray)
else list(values)
)
for label in self._internal.column_labels:
scol = self._internal.spark_column_for(label).isin([SF.lit(v) for v in values])
scol = F.coalesce(scol, F.lit(False))
data_spark_columns.append(scol.alias(self._internal.spark_column_name_for(label)))
else:
raise TypeError("Values should be iterable, Series, DataFrame or dict.")
return DataFrame(
self._internal.with_new_columns(
data_spark_columns,
data_fields=[
field.copy(dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False)
for field in self._internal.data_fields
],
)
)
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ps.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(
self,
right: "DataFrame",
how: str = "inner",
on: Optional[Union[Name, List[Name]]] = None,
left_on: Optional[Union[Name, List[Name]]] = None,
right_on: Optional[Union[Name, List[Name]]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
            left: use only keys from the left frame, similar to a SQL left outer join; does not
                preserve key order, unlike pandas.
            right: use only keys from the right frame, similar to a SQL right outer join; does not
                preserve key order, unlike pandas.
            outer: use union of keys from both frames, similar to a SQL full outer join; sorts keys
                lexicographically.
            inner: use intersection of keys from both frames, similar to a SQL inner join; does not
                preserve the order of the left keys, unlike pandas.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_psdf = ps.DataFrame({'A': [1, 2]})
>>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_psdf.merge(right_psdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> left_psdf.merge(right_psdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:
if os is None:
return []
elif is_name_like_tuple(os):
return [cast(Label, os)]
elif is_name_like_value(os):
return [(os,)]
else:
return [o if is_name_like_tuple(o) else (o,) for o in os]
if isinstance(right, ps.Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" and "right_on", '
"not a combination of both."
)
left_key_names = list(map(self._internal.spark_column_name_for, to_list(on)))
right_key_names = list(map(right._internal.spark_column_name_for, to_list(on)))
else:
# TODO: need special handling for multi-index.
if left_index:
left_key_names = self._internal.index_spark_column_names
else:
left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on)))
if right_index:
right_key_names = right._internal.index_spark_column_names
else:
right_key_names = list(
map(right._internal.spark_column_name_for, to_list(right_on))
)
if left_key_names and not right_key_names:
raise ValueError("Must pass right_on or right_index=True")
if right_key_names and not left_key_names:
raise ValueError("Must pass left_on or left_index=True")
if not left_key_names and not right_key_names:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
"No common columns to perform merge on. Merge options: "
"left_on=None, right_on=None, left_index=False, right_index=False"
)
left_key_names = list(map(self._internal.spark_column_name_for, to_list(common)))
right_key_names = list(map(right._internal.spark_column_name_for, to_list(common)))
if len(left_key_names) != len(right_key_names):
raise ValueError("len(left_keys) must equal len(right_keys)")
        # Distinguish the right-side key names to avoid ambiguous column names after merging.
right_prefix = "__right_"
right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names]
how = validate_how(how)
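        # resolve() renames every non-hidden column of a frame with a "__{side}_" prefix; it is
        # applied to the right frame below so its column names cannot clash with the left's.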
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
def rename(col: str) -> str:
return "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = sdf.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS,
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
left_internal = self._internal.resolved_copy
right_internal = resolve(right._internal, "right")
left_table = left_internal.spark_frame.alias("left_table")
right_table = right_internal.spark_frame.alias("right_table")
left_key_columns = [scol_for(left_table, label) for label in left_key_names]
right_key_columns = [scol_for(right_table, label) for label in right_key_names]
join_condition = reduce(
lambda x, y: x & y,
[lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)],
)
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)
exprs = []
data_columns = []
column_labels = []
def left_scol_for(label: Label) -> Column:
return scol_for(left_table, left_internal.spark_column_name_for(label))
def right_scol_for(label: Label) -> Column:
return scol_for(right_table, right_internal.spark_column_name_for(label))
for label in left_internal.column_labels:
col = left_internal.spark_column_name_for(label)
scol = left_scol_for(label)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if (
spark_column_name in left_key_names
and (right_prefix + spark_column_name) in right_key_names
):
right_scol = right_scol_for(label)
if how == "right":
scol = right_scol.alias(col)
elif how == "full":
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + left_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
for label in right_internal.column_labels:
            # strip the `right_prefix` added by resolve() to recover the original column name.
col = right_internal.spark_column_name_for(label)[len(right_prefix) :]
scol = right_scol_for(label).alias(col)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if (
spark_column_name in left_key_names
and (right_prefix + spark_column_name) in right_key_names
):
continue
else:
col = col + right_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + right_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
left_index_scols = left_internal.index_spark_columns
right_index_scols = right_internal.index_spark_columns
# Retain indices if they are used for joining
if left_index:
if right_index:
if how in ("inner", "left"):
exprs.extend(left_index_scols)
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
elif how == "right":
exprs.extend(right_index_scols)
index_spark_column_names = right_internal.index_spark_column_names
index_names = right_internal.index_names
else:
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
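                    # Full outer join: an index value may exist on only one side, so take the
                    # left index value when present and fall back to the right one.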
for col, left_scol, right_scol in zip(
index_spark_column_names, left_index_scols, right_index_scols
):
scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
exprs.append(scol.alias(col))
else:
exprs.extend(right_index_scols)
index_spark_column_names = right_internal.index_spark_column_names
index_names = right_internal.index_names
elif right_index:
exprs.extend(left_index_scols)
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
else:
index_spark_column_names = []
index_names = []
selected_columns = joined_table.select(*exprs)
internal = InternalFrame(
spark_frame=selected_columns,
index_spark_columns=[
scol_for(selected_columns, col) for col in index_spark_column_names
],
index_names=index_names,
column_labels=column_labels,
data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],
)
return DataFrame(internal)
def join(
self,
right: "DataFrame",
on: Optional[Union[Name, List[Name]]] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
            * left: use `left` frame's index (or column if on is specified).
            * right: use `right`'s index.
            * outer: form union of `left` frame's index (or column if on is specified) with
              `right`'s index, and sort it lexicographically.
            * inner: form intersection of `left` frame's index (or column if on is specified)
              with `right`'s index, preserving the order of the `left` index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> psdf1 = ps.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> psdf2 = ps.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> psdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> psdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_psdf = psdf1.join(psdf2, lsuffix='_left', rsuffix='_right')
>>> join_psdf.sort_values(by=join_psdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_psdf = psdf1.set_index('key').join(psdf2.set_index('key'))
>>> join_psdf.sort_values(by=join_psdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
        Another option to join using the key columns is to use the `on` parameter. DataFrame.join
        always uses `right`'s index but we can use any column in df. This method does not preserve
        the original DataFrame's index in the result, unlike pandas.
>>> join_psdf = psdf1.join(psdf2.set_index('key'), on='key')
>>> join_psdf.index
Int64Index([0, 1, 2, 3], dtype='int64')
"""
if isinstance(right, ps.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: " "{rename}".format(rename=common)
)
need_set_index = False
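        # When `on` names columns that are not already index levels, temporarily promote them
        # to the index so the index-to-index merge below lines up, then undo it with
        # reset_index() on the result.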
if on:
if not is_list_like(on):
on = [on]
if len(on) != right._internal.index_level:
raise ValueError(
'len(left_on) must equal the number of levels in the index of "right"'
)
need_set_index = len(set(on) & set(self.index.names)) == 0
if need_set_index:
self = self.set_index(on)
join_psdf = self.merge(
right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)
)
return join_psdf.reset_index() if need_set_index else join_psdf
def combine_first(self, other: "DataFrame") -> "DataFrame":
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
.. versionadded:: 3.3.0
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = ps.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2).sort_index()
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value does not exist in other
>>> df1 = ps.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = ps.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2).sort_index()
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
>>> ps.reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, DataFrame):
raise TypeError("`combine_first` only allows `DataFrame` for parameter `other`")
if same_anchor(self, other):
combined = self
this = self
that = other
else:
combined = combine_frames(self, other)
this = combined["this"]
that = combined["that"]
intersect_column_labels = set(self._internal.column_labels).intersection(
set(other._internal.column_labels)
)
column_labels, data_spark_columns = [], []
for column_label in this._internal.column_labels:
this_scol = this._internal.spark_column_for(column_label)
if column_label in intersect_column_labels:
that_scol = that._internal.spark_column_for(column_label)
this_scol_name = this._internal.spark_column_name_for(column_label)
combined_scol = (
F.when(this_scol.isNull(), that_scol).otherwise(this_scol).alias(this_scol_name)
)
data_spark_columns.append(combined_scol)
else:
data_spark_columns.append(this_scol)
column_labels.append(column_label)
for column_label in that._internal.column_labels:
if column_label not in intersect_column_labels:
that_scol = that._internal.spark_column_for(column_label)
data_spark_columns.append(that_scol)
column_labels.append(column_label)
internal = combined._internal.copy(
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_fields=None, # TODO: dtype?
column_label_names=self._internal.column_label_names,
)
return DataFrame(internal)
def append(
self,
other: "DataFrame",
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> "DataFrame":
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ps.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ps.Series):
raise TypeError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise NotImplementedError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_spark_columns
if len(index_scols) != other._internal.index_level:
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
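                # A non-empty intersection of the two frames' index columns means the
                # appended result would contain duplicate index values.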
if (
self._internal.spark_frame.select(index_scols)
.intersect(
other._internal.spark_frame.select(other._internal.index_spark_columns)
)
.count()
) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from pyspark.pandas.namespace import concat
return cast(DataFrame, concat([self, other], ignore_index=ignore_index))
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
DataFrame.join : Join columns of another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ps.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ps.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 a d
1 b e
2 c f
        For a Series, its name attribute must be set.
>>> df = ps.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ps.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df.sort_index()
A B
0 a d
1 b y
2 c e
        If `other` contains None, the corresponding values are not updated in the original DataFrame.
>>> df = ps.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ps.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != "left":
raise NotImplementedError("Only left join is supported")
if isinstance(other, ps.Series):
other = other.to_frame()
update_columns = list(
set(self._internal.column_labels).intersection(set(other._internal.column_labels))
)
update_sdf = self.join(
other[update_columns], rsuffix="_new"
)._internal.resolved_copy.spark_frame
data_fields = self._internal.data_fields.copy()
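        # After the left join above, every overlapping column appears twice: the original value
        # and the "<name>_new" value from `other`; they are coalesced below according to
        # `overwrite`.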
for column_labels in update_columns:
column_name = self._internal.spark_column_name_for(column_labels)
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(
update_sdf, other._internal.spark_column_name_for(column_labels) + "_new"
)
if overwrite:
update_sdf = update_sdf.withColumn(
column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)
)
else:
update_sdf = update_sdf.withColumn(
column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)
)
data_fields[self._internal.column_labels.index(column_labels)] = None
sdf = update_sdf.select(
*[scol_for(update_sdf, col) for col in self._internal.spark_column_names],
*HIDDEN_COLUMNS,
)
internal = self._internal.with_new_sdf(sdf, data_fields=data_fields)
self._update_internal_frame(internal, requires_same_anchor=False)
# TODO: ddof should be implemented.
def cov(self, min_periods: Optional[int] = None) -> "DataFrame":
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
        calculation.
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
.. versionadded:: 3.3.0
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
Examples
--------
>>> df = ps.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = ps.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> sdf = ps.from_pandas(df)
>>> sdf.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
min_periods = 1 if min_periods is None else min_periods
# Only compute covariance for Boolean and Numeric except Decimal
psdf = self[
[
col
for col in self.columns
if isinstance(self[col].spark.data_type, BooleanType)
or (
isinstance(self[col].spark.data_type, NumericType)
and not isinstance(self[col].spark.data_type, DecimalType)
)
]
]
num_cols = len(psdf.columns)
cov = np.zeros([num_cols, num_cols])
if num_cols == 0:
return DataFrame()
if len(psdf) < min_periods:
cov.fill(np.nan)
return DataFrame(cov, columns=psdf.columns, index=psdf.columns)
data_cols = psdf._internal.data_spark_column_names
cov_scols = []
count_not_null_scols = []
        # Count the number of rows where both columns are non-null, for every pair of columns
# Example:
# a b c
# 0 1 1 1
# 1 NaN 2 2
# 2 3 NaN 3
# 3 4 4 4
#
# a b c
# a count(a, a) count(a, b) count(a, c)
# b count(b, b) count(b, c)
# c count(c, c)
#
# count_not_null_scols =
# [F.count(a, a), F.count(a, b), F.count(a, c), F.count(b, b), F.count(b, c), F.count(c, c)]
for r in range(0, num_cols):
for c in range(r, num_cols):
count_not_null_scols.append(
F.count(
F.when(F.col(data_cols[r]).isNotNull() & F.col(data_cols[c]).isNotNull(), 1)
)
)
count_not_null = (
psdf._internal.spark_frame.replace(float("nan"), None)
.select(*count_not_null_scols)
.head(1)[0]
)
# Calculate covariance between two columns
# Example:
# with min_periods = 3
# a b c
# 0 1 1 1
# 1 NaN 2 2
# 2 3 NaN 3
# 3 4 4 4
#
# a b c
# a cov(a, a) None cov(a, c)
# b cov(b, b) cov(b, c)
# c cov(c, c)
#
# cov_scols = [F.cov(a, a), None, F.cov(a, c), F.cov(b, b), F.cov(b, c), F.cov(c, c)]
step = 0
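        # Pairwise results are stored as a flattened upper triangle, row by row. `step`
        # accumulates 0 + 1 + ... + r, the number of lower-triangle slots skipped up to row r,
        # so pair (r, c) lives at flat index r * num_cols + c - step.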
for r in range(0, num_cols):
step += r
for c in range(r, num_cols):
cov_scols.append(
F.covar_samp(
F.col(data_cols[r]).cast("double"), F.col(data_cols[c]).cast("double")
)
if count_not_null[r * num_cols + c - step] >= min_periods
else F.lit(None)
)
pair_cov = psdf._internal.spark_frame.select(*cov_scols).head(1)[0]
# Convert from row to 2D array
# Example:
# pair_cov = [cov(a, a), None, cov(a, c), cov(b, b), cov(b, c), cov(c, c)]
#
# cov =
#
# a b c
# a cov(a, a) None cov(a, c)
# b cov(b, b) cov(b, c)
# c cov(c, c)
step = 0
for r in range(0, num_cols):
step += r
for c in range(r, num_cols):
cov[r][c] = pair_cov[r * num_cols + c - step]
        # Mirror the upper triangle into the lower triangle (the covariance matrix is symmetric)
# Example:
# cov =
# a b c
# a cov(a, a) None cov(a, c)
# b None cov(b, b) cov(b, c)
# c cov(a, c) cov(b, c) cov(c, c)
cov = cov + cov.T - np.diag(np.diag(cov))
return DataFrame(cov, columns=psdf.columns, index=psdf.columns)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "DataFrame":
"""
Return a random sample of items from an axis of object.
        Please call this function by specifying the ``frac`` argument as a named (keyword) argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in pandas-on-Spark/Spark does not guarantee the sampled rows will
be fixed. The result set depends on not only the seed, but also how the data is distributed
across machines and to some extent network randomness when shuffle operations are involved.
Even in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ps.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
        Extract 40% of the elements at random from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError(
"Function sample currently does not support specifying "
"exact number of items to return. Use frac instead."
)
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._internal.resolved_copy.spark_frame.sample(
withReplacement=replace, fraction=frac, seed=random_state
)
return DataFrame(self._internal.with_new_sdf(sdf))
def astype(self, dtype: Union[str, Dtype, Dict[Name, Union[str, Dtype]]]) -> "DataFrame":
"""
Cast a pandas-on-Spark object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas-on-Spark object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
applied = []
if is_dict_like(dtype):
dtype_dict = cast(Dict[Name, Union[str, Dtype]], dtype)
for col_name in dtype_dict.keys():
if col_name not in self.columns:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
for col_name, col in self.items():
if col_name in dtype_dict:
applied.append(col.astype(dtype=dtype_dict[col_name]))
else:
applied.append(col)
else:
for col_name, col in self.items():
applied.append(col.astype(dtype=cast(Union[str, Dtype], dtype)))
return DataFrame(self._internal.with_new_columns(applied))
def add_prefix(self, prefix: str) -> "DataFrame":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
return self._apply_series_op(
lambda psser: psser.rename(tuple([prefix + i for i in psser._column_label]))
)
def add_suffix(self, suffix: str) -> "DataFrame":
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
return self._apply_series_op(
lambda psser: psser.rename(tuple([i + suffix for i in psser._column_label]))
)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame":
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
DataFrame
Summary statistics of the Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
        For object data (e.g. strings or timestamps), the result's index will include
        ``count``, ``unique``, ``top``, and ``freq``.
        The ``top`` is the most common value. The ``freq`` is the most common value's frequency.
Timestamps also include the ``first`` and ``last`` items.
Examples
--------
Describing a numeric ``Series``.
>>> s = ps.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ps.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
For multi-index columns:
>>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]
>>> df.describe() # doctest: +NORMALIZE_WHITESPACE
num
a b
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
>>> df[('num', 'b')].describe()
count 3.0
mean 5.0
std 1.0
min 4.0
25% 4.0
50% 5.0
75% 6.0
max 6.0
Name: (num, b), dtype: float64
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ps.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
psser_numeric: List[Series] = []
psser_string: List[Series] = []
psser_timestamp: List[Series] = []
spark_data_types: List[DataType] = []
column_labels: Optional[List[Label]] = []
column_names: List[str] = []
for label in self._internal.column_labels:
psser = self._psser_for(label)
spark_data_type = psser.spark.data_type
if isinstance(spark_data_type, NumericType):
psser_numeric.append(psser)
column_labels.append(label)
spark_data_types.append(spark_data_type)
elif isinstance(spark_data_type, (TimestampType, TimestampNTZType)):
psser_timestamp.append(psser)
column_labels.append(label)
spark_data_types.append(spark_data_type)
else:
psser_string.append(psser)
column_names.append(self._internal.spark_column_name_for(label))
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
# Identify the cases
is_all_string_type = (
len(psser_numeric) == 0 and len(psser_timestamp) == 0 and len(psser_string) > 0
)
is_all_numeric_type = len(psser_numeric) > 0 and len(psser_timestamp) == 0
has_timestamp_type = len(psser_timestamp) > 0
has_numeric_type = len(psser_numeric) > 0
if is_all_string_type:
# Handling string type columns
            # We will retrieve the `count`, `unique`, `top` and `freq`.
internal = self._internal.resolved_copy
exprs_string = [
internal.spark_column_for(psser._column_label) for psser in psser_string
]
sdf = internal.spark_frame.select(*exprs_string)
            # Get `count` & `unique` for each column
counts, uniques = map(lambda x: x[1:], sdf.summary("count", "count_distinct").take(2))
# Handling Empty DataFrame
if len(counts) == 0 or counts[0] == "0":
data = dict()
for psser in psser_string:
data[psser.name] = [0, 0, np.nan, np.nan]
return DataFrame(data, index=["count", "unique", "top", "freq"])
            # Get `top` & `freq` for each column
tops = []
freqs = []
            # TODO(SPARK-37711): We should do it in a single pass since invoking a Spark job
            # for every column is too expensive.
for column in exprs_string:
top, freq = sdf.groupby(column).count().sort("count", ascending=False).first()
tops.append(str(top))
freqs.append(str(freq))
stats = [counts, uniques, tops, freqs]
stats_names = ["count", "unique", "top", "freq"]
result: DataFrame = DataFrame(
data=stats,
index=stats_names,
columns=column_names,
)
elif is_all_numeric_type:
# Handling numeric columns
exprs_numeric = [
psser._dtype_op.nan_to_null(psser).spark.column for psser in psser_numeric
]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
# In this case, we can simply use `summary` to calculate the stats.
sdf = self._internal.spark_frame.select(*exprs_numeric).summary(*stats)
sdf = sdf.replace("stddev", "std", subset=["summary"])
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, "summary")],
column_labels=column_labels,
data_spark_columns=[
scol_for(sdf, self._internal.spark_column_name_for(label))
for label in column_labels
],
)
result = DataFrame(internal).astype("float64")
elif has_timestamp_type:
internal = self._internal.resolved_copy
column_names = [
internal.spark_column_name_for(column_label) for column_label in column_labels
]
column_length = len(column_labels)
# Apply stat functions for each column.
count_exprs = map(F.count, column_names)
min_exprs = map(F.min, column_names)
# Here we flatten the multiple map objects into a single list that contains each
# calculated percentile, using `chain`.
# e.g. flatten the `[<map object at 0x7fc1907dc280>, <map object at 0x7fc1907dcc70>]`
# to `[Column<'percentile_approx(A, 0.2, 10000)'>,
# Column<'percentile_approx(B, 0.2, 10000)'>,
# Column<'percentile_approx(A, 0.5, 10000)'>,
# Column<'percentile_approx(B, 0.5, 10000)'>]`
perc_exprs = chain(
*[
map(F.percentile_approx, column_names, [percentile] * column_length)
for percentile in percentiles
]
)
max_exprs = map(F.max, column_names)
mean_exprs = []
for column_name, spark_data_type in zip(column_names, spark_data_types):
mean_exprs.append(F.mean(column_name).astype(spark_data_type))
exprs = [*count_exprs, *mean_exprs, *min_exprs, *perc_exprs, *max_exprs]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats_names = ["count", "mean", "min", *formatted_perc, "max"]
# If not all columns are timestamp type,
# we also need to calculate the `std` for numeric columns
if has_numeric_type:
std_exprs = []
for label, spark_data_type in zip(column_labels, spark_data_types):
column_name = label[0]
if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):
std_exprs.append(F.lit(None).alias("stddev_samp({})".format(column_name)))
else:
std_exprs.append(F.stddev(column_name))
exprs.extend(std_exprs)
stats_names.append("std")
# Select stats for all columns at once.
sdf = internal.spark_frame.select(exprs)
stat_values = sdf.first()
num_stats = int(len(exprs) / column_length)
# `column_name_stats_kv` is a key-value store that has the column name as key and
# the stats as values, e.g. {"A": [{count_value}, {min_value}, ...],
# "B": [{count_value}, {min_value} ...]}
column_name_stats_kv: Dict[str, List[str]] = defaultdict(list)
for i, column_name in enumerate(column_names):
for first_stat_idx in range(num_stats):
column_name_stats_kv[column_name].append(
stat_values[(first_stat_idx * column_length) + i]
)
# For timestamp type columns, we should cast the column type to string.
for key, spark_data_type in zip(column_name_stats_kv, spark_data_types):
if isinstance(spark_data_type, (TimestampType, TimestampNTZType)):
column_name_stats_kv[key] = [str(value) for value in column_name_stats_kv[key]]
result: DataFrame = DataFrame( # type: ignore[no-redef]
data=column_name_stats_kv,
index=stats_names,
columns=column_names,
)
else:
# Empty DataFrame without column
raise ValueError("Cannot describe a DataFrame without columns")
return result
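# Illustrative sketch (commented out, not part of this module): the numeric branch above
# leans on Spark's ``DataFrame.summary``, which accepts the same stat names built in
# ``stats`` (including formatted percentiles) and returns one row per statistic. Assumes
# an active SparkSession named ``spark``; data and column names are made up.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(1, 4.0), (2, 5.0), (3, 6.0)], ["a", "b"])
#   summarized = sdf.summary("count", "mean", "stddev", "min", "25%", "50%", "75%", "max")
#   # The implementation then renames "stddev" to "std" in the `summary` label column.
#   summarized.replace("stddev", "std", subset=["summary"]).show()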
def drop_duplicates(
self,
subset: Optional[Union[Name, List[Name]]] = None,
keep: Union[bool, str] = "first",
inplace: bool = False,
) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy.
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
>>> df = ps.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_index()
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep='last').sort_index()
a b
0 1 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep=False).sort_index()
a b
0 1 a
3 2 c
4 3 d
"""
inplace = validate_bool_kwarg(inplace, "inplace")
sdf, column = self._mark_duplicates(subset, keep)
sdf = sdf.where(~scol_for(sdf, column)).drop(column)
internal = self._internal.with_new_sdf(sdf)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
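# Illustrative sketch (commented out, not part of this module): ``_mark_duplicates``
# (defined elsewhere) appends a boolean marker column, and ``drop_duplicates`` keeps the
# rows where that marker is false. The generic where/drop pattern looks like this; assumes
# an active SparkSession named ``spark``, and the data and column names are made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(1, False), (1, True), (2, False)], ["a", "__duplicated__"])
#   sdf.where(~F.col("__duplicated__")).drop("__duplicated__").show()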
def reindex(
self,
labels: Optional[Sequence[Any]] = None,
index: Optional[Union["Index", Sequence[Any]]] = None,
columns: Optional[Union[pd.Index, Sequence[Any]]] = None,
axis: Optional[Axis] = None,
copy: Optional[bool] = True,
fill_value: Optional[Any] = None,
) -> "DataFrame":
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ps.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index()
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index()
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
axis = validate_axis(axis)
if axis == 0:
index = labels
elif axis == 1:
columns = labels
if index is not None and not is_list_like(index):
raise TypeError(
"Index must be called with a collection of some kind, "
"%s was passed" % type(index)
)
if columns is not None and not is_list_like(columns):
raise TypeError(
"Columns must be called with a collection of some kind, "
"%s was passed" % type(columns)
)
df = self
if index is not None:
df = df._reindex_index(index, fill_value)
if columns is not None:
df = df._reindex_columns(columns, fill_value)
# Copy
if copy and df is self:
return df.copy()
else:
return df
def _reindex_index(
self, index: Optional[Union["Index", Sequence[Any]]], fill_value: Optional[Any]
) -> "DataFrame":
# When axis is index, we can mimic pandas' reindexing by a right outer join.
nlevels = self._internal.index_level
assert nlevels <= 1 or (
isinstance(index, ps.MultiIndex) and nlevels == index.nlevels
), "MultiIndex DataFrame can only be reindexed with a similar pandas-on-Spark MultiIndex."
index_columns = self._internal.index_spark_column_names
frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
if isinstance(index, ps.Index):
if nlevels != index.nlevels:
return DataFrame(index._internal.with_new_columns([])).reindex(
columns=self.columns, fill_value=fill_value
)
index_names = index._internal.index_names
scols = index._internal.index_spark_columns
labels = index._internal.spark_frame.select(
[scol.alias(index_column) for scol, index_column in zip(scols, index_columns)]
)
else:
index = ps.Index(list(index))
labels = index._internal.spark_frame.select(index.spark.column.alias(index_columns[0]))
index_names = self._internal.index_names
if fill_value is not None:
frame_index_columns = [
verify_temp_column_name(frame, "__frame_index_column_{}__".format(i))
for i in range(nlevels)
]
index_scols = [
scol_for(frame, index_col).alias(frame_index_col)
for index_col, frame_index_col in zip(index_columns, frame_index_columns)
]
scols = self._internal.resolved_copy.data_spark_columns
frame = frame.select(index_scols + scols)
temp_fill_value = verify_temp_column_name(frame, "__fill_value__")
labels = labels.withColumn(temp_fill_value, SF.lit(fill_value))
frame_index_scols = [scol_for(frame, col) for col in frame_index_columns]
labels_index_scols = [scol_for(labels, col) for col in index_columns]
joined_df = frame.join(
labels,
on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)],
how="right",
)
joined_df = joined_df.select(
*labels_index_scols,
*[
F.when(
reduce(
lambda c1, c2: c1 & c2,
[
fcol.isNull() & lcol.isNotNull()
for fcol, lcol in zip(frame_index_scols, labels_index_scols)
],
),
scol_for(joined_df, temp_fill_value),
)
.otherwise(scol_for(joined_df, col))
.alias(col)
for col in self._internal.data_spark_column_names
],
)
data_fields = None
else:
joined_df = frame.join(labels, on=index_columns, how="right")
data_fields = [field.copy(nullable=True) for field in self._internal.data_fields]
sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME)
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=index_names,
index_fields=[
field.copy(name=name)
for field, name in zip(
index._internal.index_fields, self._internal.index_spark_column_names
)
],
data_spark_columns=[
scol_for(sdf, col) for col in self._internal.data_spark_column_names
],
data_fields=data_fields,
)
return DataFrame(internal)
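# Illustrative sketch (commented out, not part of this module): the right outer join used
# above is what makes reindexing behave like pandas -- labels missing from the original
# frame come back with NULL data columns, which surface as NaN (or ``fill_value``).
# Assumes an active SparkSession named ``spark``; data and column names are made up.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   frame = spark.createDataFrame([("a", 1), ("b", 2)], ["idx", "val"])
#   labels = spark.createDataFrame([("b",), ("c",)], ["idx"])
#   frame.join(labels, on="idx", how="right").show()  # "c" appears with val = NULL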
def _reindex_columns(
self, columns: Optional[Union[pd.Index, Sequence[Any]]], fill_value: Optional[Any]
) -> "DataFrame":
level = self._internal.column_labels_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError("Expected tuple, got {}".format(type(col).__name__))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError(
"shape (1,{}) doesn't match the shape (1,{})".format(len(col), level)
)
fill_value = np.nan if fill_value is None else fill_value
scols_or_pssers: List[Union[Series, Column]] = []
labels = []
for label in label_columns:
if label in self._internal.column_labels:
scols_or_pssers.append(self._psser_for(label))
else:
scols_or_pssers.append(SF.lit(fill_value).alias(name_like_string(label)))
labels.append(label)
if isinstance(columns, pd.Index):
column_label_names = [
name if is_name_like_tuple(name) else (name,) for name in columns.names
]
internal = self._internal.with_new_columns(
scols_or_pssers, column_labels=labels, column_label_names=column_label_names
)
else:
internal = self._internal.with_new_columns(scols_or_pssers, column_labels=labels)
return DataFrame(internal)
def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame":
"""
Return a DataFrame with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : DataFrame
Its row and column indices are used to define the new indices
of this object.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
Returns
-------
DataFrame
DataFrame with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = ps.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN None
2014-02-15 35.1 NaN medium
"""
if isinstance(other, DataFrame):
return self.reindex(index=other.index, columns=other.columns, copy=copy)
else:
raise TypeError("other must be a pandas-on-Spark DataFrame")
def melt(
self,
id_vars: Optional[Union[Name, List[Name]]] = None,
value_vars: Optional[Union[Name, List[Name]]] = None,
var_name: Optional[Union[str, List[str]]] = None,
value_name: str = "value",
) -> "DataFrame":
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column. If None it uses `frame.columns.name` or
‘variable’.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ps.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> df.melt(value_vars='A')
variable value
0 A a
1 A b
2 A c
>>> ps.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ps.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
column_labels = self._internal.column_labels
if id_vars is None:
id_vars = []
else:
if isinstance(id_vars, tuple):
if self._internal.column_labels_level == 1:
id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]
else:
raise ValueError(
"id_vars must be a list of tuples" " when columns are a MultiIndex"
)
elif is_name_like_value(id_vars):
id_vars = [(id_vars,)]
else:
id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]
non_existence_col = [idv for idv in id_vars if idv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [
nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
]
if len(missing) != 0:
raise KeyError(
"The following 'id_vars' are not present"
" in the DataFrame: {}".format(missing)
)
else:
raise KeyError(
"None of {} are in the {}".format(non_existence_col, column_labels)
)
if value_vars is None:
value_vars = []
else:
if isinstance(value_vars, tuple):
if self._internal.column_labels_level == 1:
value_vars = [
valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars
]
else:
raise ValueError(
"value_vars must be a list of tuples" " when columns are a MultiIndex"
)
elif is_name_like_value(value_vars):
value_vars = [(value_vars,)]
else:
value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars]
non_existence_col = [valv for valv in value_vars if valv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [
nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
]
if len(missing) != 0:
raise KeyError(
"The following 'value_vars' are not present"
" in the DataFrame: {}".format(missing)
)
else:
raise KeyError(
"None of {} are in the {}".format(non_existence_col, column_labels)
)
if len(value_vars) == 0:
value_vars = column_labels
column_labels = [label for label in column_labels if label not in id_vars]
sdf = self._internal.spark_frame
if var_name is None:
if (
self._internal.column_labels_level == 1
and self._internal.column_label_names[0] is None
):
var_name = ["variable"]
else:
var_name = [
name_like_string(name) if name is not None else "variable_{}".format(i)
for i, name in enumerate(self._internal.column_label_names)
]
elif isinstance(var_name, str):
var_name = [var_name]
pairs = F.explode(
F.array(
*[
F.struct(
*[SF.lit(c).alias(name) for c, name in zip(label, var_name)],
*[self._internal.spark_column_for(label).alias(value_name)],
)
for label in column_labels
if label in value_vars
]
)
)
columns = (
[
self._internal.spark_column_for(label).alias(name_like_string(label))
for label in id_vars
]
+ [F.col("pairs.`%s`" % name) for name in var_name]
+ [F.col("pairs.`%s`" % value_name)]
)
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(
InternalFrame(
spark_frame=exploded_df,
index_spark_columns=None,
column_labels=(
[label if len(label) == 1 else (name_like_string(label),) for label in id_vars]
+ [(name,) for name in var_name]
+ [(value_name,)]
),
)
)
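# Illustrative sketch (commented out, not part of this module): ``melt`` unpivots by
# wrapping each value column in a struct, packing the structs into an array, and exploding
# that array so every original column yields one (variable, value) row. Assumes an active
# SparkSession named ``spark``; data and column names are made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([("a", 1, 2)], ["A", "B", "C"])
#   pairs = F.explode(F.array(
#       F.struct(F.lit("B").alias("variable"), F.col("B").alias("value")),
#       F.struct(F.lit("C").alias("variable"), F.col("C").alias("value")),
#   ))
#   sdf.withColumn("pairs", pairs).select("A", "pairs.variable", "pairs.value").show()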
def stack(self) -> DataFrameOrSeries:
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack().sort_index()
cat height 1
weight 0
dog height 3
weight 2
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack().sort_index()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
"""
from pyspark.pandas.series import first_series
if len(self._internal.column_labels) == 0:
return DataFrame(
self._internal.copy(
column_label_names=self._internal.column_label_names[:-1]
).with_filter(SF.lit(False))
)
column_labels: Dict[Label, Dict[Any, Column]] = defaultdict(dict)
index_values = set()
should_returns_series = False
for label in self._internal.column_labels:
new_label = label[:-1]
if len(new_label) == 0:
new_label = None
should_returns_series = True
value = label[-1]
scol = self._internal.spark_column_for(label)
column_labels[new_label][value] = scol
index_values.add(value)
column_labels = dict(sorted(column_labels.items(), key=lambda x: x[0]))
index_name = self._internal.column_label_names[-1]
column_label_names = self._internal.column_label_names[:-1]
if len(column_label_names) == 0:
column_label_names = [None]
index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)
data_columns = [name_like_string(label) for label in column_labels]
structs = [
F.struct(
*[SF.lit(value).alias(index_column)],
*[
(
column_labels[label][value]
if value in column_labels[label]
else SF.lit(None)
).alias(name)
for label, name in zip(column_labels, data_columns)
],
).alias(value)
for value in index_values
]
pairs = F.explode(F.array(*structs))
sdf = self._internal.spark_frame.withColumn("pairs", pairs)
sdf = sdf.select(
self._internal.index_spark_columns
+ [sdf["pairs"][index_column].alias(index_column)]
+ [sdf["pairs"][name].alias(name) for name in data_columns]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col)
for col in (self._internal.index_spark_column_names + [index_column])
],
index_names=self._internal.index_names + [index_name],
index_fields=self._internal.index_fields + [None],
column_labels=list(column_labels),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names,
)
psdf: DataFrame = DataFrame(internal)
if should_returns_series:
return first_series(psdf)
else:
return psdf
def unstack(self) -> DataFrameOrSeries:
"""
Pivot the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series.
.. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and
it could cause serious performance degradation since Spark partitions it row-based.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).
Examples
--------
>>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},
... "B": {"0": "1", "1": "3", "2": "5"},
... "C": {"0": "2", "1": "4", "2": "6"}},
... columns=["A", "B", "C"])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index()
A 0 a
1 b
2 c
B 0 1
1 3
2 5
C 0 2
1 4
2 6
dtype: object
>>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])
>>> df.unstack().sort_index()
X A 0 a
1 b
2 c
B 0 1
1 3
2 5
Y C 0 2
1 4
2 6
dtype: object
For MultiIndex case:
>>> df = ps.DataFrame({"A": ["a", "b", "c"],
... "B": [1, 3, 5],
... "C": [2, 4, 6]},
... columns=["A", "B", "C"])
>>> df = df.set_index('A', append=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
B C
A
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A a b c a b c
0 1.0 NaN NaN 2.0 NaN NaN
1 NaN 3.0 NaN NaN 4.0 NaN
2 NaN NaN 5.0 NaN NaN 6.0
"""
from pyspark.pandas.series import first_series
if self._internal.index_level > 1:
# The index after `reset_index()` will never be used, so use "distributed" index
# as a dummy to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
df = self.reset_index()
index = df._internal.column_labels[: self._internal.index_level - 1]
columns = df.columns[self._internal.index_level - 1]
df = df.pivot_table(
index=index, columns=columns, values=self._internal.column_labels, aggfunc="first"
)
internal = df._internal.copy(
index_names=self._internal.index_names[:-1],
index_fields=df._internal.index_fields[: self._internal.index_level - 1],
column_label_names=(
df._internal.column_label_names[:-1]
+ [
None
if self._internal.index_names[-1] is None
else df._internal.column_label_names[-1]
]
),
)
return DataFrame(internal)
# TODO: The code here is similar to melt. Should we deduplicate?
column_labels = self._internal.column_labels
ser_name = SPARK_DEFAULT_SERIES_NAME
sdf = self._internal.spark_frame
new_index_columns = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
]
new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, []))
pairs = F.explode(
F.array(
*[
F.struct(
*[SF.lit(c).alias(name) for c, name in zip(idx, new_index_columns)],
*[self._internal.spark_column_for(idx).alias(ser_name)],
)
for idx in column_labels
]
)
)
columns = [
F.col("pairs.%s" % name)
for name in new_index_columns[: self._internal.column_labels_level]
] + [F.col("pairs.%s" % ser_name)]
new_index_len = len(new_index_columns)
existing_index_columns = []
for i, (index_name, index_field) in enumerate(
zip(self._internal.index_names, self._internal.index_fields)
):
name = SPARK_INDEX_NAME_FORMAT(i + new_index_len)
new_index_map.append((name, index_name, index_field.copy(name=name)))
existing_index_columns.append(self._internal.index_spark_columns[i].alias(name))
exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns)
index_spark_column_names, index_names, index_fields = zip(*new_index_map)
return first_series(
DataFrame(
InternalFrame(
exploded_df,
index_spark_columns=[
scol_for(exploded_df, col) for col in index_spark_column_names
],
index_names=list(index_names),
index_fields=list(index_fields),
column_labels=[None],
)
)
)
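# Illustrative sketch (commented out, not part of this module): for a MultiIndex,
# ``unstack`` above is expressed as a ``pivot_table`` with a "first" aggregation, which is
# safe because each (remaining index, pivoted level) pair holds at most one value. The
# frame below is made up.
#
#   import pyspark.pandas as ps
#   psdf = ps.DataFrame({"row": [0, 1], "A": ["a", "b"], "B": [1, 3]})
#   psdf.pivot_table(index=["row"], columns="A", values="B", aggfunc="first")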
# TODO: axis, skipna, level and **kwargs should be implemented.
def all(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> "Series":
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
dtype: bool
Include only boolean columns by setting `bool_only=True`.
>>> df.all(bool_only=True)
col1 True
col2 False
dtype: bool
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if len(column_labels) == 0:
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.min(F.coalesce(scol.cast("boolean"), SF.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
return self._result_aggregated(column_labels, applied)
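# Illustrative sketch (commented out, not part of this module): the aggregation above
# treats NULLs as True via ``coalesce`` so they do not break ``all``, and ``min`` over a
# boolean column is True only when every remaining value is True. Assumes an active
# SparkSession named ``spark``; the data is made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(True,), (None,), (True,)], "col1 boolean")
#   sdf.select(F.min(F.coalesce(F.col("col1").cast("boolean"), F.lit(True)))).show()  # true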
# TODO: axis, skipna, level and **kwargs should be implemented.
def any(self, axis: Axis = 0, bool_only: Optional[bool] = None) -> "Series":
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if any column-wise value returns True.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
dtype: bool
Include only boolean columns by setting `bool_only=True`.
>>> df.any(bool_only=True)
col1 False
col2 True
dtype: bool
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if len(column_labels) == 0:
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
any_col = F.max(F.coalesce(scol.cast("boolean"), SF.lit(False)))
applied.append(F.when(any_col.isNull(), False).otherwise(any_col))
return self._result_aggregated(column_labels, applied)
def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]:
"""
Filter column labels of boolean columns (without None).
"""
bool_column_labels = []
for label in column_labels:
psser = self._psser_for(label)
if is_bool_dtype(psser):
# Rely on dtype rather than spark type because
# columns that consist of bools and Nones should be excluded
# if bool_only is True
bool_column_labels.append(label)
return bool_column_labels
def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> "Series":
"""
Given aggregated Spark columns and respective column labels from the original
pandas-on-Spark DataFrame, construct the result Series.
"""
from pyspark.pandas.series import first_series
cols = []
result_scol_name = "value"
for label, applied_col in zip(column_labels, scols):
cols.append(
F.struct(
*[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)],
*[applied_col.alias(result_scol_name)],
)
)
# Statements under this comment implement spark frame transformations as below:
# From:
# +-------------------------------------------------------------------------------------+
# |arrays |
# +-------------------------------------------------------------------------------------+
# |[{col1, true}, {col2, true}, {col3, false}, {col4, true}]|
# +-------------------------------------------------------------------------------------+
# To:
# +-------------+
# |col |
# +-------------+
# |{col1, true} |
# |{col2, true} |
# |{col3, false}|
# |{col4, true} |
# +-------------+
# To:
# +-----------------+-----+
# |__index_level_0__|value|
# +-----------------+-----+
# |col1 |true |
# |col2 |true |
# |col3 |false|
# |col4 |true |
# +-----------------+-----+
sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select(
F.explode(F.col("arrays"))
)
sdf = sdf.selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))
for i in range(self._internal.column_labels_level)
],
index_names=self._internal.column_label_names,
column_labels=[None],
data_spark_columns=[scol_for(sdf, result_scol_name)],
)
# (cont.) The result Series should look as below:
# col1 False
# col2 True
# col3 True
# col4 True
# dtype: bool
return first_series(DataFrame(internal))
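# Illustrative sketch (commented out, not part of this module): the array/explode/"col.*"
# reshaping drawn in the comments above looks like this in plain Spark. Assumes an active
# SparkSession named ``spark``; data and column names are made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(True, False)], ["col1", "col2"])
#   arrays = sdf.select(F.array(
#       F.struct(F.lit("col1").alias("__index_level_0__"), F.col("col1").alias("value")),
#       F.struct(F.lit("col2").alias("__index_level_0__"), F.col("col2").alias("value")),
#   ).alias("arrays"))
#   arrays.select(F.explode("arrays")).selectExpr("col.*").show()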
# TODO: add axis, pct, na_option parameter
def rank(
self, method: str = "average", ascending: bool = True, numeric_only: Optional[bool] = None
) -> "DataFrame":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying a partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps between group ranks.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
If numeric_only is set to True, only numeric columns are ranked.
>>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns=['A', 'B'])
>>> df
A B
0 1 a
1 2 b
2 2 d
3 3 c
>>> df.rank(numeric_only=True)
A
0 1.0
1 2.5
2 2.5
3 4.0
"""
if numeric_only:
numeric_col_names = []
for label in self._internal.column_labels:
psser = self._psser_for(label)
if isinstance(psser.spark.data_type, (NumericType, BooleanType)):
numeric_col_names.append(psser.name)
psdf = self[numeric_col_names] if numeric_only else self
return psdf._apply_series_op(
lambda psser: psser._rank(method=method, ascending=ascending), should_resolve=True
)
def filter(
self,
items: Optional[Sequence[Any]] = None,
like: Optional[str] = None,
regex: Optional[str] = None,
axis: Optional[Axis] = None,
) -> "DataFrame":
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
For a Series,
>>> # select rows by name
>>> df.one.filter(items=['rabbit'])
rabbit 4
Name: one, dtype: int64
>>> # select rows by regular expression
>>> df.one.filter(regex='e$')
mouse 1
Name: one, dtype: int64
>>> # select rows containing 'bbi'
>>> df.one.filter(like='bbi')
rabbit 4
Name: one, dtype: int64
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive"
)
axis = validate_axis(axis, none_axis=1)
index_scols = self._internal.index_spark_columns
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis == 0:
if len(index_scols) == 1:
if len(items) <= ps.get_option("compute.isin_limit"):
col = index_scols[0].isin([SF.lit(item) for item in items])
return DataFrame(self._internal.with_filter(col))
else:
item_sdf_col = verify_temp_column_name(
self._internal.spark_frame, "__item__"
)
item_sdf = default_session().createDataFrame(
pd.DataFrame({item_sdf_col: items})
)
joined_sdf = self._internal.spark_frame.join(
other=F.broadcast(item_sdf),
on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)),
how="semi",
)
return DataFrame(self._internal.with_new_sdf(joined_sdf))
else:
# for multi-index
col = None
for item in items:
if not isinstance(item, tuple):
raise TypeError("Unsupported type {}".format(type(item).__name__))
if not item:
raise ValueError("The item should not be empty.")
midx_col = None
for i, element in enumerate(item):
if midx_col is None:
midx_col = index_scols[i] == SF.lit(element)
else:
midx_col = midx_col & (index_scols[i] == SF.lit(element))
if col is None:
col = midx_col
else:
col = col | midx_col
return DataFrame(self._internal.with_filter(col))
else:
return self[items]
elif like is not None:
if axis == 0:
col = None
for index_scol in index_scols:
if col is None:
col = index_scol.contains(like)
else:
col = col | index_scol.contains(like)
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
output_labels = [label for label in column_labels if any(like in i for i in label)]
return self[output_labels]
elif regex is not None:
if axis == 0:
col = None
for index_scol in index_scols:
if col is None:
col = index_scol.rlike(regex)
else:
col = col | index_scol.rlike(regex)
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
matcher = re.compile(regex)
output_labels = [
label
for label in column_labels
if any(matcher.search(i) is not None for i in label)
]
return self[output_labels]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def rename(
self,
mapper: Optional[Union[Dict, Callable[[Any], Any]]] = None,
index: Optional[Union[Dict, Callable[[Any], Any]]] = None,
columns: Optional[Union[Dict, Callable[[Any], Any]]] = None,
axis: Axis = "index",
inplace: bool = False,
level: Optional[int] = None,
errors: str = "ignore",
) -> Optional["DataFrame"]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
will be left as-is. Extra labels listed don’t throw an error.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to that axis’ values.
Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
and `columns`.
index : dict-like or function
Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
columns : dict-like or function
Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
axis : int or str, default 'index'
Axis to target with mapper. Can be either the axis name ('index', 'columns') or
number (0, 1).
inplace : bool, default False
Whether to return a new DataFrame.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
contains labels that are not present in the Index being transformed. If 'ignore',
existing keys will be renamed and extra keys will be ignored.
Returns
-------
DataFrame with the renamed axis labels.
Raises
------
`KeyError`
If any of the labels is not found in the selected axis and "errors='raise'".
Examples
--------
>>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE
a c
0 1 4
1 2 5
2 3 6
>>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> def str_lower(s) -> str:
... return str.lower(s)
>>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE
a b
0 1 4
1 2 5
2 3 6
>>> def mul10(x) -> int:
... return x * 10
>>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
>>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
>>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE
x y
A B C D
0 1 2 3 4
1 5 6 7 8
>>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
>>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE
a b
x a 1 2
b 3 4
y c 5 6
d 7 8
"""
def gen_mapper_fn(
mapper: Union[Dict, Callable[[Any], Any]]
) -> Tuple[Callable[[Any], Any], Dtype, DataType]:
if isinstance(mapper, dict):
mapper_dict = mapper
type_set = set(map(lambda x: type(x), mapper_dict.values()))
if len(type_set) > 1:
raise ValueError("Mapper dict should have the same value type.")
dtype, spark_return_type = pandas_on_spark_type(list(type_set)[0])
def mapper_fn(x: Any) -> Any:
if x in mapper_dict:
return mapper_dict[x]
else:
if errors == "raise":
raise KeyError("Index include value which is not in the `mapper`")
return x
elif callable(mapper):
mapper_callable = cast(Callable, mapper)
return_type = cast(ScalarType, infer_return_type(mapper))
dtype = return_type.dtype
spark_return_type = return_type.spark_type
def mapper_fn(x: Any) -> Any:
return mapper_callable(x)
else:
raise ValueError(
"`mapper` or `index` or `columns` should be "
"either dict-like or function type."
)
return mapper_fn, dtype, spark_return_type
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper:
axis = validate_axis(axis)
if axis == 0:
index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(
mapper
)
elif axis == 1:
columns_mapper_fn, _, _ = gen_mapper_fn(mapper)
else:
if index:
index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype = gen_mapper_fn(
index
)
if columns:
columns_mapper_fn, _, _ = gen_mapper_fn(columns)
if not index and not columns:
raise ValueError("Either `index` or `columns` should be provided.")
psdf = self.copy()
if index_mapper_fn:
# rename index labels, if `level` is None, rename all index columns, otherwise only
# rename the corresponding level index.
# implement this by transforming the underlying Spark dataframe.
# Example:
# suppose the psdf index columns in the underlying Spark dataframe are "index_0", "index_1";
# if renaming level 0 index labels, it will do:
# ``psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
# if renaming all index labels (`level` is None), then it will do:
# ```
# psdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
# .withColumn("index_1", mapper_fn_udf(col("index_1"))
# ```
index_columns = psdf._internal.index_spark_column_names
num_indices = len(index_columns)
if level:
if level < 0 or level >= num_indices:
raise ValueError("level should be an integer between [0, num_indices)")
@pandas_udf(returnType=index_mapper_ret_stype) # type: ignore[call-overload]
def index_mapper_udf(s: pd.Series) -> pd.Series:
return s.map(index_mapper_fn)
index_spark_columns = psdf._internal.index_spark_columns.copy()
index_fields = psdf._internal.index_fields.copy()
if level is None:
for i in range(num_indices):
index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias(
index_columns[i]
)
index_fields[i] = index_fields[i].copy(
dtype=index_mapper_ret_dtype,
spark_type=index_mapper_ret_stype,
nullable=True,
)
else:
index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(
index_columns[level]
)
index_fields[level] = index_fields[level].copy(
dtype=index_mapper_ret_dtype,
spark_type=index_mapper_ret_stype,
nullable=True,
)
psdf = DataFrame(
psdf._internal.copy(
index_spark_columns=index_spark_columns, index_fields=index_fields
)
)
if columns_mapper_fn:
# rename column names.
# Will modify `_internal._column_labels` and transform the underlying Spark dataframe
# so its column names match `_internal._column_labels`.
if level:
if level < 0 or level >= psdf._internal.column_labels_level:
raise ValueError("level should be an integer between [0, column_labels_level)")
def gen_new_column_labels_entry(column_labels_entry: Label) -> Label:
if level is None:
# rename all level columns
return tuple(map(columns_mapper_fn, column_labels_entry))
else:
# only rename specified level column
entry_list = list(column_labels_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))
new_data_pssers = [
psdf._psser_for(old_label).rename(new_label)
for old_label, new_label in zip(psdf._internal.column_labels, new_column_labels)
]
psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers))
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf
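# Illustrative sketch (commented out, not part of this module): index relabeling in
# ``rename`` above is done with a Series-to-Series pandas UDF applied to the index
# column(s) of the underlying Spark frame. Assumes an active SparkSession named ``spark``;
# the mapping, data and column names are made up.
#
#   import pandas as pd
#   from pyspark.sql import SparkSession
#   from pyspark.sql.functions import pandas_udf
#   spark = SparkSession.builder.getOrCreate()
#   @pandas_udf("long")
#   def add_ten(s: pd.Series) -> pd.Series:
#       return s.map(lambda x: x + 10)
#   sdf = spark.createDataFrame([(1, "a"), (2, "b")], ["__index_level_0__", "A"])
#   sdf.withColumn("__index_level_0__", add_ten("__index_level_0__")).show()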
def rename_axis(
self,
mapper: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,
index: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,
columns: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]] = None,
axis: Optional[Axis] = 0,
inplace: Optional[bool] = False,
) -> Optional["DataFrame"]:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
A scalar, list-like, dict-like or functions transformations to
apply to the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
inplace : bool, default False
Modifies the object directly, instead of creating a new DataFrame.
Returns
-------
DataFrame, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
The second calling convention will modify the names of the
corresponding index specified by axis.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
>>> df = ps.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=["dog", "cat", "monkey"],
... columns=["num_legs", "num_arms"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
limbs num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df = ps.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=index,
... columns=["num_legs", "num_arms"])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
class name
mammal cat 4 0
dog 4 0
monkey 2 2
>>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
TYPE NAME
mammal cat 4 0
dog 4 0
monkey 2 2
"""
def gen_names(
v: Union[Any, Sequence[Any], Dict[Name, Any], Callable[[Name], Any]],
curnames: List[Name],
) -> List[Label]:
newnames: List[Name]
if is_scalar(v):
newnames = [cast(Name, v)]
elif is_list_like(v) and not is_dict_like(v):
newnames = list(cast(Sequence[Name], v))
elif is_dict_like(v):
v_dict = cast(Dict[Name, Name], v)
newnames = [v_dict[name] if name in v_dict else name for name in curnames]
elif callable(v):
v_callable = cast(Callable[[Name], Name], v)
newnames = [v_callable(name) for name in curnames]
else:
raise ValueError(
"`mapper` or `index` or `columns` should be "
"either dict-like or function type."
)
if len(newnames) != len(curnames):
raise ValueError(
"Length of new names must be {}, got {}".format(len(curnames), len(newnames))
)
return [name if is_name_like_tuple(name) else (name,) for name in newnames]
if mapper is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.")
if mapper is not None:
axis = validate_axis(axis)
if axis == 0:
index = mapper
elif axis == 1:
columns = mapper
column_label_names = (
gen_names(columns, self.columns.names)
if columns is not None
else self._internal.column_label_names
)
index_names = (
gen_names(index, self.index.names) if index is not None else self._internal.index_names
)
internal = self._internal.copy(
index_names=index_names, column_label_names=column_label_names
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def keys(self) -> pd.Index:
"""
Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object')
"""
return self.columns
def pct_change(self, periods: int = 1) -> "DataFrame":
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
DataFrame
Examples
--------
Percentage change in French franc, Deutsche Mark, and Italian lira
from 1980-01-01 to 1980-03-01.
>>> df = ps.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
You can set periods to shift for forming percent change
>>> df.pct_change(2)
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 NaN NaN NaN
1980-03-01 0.067912 0.073814 0.06883
"""
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
def op(psser: ps.Series) -> Column:
prev_row = F.lag(psser.spark.column, periods).over(window)
return ((psser.spark.column - prev_row) / prev_row).alias(
psser._internal.data_spark_column_names[0]
)
return self._apply_series_op(op, should_resolve=True)
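# Illustrative sketch (commented out, not part of this module): ``pct_change`` above is a
# ``lag`` over an unpartitioned, ordered window (hence the performance warning in the
# docstring). Assumes an active SparkSession named ``spark``; data and column names are
# made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   from pyspark.sql.window import Window
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(1, 4.0), (2, 5.0), (3, 6.0)], ["ord", "price"])
#   prev = F.lag("price", 1).over(Window.orderBy("ord"))
#   sdf.select(((F.col("price") - prev) / prev).alias("pct_change")).show()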
# TODO: axis = 1
def idxmax(self, axis: Axis = 0) -> "Series":
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the maximum value using `to_pandas()`
because we suppose the number of rows with the maximum value is usually small.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmax
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a 2
b 0
c 2
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a x 2
b y 0
c z 2
dtype: int64
"""
max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns)
sdf_max = self._internal.spark_frame.select(*max_cols).head()
# `sdf_max` looks like below
# +------+------+------+
# |(a, x)|(b, y)|(c, z)|
# +------+------+------+
# | 3| 4.0| 400|
# +------+------+------+
conds = (
scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max)
)
cond = reduce(lambda x, y: x | y, conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax()))
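# Illustrative sketch (commented out, not part of this module): ``idxmax`` above first
# aggregates the per-column maxima in one pass, then keeps only the rows matching any of
# them before handing the (small) result to pandas. Assumes an active SparkSession named
# ``spark``; data and column names are made up.
#
#   from pyspark.sql import SparkSession, functions as F
#   spark = SparkSession.builder.getOrCreate()
#   sdf = spark.createDataFrame([(1, 4.0), (3, 2.0), (3, 3.0)], ["a", "b"])
#   maxes = sdf.select(F.max("a"), F.max("b")).head()
#   sdf.where((F.col("a") == maxes[0]) | (F.col("b") == maxes[1])).show()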
# TODO: axis = 1
def idxmin(self, axis: Axis = 0) -> "Series":
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
        .. note:: This API collects all rows with the minimum value using `to_pandas()`
            because we suppose the number of rows with min values is usually small in general.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmin
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a 0
b 3
c 1
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a x 0
b y 3
c z 1
dtype: int64
"""
min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns)
sdf_min = self._internal.spark_frame.select(*min_cols).head()
conds = (
scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min)
)
cond = reduce(lambda x, y: x | y, conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin()))
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
null_counts: Optional[bool] = None,
) -> None:
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used.
null_counts : bool, optional
Whether to show the non-null counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = ps.DataFrame(
... {"int_col": int_values, "text_col": text_values, "float_col": float_values},
... columns=['int_col', 'text_col', 'float_col'])
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True) # doctest: +SKIP
<class 'pyspark.pandas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False) # doctest: +SKIP
<class 'pyspark.pandas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open('%s/info.txt' % path, "w",
... encoding="utf-8") as f:
... _ = f.write(s)
>>> with open('%s/info.txt' % path) as f:
... f.readlines() # doctest: +SKIP
["<class 'pyspark.pandas.frame.DataFrame'>\\n",
'Index: 5 entries, 0 to 4\\n',
'Data columns (total 3 columns):\\n',
' # Column Non-Null Count Dtype \\n',
'--- ------ -------------- ----- \\n',
' 0 int_col 5 non-null int64 \\n',
' 1 text_col 5 non-null object \\n',
' 2 float_col 5 non-null float64\\n',
'dtypes: float64(1), int64(1), object(1)']
"""
# To avoid pandas' existing config affects pandas-on-Spark.
# TODO: should we have corresponding pandas-on-Spark configs?
with pd.option_context(
"display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize
):
try:
# hack to use pandas' info as is.
object.__setattr__(self, "_data", self)
count_func = self.count
self.count = ( # type: ignore[assignment]
lambda: count_func()._to_pandas() # type: ignore[assignment, misc, union-attr]
)
return pd.DataFrame.info(
self, # type: ignore[arg-type]
verbose=verbose,
buf=buf,
max_cols=max_cols,
memory_usage=False,
null_counts=null_counts,
)
finally:
del self._data
self.count = count_func # type: ignore[assignment]
# TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'
def quantile(
self,
q: Union[float, Iterable[float]] = 0.5,
axis: Axis = 0,
numeric_only: bool = True,
accuracy: int = 10000,
) -> DataFrameOrSeries:
"""
Return value at the given quantile.
        .. note:: Unlike pandas, the quantile in pandas-on-Spark is an approximated quantile
            based upon approximate percentile computation because computing quantiles across a
            large dataset is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
axis : int or str, default 0 or 'index'
Can only be set to 0 at the moment.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be computed as well.
Can only be set to True at the moment.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})
>>> psdf
a b
0 1 6
1 2 7
2 3 8
3 4 9
4 5 0
>>> psdf.quantile(.5)
a 3.0
b 7.0
Name: 0.5, dtype: float64
>>> psdf.quantile([.25, .5, .75])
a b
0.25 2.0 6.0
0.50 3.0 7.0
0.75 4.0 8.0
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if not isinstance(accuracy, int):
raise TypeError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
qq: Union[float, List[float]] = list(q) if isinstance(q, Iterable) else q
for v in qq if isinstance(qq, list) else [qq]:
if not isinstance(v, float):
raise TypeError(
"q must be a float or an array of floats; however, [%s] found." % type(v)
)
if v < 0.0 or v > 1.0:
raise ValueError("percentiles should all be in the interval [0, 1].")
def quantile(psser: "Series") -> Column:
spark_type = psser.spark.data_type
spark_column = psser.spark.column
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if isinstance(qq, list):
            # First calculate the percentiles from all columns and map them to each of the `quantiles`
# by creating each entry as a struct. So, it becomes an array of structs as below:
#
# +-----------------------------------------+
# | arrays|
# +-----------------------------------------+
# |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|
# +-----------------------------------------+
percentile_cols: List[Column] = []
percentile_col_names: List[str] = []
column_labels: List[Label] = []
for label, column in zip(
self._internal.column_labels, self._internal.data_spark_column_names
):
psser = self._psser_for(label)
is_numeric_or_boolean = isinstance(
psser.spark.data_type, (NumericType, BooleanType)
)
keep_column = not numeric_only or is_numeric_or_boolean
if keep_column:
percentile_col = quantile(psser)
percentile_cols.append(percentile_col.alias(column))
percentile_col_names.append(column)
column_labels.append(label)
if len(percentile_cols) == 0:
return DataFrame(index=qq)
sdf = self._internal.spark_frame.select(percentile_cols)
# Here, after select percentile cols, a spark_frame looks like below:
# +---------+---------+
# | a| b|
# +---------+---------+
# |[2, 3, 4]|[6, 7, 8]|
# +---------+---------+
cols_dict: Dict[str, List[Column]] = {}
for column in percentile_col_names:
cols_dict[column] = list()
for i in range(len(qq)):
cols_dict[column].append(scol_for(sdf, column)[i].alias(column))
internal_index_column = SPARK_DEFAULT_INDEX_NAME
cols = []
for i, col in enumerate(zip(*cols_dict.values())):
cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col))
sdf = sdf.select(F.array(*cols).alias("arrays"))
# And then, explode it and manually set the index.
# +-----------------+---+---+
# |__index_level_0__| a| b|
# +-----------------+---+---+
# | 0.25| 2| 6|
# | 0.5| 3| 7|
# | 0.75| 4| 8|
# +-----------------+---+---+
sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, internal_index_column)],
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names],
)
return DataFrame(internal)
else:
return self._reduce_for_stat_function(
quantile, name="quantile", numeric_only=numeric_only
).rename(qq)
def query(self, expr: str, inplace: bool = False) -> Optional["DataFrame"]:
"""
Query the columns of a DataFrame with a boolean expression.
        .. note:: Internal columns that start with a '__' prefix can be accessed; however,
            they are not supposed to be accessed.
.. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the
            pandas-specific syntax such as `@` is not supported. If you want the pandas syntax,
            you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should
            be aware that `query_func` will be executed on different nodes in a distributed manner.
So, for example, to use `@` syntax, make sure the variable is serialized by, for
example, putting it within the closure as below.
>>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})
>>> def query_func(pdf):
... num = 1995
... return pdf.query('A > @num')
>>> df.pandas_on_spark.apply_batch(query_func)
A B
1996 1996 1996
1997 1997 1997
1998 1998 1998
1999 1999 1999
Parameters
----------
expr : str
The query string to evaluate.
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
Examples
--------
>>> df = ps.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
if isinstance(self.columns, pd.MultiIndex):
raise TypeError("Doesn't support for MultiIndex columns")
if not isinstance(expr, str):
raise TypeError(
"expr must be a string to be evaluated, {} given".format(type(expr).__name__)
)
inplace = validate_bool_kwarg(inplace, "inplace")
data_columns = [label[0] for label in self._internal.column_labels]
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
+ [
scol.alias(col)
for scol, col in zip(self._internal.data_spark_columns, data_columns)
]
).filter(expr)
internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def take(self, indices: List[int], axis: Axis = 0, **kwargs: Any) -> "DataFrame":
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3]).sort_index()
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2]).sort_index()
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
axis = validate_axis(axis)
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise TypeError("`indices` must be a list-like except dict or set")
if axis == 0:
return cast(DataFrame, self.iloc[indices, :])
else:
return cast(DataFrame, self.iloc[:, indices])
def eval(self, expr: str, inplace: bool = False) -> Optional[DataFrameOrSeries]:
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
Returns
-------
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Examples
--------
>>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pyspark.pandas.series import first_series
if isinstance(self.columns, pd.MultiIndex):
raise TypeError("`eval` is not supported for multi-index columns")
inplace = validate_bool_kwarg(inplace, "inplace")
should_return_series = False
series_name = None
should_return_scalar = False
        # Since `eval_func` doesn't have a type hint, inferring the schema is always performed
# in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`,
# and `should_return_scalar` can be updated.
def eval_func(pdf): # type: ignore[no-untyped-def]
nonlocal should_return_series
nonlocal series_name
nonlocal should_return_scalar
result_inner = pdf.eval(expr, inplace=inplace)
if inplace:
result_inner = pdf
if isinstance(result_inner, pd.Series):
should_return_series = True
series_name = result_inner.name
result_inner = result_inner.to_frame()
elif is_scalar(result_inner):
should_return_scalar = True
result_inner = pd.Series(result_inner).to_frame()
return result_inner
result = self.pandas_on_spark.apply_batch(eval_func)
if inplace:
# Here, the result is always a frame because the error is thrown during schema inference
# from pandas.
self._update_internal_frame(result._internal, requires_same_anchor=False)
return None
elif should_return_series:
return first_series(result).rename(series_name)
elif should_return_scalar:
return first_series(result)[0]
else:
# Returns a frame
return result
def explode(self, column: Name) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Examples
--------
>>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 [] 1
2 [3, 4] 1
>>> df.explode('A')
A B
0 1.0 1
0 2.0 1
0 3.0 1
1 NaN 1
2 3.0 1
2 4.0 1
"""
from pyspark.pandas.series import Series
if not is_name_like_value(column):
raise TypeError("column must be a scalar")
psdf: DataFrame = DataFrame(self._internal.resolved_copy)
psser = psdf[column]
if not isinstance(psser, Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(column)
)
if not isinstance(psser.spark.data_type, ArrayType):
return self.copy()
sdf = psdf._internal.spark_frame.withColumn(
psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column)
)
data_fields = psdf._internal.data_fields.copy()
idx = psdf._internal.column_labels.index(psser._column_label)
field = data_fields[idx]
spark_type = cast(ArrayType, field.spark_type).elementType
dtype = spark_type_to_pandas_dtype(spark_type)
data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True)
internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)
return DataFrame(internal)
def mad(self, axis: Axis = 0) -> "Series":
"""
Return the mean absolute deviation of values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
>>> df.mad()
a 0.666667
b 0.066667
dtype: float64
>>> df.mad(axis=1)
0 0.45
1 0.90
2 1.35
3 NaN
dtype: float64
"""
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if axis == 0:
def get_spark_column(psdf: DataFrame, label: Label) -> Column:
scol = psdf._internal.spark_column_for(label)
col_type = psdf._internal.spark_type_for(label)
if isinstance(col_type, BooleanType):
scol = scol.cast("integer")
return scol
new_column_labels: List[Label] = []
for label in self._internal.column_labels:
                # Keep only columns of numeric or boolean type.
dtype = self._psser_for(label).spark.data_type
if isinstance(dtype, (NumericType, BooleanType)):
new_column_labels.append(label)
new_columns = [
F.avg(get_spark_column(self, label)).alias(name_like_string(label))
for label in new_column_labels
]
mean_data = self._internal.spark_frame.select(*new_columns).first()
new_columns = [
F.avg(
F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)])
).alias(name_like_string(label))
for label in new_column_labels
]
sdf = self._internal.spark_frame.select(
*[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns
)
# The data is expected to be small so it's fine to transpose/use default index.
with ps.option_context("compute.max_rows", 1):
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=new_column_labels,
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal).transpose())
else:
@pandas_udf(returnType=DoubleType()) # type: ignore[call-overload]
def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
return pd.concat(cols, axis=1).mad(axis=1)
internal = self._internal.copy(
column_labels=[None],
data_spark_columns=[
calculate_columns_axis(*self._internal.data_spark_columns).alias(
SPARK_DEFAULT_SERIES_NAME
)
],
data_fields=[None],
column_label_names=None,
)
return first_series(DataFrame(internal))
def tail(self, n: int = 5) -> "DataFrame":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail() # doctest: +SKIP
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3) # doctest: +SKIP
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3) # doctest: +SKIP
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if not isinstance(n, int):
raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__))
if n < 0:
n = len(self) + n
if n <= 0:
return ps.DataFrame(self._internal.with_filter(SF.lit(False)))
# Should use `resolved_copy` here for the case like `(psdf + 1).tail()`
sdf = self._internal.resolved_copy.spark_frame
rows = sdf.tail(n)
new_sdf = default_session().createDataFrame(rows, sdf.schema)
return DataFrame(self._internal.with_new_sdf(new_sdf))
def align(
self,
other: DataFrameOrSeries,
join: str = "outer",
axis: Optional[Axis] = None,
copy: bool = True,
) -> Tuple["DataFrame", DataFrameOrSeries]:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (DataFrame, type of other)
Aligned objects.
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
Align both axis:
>>> aligned_l, aligned_r = df1.align(df2)
>>> aligned_l.sort_index()
a b c
10 1.0 a NaN
11 NaN None NaN
12 NaN None NaN
20 2.0 b NaN
30 3.0 c NaN
>>> aligned_r.sort_index()
a b c
10 4.0 NaN d
11 5.0 NaN e
12 6.0 NaN f
20 NaN NaN None
30 NaN NaN None
Align only axis=0 (index):
>>> aligned_l, aligned_r = df1.align(df2, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
a c
10 4.0 d
11 5.0 e
12 6.0 f
20 NaN None
30 NaN None
Align only axis=1 (column):
>>> aligned_l, aligned_r = df1.align(df2, axis=1)
>>> aligned_l.sort_index()
a b c
10 1 a NaN
20 2 b NaN
30 3 c NaN
>>> aligned_r.sort_index()
a b c
10 4 NaN d
11 5 NaN e
12 6 NaN f
Align with the join type "inner":
>>> aligned_l, aligned_r = df1.align(df2, join="inner")
>>> aligned_l.sort_index()
a
10 1
>>> aligned_r.sort_index()
a
10 4
Align with a Series:
>>> s = ps.Series([7, 8, 9], index=[10, 11, 12])
>>> aligned_l, aligned_r = df1.align(s, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> ps.reset_option("compute.ops_on_diff_frames")
"""
from pyspark.pandas.series import Series, first_series
if not isinstance(other, (DataFrame, Series)):
raise TypeError("unsupported type: {}".format(type(other).__name__))
how = validate_how(join)
axis = validate_axis(axis, None)
right_is_series = isinstance(other, Series)
if right_is_series:
if axis is None:
raise ValueError("Must specify axis=0 or 1")
elif axis != 0:
raise NotImplementedError(
"align currently only works for axis=0 when right is Series"
)
left = self
right = other
if (axis is None or axis == 0) and not same_anchor(left, right):
combined = combine_frames(left, right, how=how)
left = combined["this"]
right = combined["that"]
if right_is_series:
right = first_series(cast(DataFrame[Any], right)).rename(other.name)
if (
axis is None or axis == 1
) and left._internal.column_labels != right._internal.column_labels:
if left._internal.column_labels_level != right._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
left = left.copy()
right = right.copy()
if how == "full":
column_labels = sorted(
list(set(left._internal.column_labels) | set(right._internal.column_labels))
)
elif how == "inner":
column_labels = sorted(
list(set(left._internal.column_labels) & set(right._internal.column_labels))
)
elif how == "left":
column_labels = left._internal.column_labels
else:
column_labels = right._internal.column_labels
for label in column_labels:
if label not in left._internal.column_labels:
left[label] = SF.lit(None).cast(DoubleType())
left = left[column_labels]
for label in column_labels:
if label not in right._internal.column_labels:
right[label] = SF.lit(None).cast(DoubleType())
right = right[column_labels]
return (left.copy(), right.copy()) if copy else (left, right)
@staticmethod
def from_dict(
data: Dict[Name, Sequence[Any]],
orient: str = "columns",
dtype: Union[str, Dtype] = None,
columns: Optional[List[Name]] = None,
) -> "DataFrame":
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}
>>> ps.DataFrame.from_dict(data)
col_1 col_2
0 3 10
1 2 20
2 1 30
3 0 40
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}
>>> ps.DataFrame.from_dict(data, orient='index').sort_index()
0 1 2 3
row_1 3 2 1 0
row_2 10 20 30 40
When using the 'index' orientation, the column names can be
specified manually:
>>> ps.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D']).sort_index()
A B C D
row_1 3 2 1 0
row_2 10 20 30 40
"""
return DataFrame(
pd.DataFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns # type: ignore[arg-type]
)
)
# Override the `groupby` to specify the actual return type annotation.
def groupby(
self,
by: Union[Name, "Series", List[Union[Name, "Series"]]],
axis: Axis = 0,
as_index: bool = True,
dropna: bool = True,
) -> "DataFrameGroupBy":
return cast(
"DataFrameGroupBy", super().groupby(by=by, axis=axis, as_index=as_index, dropna=dropna)
)
groupby.__doc__ = Frame.groupby.__doc__
def _build_groupby(
self, by: List[Union["Series", Label]], as_index: bool, dropna: bool
) -> "DataFrameGroupBy":
from pyspark.pandas.groupby import DataFrameGroupBy
return DataFrameGroupBy._build(self, by, as_index=as_index, dropna=dropna)
def _to_internal_pandas(self) -> pd.DataFrame:
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.to_pandas_frame
def _get_or_create_repr_pandas_cache(self, n: int) -> Union[pd.DataFrame, pd.Series]:
if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache:
object.__setattr__(
self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()}
)
return self._repr_pandas_cache[n]
def __repr__(self) -> str:
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
pdf = cast("DataFrame", self._get_or_create_repr_pandas_cache(max_display_count))
pdf_length = len(pdf)
pdf = cast("DataFrame", pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format(
nrows=nrows, ncols=ncols
)
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self) -> str:
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True)
pdf = self._get_or_create_repr_pandas_cache(max_display_count)
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = (
"\n<p>Showing only the first {rows} rows "
"{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols)
)
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True)
def __getitem__(self, key: Any) -> Any:
from pyspark.pandas.series import Series
if key is None:
raise KeyError("none key")
elif isinstance(key, Series):
return self.loc[key.astype(bool)]
elif isinstance(key, slice):
            if any(type(n) is int for n in [key.start, key.stop]):
# Seems like pandas Frame always uses int as positional search when slicing
# with ints.
return self.iloc[key]
return self.loc[key]
elif is_name_like_value(key):
return self.loc[:, key]
elif is_list_like(key):
return self.loc[:, list(key)]
raise NotImplementedError(key)
def __setitem__(self, key: Any, value: Any) -> None:
from pyspark.pandas.series import Series
if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self):
# Different Series or DataFrames
level = self._internal.column_labels_level
key = DataFrame._index_normalized_label(level, key)
value = DataFrame._index_normalized_frame(level, value)
def assign_columns(
psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]
) -> Iterator[Tuple["Series", Label]]:
assert len(key) == len(that_column_labels)
                # Note that this intentionally uses `zip_longest` to combine
                # the that_columns.
for k, this_label, that_label in zip_longest(
key, this_column_labels, that_column_labels
):
yield (psdf._psser_for(that_label), tuple(["that", *k]))
if this_label is not None and this_label[1:] != k:
yield (psdf._psser_for(this_label), this_label)
psdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(value, list):
if len(self) != len(value):
raise ValueError("Length of values does not match length of index")
# TODO: avoid using default index?
with option_context(
"compute.default_index_type",
"distributed-sequence",
"compute.ops_on_diff_frames",
True,
):
psdf = self.reset_index()
psdf[key] = ps.DataFrame(value)
psdf = psdf.set_index(psdf.columns[: self._internal.index_level])
psdf.index.names = self.index.names
elif isinstance(key, list):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
psdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
psdf = self._assign({key: value})
self._update_internal_frame(psdf._internal)
@staticmethod
def _index_normalized_label(level: int, labels: Union[Name, Sequence[Name]]) -> List[Label]:
"""
Returns a label that is normalized against the current column index level.
        For example, the key "abc" can be ("abc", "", "") if the current Frame has
        a multi-index for its columns.
"""
if is_name_like_tuple(labels):
labels = [labels]
elif is_name_like_value(labels):
labels = [(labels,)]
else:
labels = [k if is_name_like_tuple(k) else (k,) for k in labels]
if any(len(label) > level for label in labels):
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
max(len(label) for label in labels), level
)
)
return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels]
@staticmethod
def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> "DataFrame":
"""
Returns a frame that is normalized against the current column index level.
        For example, the name in `pd.Series([...], name="abc")` can be
        ("abc", "", "") if the current DataFrame has a multi-index for its columns.
"""
from pyspark.pandas.series import Series
if isinstance(psser_or_psdf, Series):
psdf = psser_or_psdf.to_frame()
else:
assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)
psdf = psser_or_psdf.copy()
psdf.columns = pd.MultiIndex.from_tuples(
[
tuple([name_like_string(label)] + ([""] * (level - 1)))
for label in psdf._internal.column_labels
],
)
return psdf
def __getattr__(self, key: str) -> Any:
if key.startswith("__"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self)
else:
return partial(property_or_func, self)
try:
return self.loc[:, key]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key)
)
def __setattr__(self, key: str, value: Any) -> None:
try:
object.__getattribute__(self, key)
return object.__setattr__(self, key, value)
except AttributeError:
pass
if (key,) in self._internal.column_labels:
self[key] = value
else:
msg = "pandas-on-Spark doesn't allow columns to be created via a new attribute name"
if is_testing():
raise AssertionError(msg)
else:
warnings.warn(msg, UserWarning)
def __len__(self) -> int:
return self._internal.resolved_copy.spark_frame.count()
def __dir__(self) -> Iterable[str]:
fields = [
f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f
]
return list(super().__dir__()) + fields
def __iter__(self) -> Iterator[Name]:
return iter(self.columns)
# NDArray Compat
def __array_ufunc__(
self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any
) -> "DataFrame":
# TODO: is it possible to deduplicate it with '_map_series_op'?
if all(isinstance(inp, DataFrame) for inp in inputs) and any(
not same_anchor(inp, inputs[0]) for inp in inputs
):
# binary only
assert len(inputs) == 2
this = inputs[0]
that = inputs[1]
if this._internal.column_labels_level != that._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
# Different DataFrames
def apply_op(
psdf: DataFrame, this_column_labels: List[Label], that_column_labels: List[Label]
) -> Iterator[Tuple["Series", Label]]:
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (
ufunc(
psdf._psser_for(this_label), psdf._psser_for(that_label), **kwargs
).rename(this_label),
this_label,
)
return align_diff_frames(apply_op, this, that, fillna=True, how="full")
else:
# DataFrame and Series
applied = []
this = inputs[0]
assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))
for label in this._internal.column_labels:
arguments = []
for inp in inputs:
arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)
# both binary and unary.
applied.append(ufunc(*arguments, **kwargs).rename(label))
internal = this._internal.with_new_columns(applied)
return DataFrame(internal)
def __class_getitem__(cls, params: Any) -> object:
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
        # We always wrap the given type hints in a tuple to mimic the variadic generic.
return create_tuple_for_frame_type(params)
def _reduce_spark_multi(sdf: SparkDataFrame, aggs: List[Column]) -> Any:
"""
    Performs a reduction on a Spark DataFrame, the functions being known SQL aggregate functions.
"""
assert isinstance(sdf, SparkDataFrame)
sdf0 = sdf.agg(*aggs)
lst = sdf0.limit(2).toPandas()
assert len(lst) == 1, (sdf, lst)
row = lst.iloc[0]
lst2 = list(row)
assert len(lst2) == len(aggs), (row, lst2)
return lst2
class CachedDataFrame(DataFrame):
"""
Cached pandas-on-Spark DataFrame, which corresponds to pandas DataFrame logically, but
internally it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal: InternalFrame, storage_level: Optional[StorageLevel] = None):
if storage_level is None:
object.__setattr__(self, "_cached", internal.spark_frame.cache())
elif isinstance(storage_level, StorageLevel):
object.__setattr__(self, "_cached", internal.spark_frame.persist(storage_level))
else:
raise TypeError(
"Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`"
)
super().__init__(internal)
def __enter__(self) -> "CachedDataFrame":
return self
def __exit__(
self,
exception_type: Optional[Type[BaseException]],
exception_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
self.spark.unpersist()
return None
# create accessor for Spark related methods.
spark = CachedAccessor("spark", CachedSparkFrameMethods)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
import uuid
from pyspark.sql import SparkSession
import pyspark.pandas.frame
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.frame.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.frame tests").getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.frame,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| 36.066055 | 100 | 0.519576 | [
"Apache-2.0"
] | Flyangz/spark | python/pyspark/pandas/frame.py | 460,380 | Python |
# -*- coding: utf-8 -*-
class C:
a = 'abc'
def __getattribute__(self, args):
        print('__getattribute__ is called')
#import pdb; pdb.set_trace()
#return object.__getattribute__(self, args)
return super().__getattribute__(args)
def __getattr__(self, name):
        print('__getattr__() is called')
        return name + ' from __getattr__'
def __get__(self, instance, owner):
print('__get__() is called', instance,'||', owner, '||')
return self
def __set__(self, instance, value):
print('__set__() is called', instance, value)
def foo(self, x):
print('foo:',x)
    def __call__(self, *args, **kwargs):
        print('__call__() is called', args, kwargs)
class C2:
d = C()
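# Usage sketch (illustrative addition, not part of the original sample): exercising the hooks above.
if __name__ == '__main__':
    c2 = C2()
    c2.d            # C.__get__ is called: d is a data descriptor on C2
    c2.d = 42       # C.__set__ is called
    c = C()
    c.a             # C.__getattribute__ is called and returns 'abc'
    c.missing       # __getattribute__ fails to find it, so __getattr__ supplies a fallback
    c(1, key=2)     # C.__call__ is called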
| 26.72 | 58 | 0.670659 | [
"MIT"
] | asuraswrath/pysample | app/getattrtest.py | 668 | Python |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Module for common display functions."""
from datetime import datetime
from typing import Any, Union, Set, Dict, Tuple
import pandas as pd
from bokeh.io import output_notebook, show
from bokeh.models import (
ColumnDataSource,
DatetimeTickFormatter,
HoverTool,
Label,
Legend,
RangeTool,
Title,
)
# pylint: disable=no-name-in-module
from bokeh.palettes import viridis
# pylint: enable=no-name-in-module
from bokeh.plotting import figure, reset_output
from bokeh.layouts import column
from .._version import VERSION
from .utility import export
__version__ = VERSION
__author__ = "Ian Hellen"
# Constants
_WRAP = 50
_WRAP_CMDL = "WrapCmdl"
@export
def display_timeline(
data: Union[pd.DataFrame, dict],
time_column: str = "TimeGenerated",
source_columns: list = None,
**kwargs,
) -> figure:
"""
Display a timeline of events.
Parameters
----------
data : Union[dict, pd.DataFrame]
Either
dict of data sets to plot on the timeline with the following structure::
Key (str) - Name of data set to be displayed in legend
Value (Dict[str, Any]) - containing:
data (pd.DataFrame) - Data to plot
time_column (str, optional) - Name of the timestamp column
source_columns (list[str], optional) - source columns to use
in tooltips
color (str, optional) - color of datapoints for this data
If any of the last values are omitted, they default to the values
supplied as parameters to the function (see below)
Or
DataFrame as a single data set or grouped into individual
plot series using the `group_by` parameter
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
Other Parameters
----------------
title : str, optional
Title to display (the default is None)
alert : SecurityAlert, optional
Add a reference line/label using the alert time (the default is None)
ref_event : Any, optional
        Add a reference line/label using the event time (the default is None)
ref_time : datetime, optional
Add a reference line/label using `ref_time` (the default is None)
group_by : str
(where `data` is a DataFrame)
The column to group timelines on
legend: str, optional
"left", "right", "inline" or "none"
(the default is to show a legend when plotting multiple series
and not to show one when plotting a single series)
yaxis : bool, optional
Whether to show the yaxis and labels (default is False)
ygrid : bool, optional
Whether to show the yaxis grid (default is False)
xgrid : bool, optional
Whether to show the xaxis grid (default is True)
range_tool : bool, optional
        Show the range slider tool (default is True)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
color : str
Default series color (default is "navy")
overlay_color : str
Overlay series color (default is "green")
Returns
-------
figure
The bokeh plot figure.
"""
# Get args
overlay_data: pd.DataFrame = kwargs.pop("overlay_data", None)
overlay_columns: list = kwargs.pop("overlay_columns", source_columns)
color: str = kwargs.get("color", "navy") # don't remove this from kwargs
overlay_color: str = kwargs.pop("overlay_color", "green")
kwargs_sub = kwargs.copy()
kwargs_sub["time_column"] = time_column
kwargs_sub["source_columns"] = source_columns
kwargs_sub["ref_time"], kwargs_sub["ref_label"] = _get_ref_event_time(**kwargs)
if isinstance(data, pd.DataFrame):
if overlay_data is not None:
aggr_data = {
"Primary": {
"data": data,
"time_column": time_column,
"source_columns": source_columns,
"color": color,
},
"Secondary": {
"data": overlay_data,
"time_column": time_column,
"source_columns": overlay_columns,
"color": overlay_color,
},
}
return _display_timeline_dict(data=aggr_data, **kwargs_sub)
# Create a dictionary from a grouped or simple series
series_dict = _create_dict_from_grouping(
data=data,
source_columns=source_columns,
time_column=time_column,
group_by=kwargs.get("group_by", None),
color=kwargs.get("color", "navy"),
)
return _display_timeline_dict(data=series_dict, **kwargs_sub)
if isinstance(data, dict):
return _display_timeline_dict(data, **kwargs_sub)
return None
# pylint: disable=invalid-name, too-many-locals, too-many-statements, too-many-branches
@export # noqa: C901, MC0001
def display_timeline_values(
data: pd.DataFrame,
y: str,
time_column: str = "TimeGenerated",
source_columns: list = None,
**kwargs,
) -> figure:
"""
Display a timeline of events.
Parameters
----------
data : pd.DataFrame
DataFrame as a single data set or grouped into individual
plot series using the `group_by` parameter
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
y : str
The column name holding the value to plot vertically
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
Other Parameters
----------------
x : str, optional
alias of `time_column`
title : str, optional
Title to display (the default is None)
ref_event : Any, optional
        Add a reference line/label using the event time (the default is None)
ref_time : datetime, optional
Add a reference line/label using `ref_time` (the default is None)
group_by : str
(where `data` is a DataFrame)
The column to group timelines on
legend_column : str, optional
(where `data` is a DataFrame)
Name of the column used to generate the legend labels if a legend is
to be displayed. Default is `group_by` parameter.
yaxis : bool, optional
Whether to show the yaxis and labels
range_tool : bool, optional
        Show the range slider tool (default is True)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
color : str
Default series color (default is "navy"). This is overridden by
automatic color assignments if plotting a grouped chart
    kind : Union[str, List[str]], optional
        One or more glyph types to plot.
Supported types are "circle", "line" and "vbar" (default is "vbar")
Returns
-------
figure
The bokeh plot figure.
"""
reset_output()
output_notebook()
height: int = kwargs.pop("height", None)
width: int = kwargs.pop("width", 900)
title: str = kwargs.pop("title", None)
time_column = kwargs.get("x", time_column)
group_by: str = kwargs.get("group_by", None)
show_yaxis: bool = kwargs.pop("yaxis", True)
show_range: bool = kwargs.pop("range_tool", True)
color: str = kwargs.get("color", "navy")
legend_pos: str = kwargs.pop("legend", None)
kind: Any = kwargs.pop("kind", ["vbar"])
plot_kinds = kind if isinstance(kind, list) else [kind]
ref_time, ref_label = _get_ref_event_time(**kwargs)
graph_df, group_count_df, tool_tip_columns, series_count = _create_data_grouping(
data, source_columns, time_column, group_by, color
)
# build the tool tips from columns (excluding these)
tool_tip_items = [(f"{col}", f"@{col}") for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={"Tooltip": "printf"})
# Create the Plot figure
title = title if title else "Timeline"
min_time = graph_df[time_column].min()
max_time = graph_df[time_column].max()
start_range = min_time - ((max_time - min_time) * 0.1)
end_range = max_time + ((max_time - min_time) * 0.1)
height = height if height else _calc_auto_plot_height(series_count)
plot = figure(
x_range=(start_range, end_range),
min_border_left=50,
plot_height=height,
plot_width=width,
x_axis_label="Event Time",
x_axis_type="datetime",
x_minor_ticks=10,
y_axis_label=y,
tools=[hover, "xwheel_zoom", "box_zoom", "reset", "save", "xpan"],
toolbar_location="above",
title=title,
)
plot.yaxis.visible = show_yaxis
plot.ygrid.minor_grid_line_color = "navy"
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = "navy"
plot.ygrid.grid_line_alpha = 0.3
plot.xgrid.minor_grid_line_color = "navy"
plot.xgrid.minor_grid_line_alpha = 0.1
plot.xgrid.grid_line_color = "navy"
plot.xgrid.grid_line_alpha = 0.3
# set the tick datetime formatter
plot.xaxis[0].formatter = _get_tick_formatter()
# plot groups individually so that we can create an interactive legend
if group_by:
legend_items = []
for _, group_id in group_count_df[group_by].items():
first_group_item = graph_df[graph_df[group_by] == group_id].iloc[0]
legend_label = str(first_group_item[group_by])
inline_legend = str(group_id) if legend_pos == "inline" else None
group_color = first_group_item["color"]
row_source = ColumnDataSource(graph_df[graph_df[group_by] == group_id])
p_series = []
# create default plot args
plot_args: Dict[str, Any] = dict(
x=time_column,
alpha=0.7,
source=row_source,
legend_label=str(inline_legend),
)
if "vbar" in plot_kinds:
p_series.append(plot.vbar(top=y, width=4, color="color", **plot_args))
if "circle" in plot_kinds:
p_series.append(plot.circle(y=y, size=4, color="color", **plot_args))
if "line" in plot_kinds:
p_series.append(
plot.line(y=y, line_width=1, line_color=group_color, **plot_args)
)
if not inline_legend:
legend_items.append((legend_label, p_series))
if legend_pos == "inline":
# Position the inline legend
plot.legend.location = "top_left"
plot.legend.click_policy = "hide"
elif legend_pos in ["left", "right"]:
# Create the legend box outside of the plot area
ext_legend = Legend(
items=legend_items,
location="center",
click_policy="hide",
label_text_font_size="8pt",
)
plot.add_layout(ext_legend, legend_pos)
else:
plot_args = dict(
x=time_column, color="color", alpha=0.7, source=ColumnDataSource(graph_df)
)
if "vbar" in plot_kinds:
plot.vbar(top=y, width=4, **plot_args)
if "circle" in plot_kinds:
plot.circle(y=y, size=4, **plot_args)
if "line" in plot_kinds:
plot.line(y=y, line_width=4, **plot_args)
# if we have a reference, plot the time as a line
if ref_time is not None:
_add_ref_line(plot, ref_time, ref_label, series_count)
if show_range:
rng_select = _create_range_tool(
data=graph_df,
min_time=min_time,
max_time=max_time,
plot_range=plot.x_range,
width=width,
height=height,
time_column=time_column,
)
show(column(plot, rng_select))
else:
show(plot)
return plot
# pylint: enable=invalid-name,too-many-locals, too-many-statements, too-many-branches
# pylint: disable=too-many-locals, too-many-statements, too-many-branches
def _display_timeline_dict(data: dict, **kwargs) -> figure: # noqa: C901, MC0001
"""
Display a timeline of events.
Parameters
----------
data : dict
Data points to plot on the timeline.
Need to contain:
Key - Name of data type to be displayed in legend
Value - dict of data containing:
data : pd.DataFrame
Data to plot
time_column : str
Name of the timestamp column
source_columns : list
List of source columns to use in tooltips
color: str
Color of datapoints for this data
Other Parameters
----------------
ref_time : datetime, optional
Input reference line to display (the default is None)
title : str, optional
Title to display (the default is None)
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
legend: str, optional
Where to position the legend
None, left, right or inline (default is None)
yaxis : bool, optional
Whether to show the yaxis and labels
range_tool : bool, optional
        Show the range slider tool (default is True)
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
Returns
-------
figure
The bokeh plot figure.
"""
reset_output()
output_notebook()
height: int = kwargs.pop("height", None)
width: int = kwargs.pop("width", 900)
ref_time: Any = kwargs.pop("ref_time", None)
ref_label: str = kwargs.pop("ref_label", None)
title: str = kwargs.pop("title", None)
legend_pos: str = kwargs.pop("legend", None)
show_yaxis: bool = kwargs.pop("yaxis", False)
show_range: bool = kwargs.pop("range_tool", True)
xgrid: bool = kwargs.pop("xgrid", True)
ygrid: bool = kwargs.pop("ygrid", False)
tool_tip_columns, min_time, max_time = _unpack_data_series_dict(data, **kwargs)
series_count = len(data)
# build the tool tips from all specified columns
tool_tip_items = [(f"{col}", f"@{col}") for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={"Tooltip": "printf"})
title = f"Timeline: {title}" if title else "Event Timeline"
start_range = min_time - ((max_time - min_time) * 0.1)
end_range = max_time + ((max_time - min_time) * 0.1)
height = height if height else _calc_auto_plot_height(len(data))
y_range = ((-1 / series_count), series_count - 1 + (1 / series_count))
plot = figure(
x_range=(start_range, end_range),
y_range=y_range,
min_border_left=50,
plot_height=height,
plot_width=width,
x_axis_label="Event Time",
x_axis_type="datetime",
x_minor_ticks=10,
tools=[hover, "xwheel_zoom", "box_zoom", "reset", "save", "xpan"],
title=title,
)
plot.yaxis.visible = show_yaxis
if show_yaxis:
if data:
y_labels = {ser_def["y_index"]: str(lbl) for lbl, ser_def in data.items()}
plot.yaxis.major_label_overrides = y_labels
if ygrid:
plot.ygrid.minor_grid_line_color = "navy"
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = "navy"
plot.ygrid.grid_line_alpha = 0.3
else:
plot.ygrid.grid_line_color = None
if xgrid:
plot.xgrid.minor_grid_line_color = "navy"
plot.xgrid.minor_grid_line_alpha = 0.3
else:
plot.xgrid.grid_line_color = None
# Create plot bar to act as as range selector
rng_select = _create_range_tool(
data=data,
min_time=min_time,
max_time=max_time,
plot_range=plot.x_range,
width=width,
height=height,
)
# set the tick datetime formatter
plot.xaxis[0].formatter = _get_tick_formatter()
if series_count > 1 and not legend_pos:
legend_pos = "left"
# plot groups individually so that we can create an interactive legend
    # if legend_pos is "inline", we add the normal legend inside the plot
# if legend_pos is "left" or "right", we add the legend to the side
legend_items = []
for ser_name, series_def in data.items():
if legend_pos == "inline":
p_series = plot.diamond(
x=series_def["time_column"],
y="y_index",
color=series_def["color"],
alpha=0.5,
size=10,
source=series_def["source"],
legend_label=str(ser_name),
)
else:
p_series = plot.diamond(
x=series_def["time_column"],
y="y_index",
color=series_def["color"],
alpha=0.5,
size=10,
source=series_def["source"],
)
if legend_pos in ["left", "right"]:
legend_items.append((str(ser_name), [p_series]))
if legend_pos == "inline":
# Position the inline legend
plot.legend.location = "center_left"
plot.legend.click_policy = "hide"
elif legend_pos in ["left", "right"]:
# Create the legend box outside of the plot area
ext_legend = Legend(
items=legend_items,
location="center",
click_policy="hide",
label_text_font_size="8pt",
)
plot.add_layout(ext_legend, legend_pos)
if ref_time is not None:
_add_ref_line(plot, ref_time, ref_label, len(data))
if show_range:
show(column(plot, rng_select))
else:
show(plot)
return plot
# pylint: enable=too-many-locals, too-many-statements, too-many-branches
# pylint: disable=too-many-locals
def _unpack_data_series_dict(data, **kwargs):
time_column: str = kwargs.pop("time_column", "TimeGenerated")
source_columns: list = kwargs.pop("source_columns", None)
if not source_columns:
source_columns = ["NewProcessName", "EventID", "CommandLine"]
def_source_columns = set(source_columns) if source_columns else set()
# Process the input dictionary
# Take each item that is passed and fill in blanks and add a y_index
tool_tip_columns: Set[str] = set()
min_time = pd.Timestamp(pd.Timestamp.max)
max_time = pd.Timestamp(pd.Timestamp.min)
y_index = 0
# Create a color map in case colors have not been specified
    # (Shift the Viridis palette so we lose the top, harder-to-see colors)
series_count = len(data)
colors, palette_size = _get_color_palette(series_count)
for series_def in data.values():
data_columns: Set[str] = set()
series_data = series_def["data"]
# if the series has source columns, use those
src_cols = series_def.get("source_columns", def_source_columns)
data_columns.update(src_cols if src_cols else def_source_columns)
# add these columns to the tool tip column set
tool_tip_columns.update(data_columns)
time_col = series_def.get("time_column", None)
if not time_col:
time_col = time_column
series_def["time_column"] = time_col
min_time = min(min_time, series_data[time_col].min())
max_time = max(max_time, series_data[time_col].max())
data_columns.update([time_col])
# Create the Column data source to plot
graph_df = series_data[list(data_columns)].copy()
graph_df["y_index"] = y_index
series_def["y_index"] = y_index
ser_color = series_def.get("color", None)
if not ser_color:
ser_color = colors[y_index % palette_size]
series_def["color"] = ser_color
# Wrap tooltip lines longer than 50 chars
_wrap_df_columns(graph_df, 50)
series_def["source"] = ColumnDataSource(graph_df)
y_index += 1
return tool_tip_columns, min_time, max_time
# pylint: enable=too-many-locals
def _create_data_grouping(data, source_columns, time_column, group_by, color):
if not source_columns:
data_columns = set(["NewProcessName", "EventID", "CommandLine"])
else:
data_columns = set(source_columns)
tool_tip_columns = data_columns.copy()
    # If the time column is not explicitly specified in source_columns, add it
data_columns.add(time_column)
# create group frame so that we can color each group separately
if group_by:
group_count_df = (
data[[group_by, time_column]]
.groupby(group_by)
.count()
.reset_index()
.rename(columns={time_column: "count"})
)
group_count_df["y_index"] = group_count_df.index
        # Shift the Viridis palette so we lose the top, harder-to-see colors
series_count = len(group_count_df)
colors, palette_size = _get_color_palette(series_count)
group_count_df["color"] = group_count_df.apply(
lambda x: colors[x.y_index % palette_size], axis=1
)
# re-join with the original data
data_columns.update([group_by, "y_index", "color"])
clean_data = data.drop(columns=["y_index", "color"], errors="ignore")
graph_df = clean_data.merge(group_count_df, on=group_by)[list(data_columns)]
else:
graph_df = data[list(data_columns)].copy()
graph_df["color"] = color
graph_df["y_index"] = 1
series_count = 1
group_count_df = None
return graph_df, group_count_df, tool_tip_columns, series_count
# pylint: enable=too-many-arguments
def _create_dict_from_grouping(data, source_columns, time_column, group_by, color):
if not source_columns:
data_columns = set(["NewProcessName", "EventID", "CommandLine"])
else:
data_columns = set(source_columns)
    # If the time column is not explicitly specified in source_columns, add it
data_columns.add(time_column)
series_dict: Dict[str, Dict] = {}
# create group frame so that we can color each group separately
if group_by:
data_columns.add(group_by)
grouped_data = data[list(data_columns)].groupby(group_by)
series_count = len(grouped_data)
colors, palette_size = _get_color_palette(series_count)
color_index = 0
for group_name, group_df in grouped_data:
series_dict[str(group_name)] = dict(
data=group_df,
time_column=time_column,
source_columns=source_columns,
color=colors[color_index % palette_size],
)
color_index += 1
else:
group_df = data[list(data_columns)].copy()
series_dict["unnamed series"] = dict(
data=group_df,
time_column=time_column,
source_columns=source_columns,
color=color,
)
return series_dict
def _get_ref_event_time(**kwargs) -> Tuple[datetime, str]:
"""Extract the reference time from kwargs."""
ref_alert = kwargs.get("alert", None)
if ref_alert is not None:
ref_event = ref_alert
ref_label = "Alert time"
else:
ref_event = kwargs.get("ref_event", None)
ref_label = "Event time"
if ref_event is not None:
ref_time = getattr(ref_event, "StartTimeUtc", None)
if not ref_time:
ref_time = getattr(ref_event, "TimeGenerated", None)
else:
ref_time = kwargs.get("ref_time", None)
ref_label = "Ref time"
return ref_time, kwargs.get("ref_label", ref_label)
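# Illustrative note (added commentary, not part of the original module): the
# precedence above is alert -> ref_event -> ref_time, so a hypothetical call like
#
#   >>> _get_ref_event_time(alert=my_alert, ref_time=some_ts)   # my_alert, some_ts are made-up names
#
# would use my_alert's StartTimeUtc (falling back to TimeGenerated) with the label
# "Alert time" and ignore ref_time. Only when neither alert nor ref_event is given
# does ref_time / "Ref time" apply, and an explicit "ref_label" kwarg always wins.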
def _get_color_palette(series_count):
palette_size = min(256, series_count + int(series_count / 5))
return viridis(palette_size), palette_size
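# Illustrative sketch (added for clarity, not in the original source): the palette
# is grown slightly beyond the series count so neighbouring series get more
# distinct hues, capped at Bokeh's 256-colour viridis maximum. For example:
#
#   >>> _get_color_palette(10)    # 10 + 10 // 5 = 12 colours
#   ((<12 viridis hex colours>), 12)
#   >>> _get_color_palette(2000)  # capped at 256
#   ((<256 viridis hex colours>), 256)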
def _plot_dict_series(data, plot, legend_pos):
"""Plot series from dict."""
# If legend_pos is outside the graph we need to create the legend
    # separately.
# We plot groups individually so that we can create an interactive legend.
legend_items = []
for ser_name, series_def in data.items():
if legend_pos == "inline":
p_series = plot.diamond(
x=series_def["time_column"],
y="y_index",
color=series_def["color"],
alpha=0.5,
size=10,
source=series_def["source"],
legend_label=str(ser_name),
)
else:
p_series = plot.diamond(
x=series_def["time_column"],
y="y_index",
color=series_def["color"],
alpha=0.5,
size=10,
source=series_def["source"],
)
if legend_pos in ["left", "right"]:
legend_items.append((ser_name, [p_series]))
if legend_pos == "inline":
# Position the inline legend
plot.legend.location = "top_left"
plot.legend.click_policy = "hide"
elif legend_pos in ["left", "right"]:
# Create the legend box outside of the plot area
ext_legend = Legend(
items=legend_items,
location="center",
click_policy="hide",
label_text_font_size="8pt",
)
plot.add_layout(ext_legend, legend_pos)
def _wrap_df_columns(data: pd.DataFrame, wrap_len: int = 50):
"""Wrap any string columns."""
if not data.empty:
for col in data.columns:
if isinstance(data[col].iloc[0], str):
data[col] = data[col].str.wrap(wrap_len)
def _get_tick_formatter() -> DatetimeTickFormatter:
"""Return tick formatting for different zoom levels."""
    # millisecond tick format: '%H:%M:%S.%3N'
tick_format = DatetimeTickFormatter()
tick_format.days = ["%m-%d %H:%M"]
tick_format.hours = ["%H:%M:%S"]
tick_format.minutes = ["%H:%M:%S"]
tick_format.seconds = ["%H:%M:%S"]
tick_format.milliseconds = ["%H:%M:%S.%3N"]
return tick_format
def _calc_auto_plot_height(group_count):
"""Dynamic calculation of plot height."""
ht_per_row = 40
if group_count > 15:
ht_per_row = 25
return max(ht_per_row * group_count, 300)
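# Worked example (illustrative, not in the original source): with the thresholds
# above, 5 groups -> max(40 * 5, 300) = 300px and 10 groups -> 400px; once the
# group count exceeds 15 the per-row height drops to 25, so 20 groups -> 500px.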
# pylint: disable=too-many-arguments
def _create_range_tool(
data, min_time, max_time, plot_range, width, height, time_column: str = None
):
"""Create plot bar to act as as range selector."""
ext_min = min_time - ((max_time - min_time) * 0.15)
ext_max = max_time + ((max_time - min_time) * 0.15)
plot_height = max(120, int(height * 0.20))
rng_select = figure(
x_range=(ext_min, ext_max),
title="Range Selector",
plot_height=plot_height,
plot_width=width,
x_axis_type="datetime",
y_axis_type=None,
tools="",
toolbar_location=None,
)
help_str = (
"Drag the middle or edges of the selection box to change "
+ "the range in the main chart"
)
rng_select.add_layout(
Title(text=help_str, align="right", text_font_size="10px"), "below"
)
rng_select.xaxis[0].formatter = _get_tick_formatter()
if isinstance(data, dict):
for _, series_def in data.items():
rng_select.circle(
x=series_def["time_column"],
y="y_index",
color=series_def["color"],
source=series_def["source"],
)
elif isinstance(data, pd.DataFrame):
rng_select.circle(
x=time_column, y="y_index", color="blue", source=ColumnDataSource(data)
)
range_tool = RangeTool(x_range=plot_range)
range_tool.overlay.fill_color = "navy"
range_tool.overlay.fill_alpha = 0.2
rng_select.ygrid.grid_line_color = None
rng_select.add_tools(range_tool)
rng_select.toolbar.active_multi = range_tool
return rng_select
# pylint: enable=too-many-arguments
def _add_ref_line(plot, ref_time, ref_text="Ref time", series_count=1):
"""Add a reference marker line and label at `ref_time`."""
ref_label_tm = pd.Timestamp(ref_time)
plot.line(x=[ref_label_tm, ref_label_tm], y=[0, series_count])
ref_label = Label(
x=ref_label_tm,
y=0,
y_offset=10,
x_units="data",
y_units="data",
text=f"< {ref_text}",
text_font_size="8pt",
render_mode="css",
border_line_color="red",
border_line_alpha=1.0,
background_fill_color="white",
background_fill_alpha=0.5,
)
plot.add_layout(ref_label)
| 34.958531 | 87 | 0.616065 | [
"MIT"
] | Dqirvin/msticpy | msticpy/nbtools/timeline.py | 29,505 | Python |
img_size = 84
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
dict(type='LoadImageFromBytes'),
dict(type='Resize', size=(int(img_size * 1.15), -1)),
dict(type='CenterCrop', crop_size=img_size),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img', 'gt_label'])
]
num_ways = 5
num_shots = 1
num_queries = 15
num_val_episodes = 100
num_test_episodes = 2000
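# Note (added commentary, not in the upstream config): each meta-test episode
# samples num_ways (5) classes, with num_shots (1) support image and num_queries
# (15) query images per class. That is why the support/query dataloaders below use
# batch sizes of num_ways * num_shots = 5 and num_ways * num_queries = 75,
# i.e. one whole episode per batch.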
data = dict(
val=dict(
type='MetaTestDataset',
num_episodes=num_val_episodes,
num_ways=num_ways,
num_shots=num_shots,
num_queries=num_queries,
dataset=dict(
type='TieredImageNetDataset',
subset='val',
data_prefix='data/tiered_imagenet',
pipeline=test_pipeline),
meta_test_cfg=dict(
num_episodes=num_val_episodes,
num_ways=num_ways,
# whether to cache features in fixed-backbone methods for
# testing acceleration.
fast_test=False,
test_set=dict(batch_size=16, num_workers=2),
            # worker initialization is a time-consuming operation
support=dict(batch_size=num_ways * num_shots, num_workers=0),
query=dict(batch_size=num_ways * num_queries, num_workers=0))),
test=dict(
type='MetaTestDataset',
num_episodes=num_test_episodes,
num_ways=num_ways,
num_shots=num_shots,
num_queries=num_queries,
# seed for generating meta test episodes
episodes_seed=0,
dataset=dict(
type='TieredImageNetDataset',
subset='test',
data_prefix='data/tiered_imagenet',
pipeline=test_pipeline),
meta_test_cfg=dict(
num_episodes=num_test_episodes,
num_ways=num_ways,
# whether to cache features in fixed-backbone methods for
# testing acceleration.
fast_test=False,
test_set=dict(batch_size=16, num_workers=2),
            # worker initialization for each task is a time-consuming operation
support=dict(batch_size=num_ways * num_shots, num_workers=0),
query=dict(batch_size=num_ways * num_queries, num_workers=0))))
| 36.484375 | 79 | 0.627409 | [
"Apache-2.0"
] | BIGWangYuDong/mmfewshot | configs/classification/_base_/meta_test/tiered-imagenet_meta-test_5way-1shot.py | 2,335 | Python |
import logging
import os
import platform
import subprocess
from xml.dom import minidom
from xml.etree import ElementTree
_RESOURCE_DIR = '../resources'
_INKSCAPE = None
_FFMPEG = None
_BASENAME = 'napkin'
_NAMESPACE = 'nap::qt'
def findAppInWindows(appexe):
try:
import winreg
except ImportError:
import _winreg as winreg
handle = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\%s" % appexe)
num_values = winreg.QueryInfoKey(handle)[1]
for i in range(num_values):
print(winreg.EnumValue(handle, i))
def resourcePath(suffix=''):
return os.path.realpath('%s/%s/%s' %
(os.path.dirname(__file__),
_RESOURCE_DIR,
suffix))
def walkDir(d):
for dirName, subdirList, fileList in os.walk(d):
for fname in fileList:
yield '%s/%s' % (dirName, fname)
def searchInFolders(folders, filename):
for d in folders:
for f in walkDir(d):
if os.path.basename(f).lower() == filename:
return f
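# Illustrative usage (added commentary, not part of the original script): the two
# helpers above do a case-insensitive basename match over a recursive directory
# walk, so a call such as
#
#   >>> searchInFolders(['c:/program files/inkscape', 'c:/'], 'inkscape.exe')
#
# returns the first matching path found, or None when nothing matches; this is how
# getInkscapeExe() and getFFMpegExe() below locate their executables on Windows.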
def getInkscapeExe():
global _INKSCAPE
if _INKSCAPE is not None:
return _INKSCAPE
if platform.system() != 'Windows':
return 'inkscape'
_INKSCAPE = searchInFolders([
'c:/program files/inkscape',
'c:/program files',
'c:/',
], 'inkscape.exe')
return _INKSCAPE
def getFFMpegExe():
global _FFMPEG
if _FFMPEG is not None:
return _FFMPEG
if platform.system() != 'Windows':
return _FFMPEG
# print(os.path.realpath())
_FFMPEG = searchInFolders({
'%s/../../../thirdparty/ffmpeg/bin' % os.path.dirname(__file__)
}, 'ffmpeg.exe')
return _FFMPEG
def convertSVGtoPNG(size=128):
inkscape = getInkscapeExe()
if not inkscape:
logging.root.warning('Inkscape not found :(')
return
dir = resourcePath('icons')
for f in os.listdir(dir):
if not f.endswith('.svg'):
continue
infilename = '%s/%s' % (dir, f)
outfilename = '%s/%s.png' % (dir, os.path.splitext(f)[0])
# only update when necessary
if os.path.exists(outfilename) and os.path.getmtime(infilename) < os.path.getmtime(outfilename):
print('File was up to date: %s' % outfilename)
continue
print('Writing: %s' % outfilename)
cmd = [
inkscape,
'-z',
'-e', outfilename,
'-w', size,
'-h', size,
infilename
]
cmd = [str(c) for c in cmd]
subprocess.Popen(cmd).communicate()
def __resourceFiles():
dirs = ('icons', 'fonts')
filetypes = ('ttf', 'png')
for d in dirs:
for f in sorted(walkDir(resourcePath(d))):
ext = os.path.splitext(f)[1][1:]
if ext in filetypes:
yield f
def generateQRC():
outfile = resourcePath('%s-resources.qrc' % _BASENAME)
print('Dumping %s' % outfile)
et = ElementTree
xml = et.Element('RCC')
xresource = et.Element('qresource')
xml.append(xresource)
resdir = resourcePath()
for f in __resourceFiles():
x = et.Element('file')
p = os.path.relpath(f, resdir).replace(os.sep, '/')
x.text = p
xresource.append(x)
xmlstring = et.tostring(xml)
xmldom = minidom.parseString(xmlstring)
xmlpretty = xmldom.toprettyxml()
with open(outfile, 'w') as fp:
fp.write(xmlpretty)
def generateHeader():
outfile = resourcePath('../src/%s-resources.h' % _BASENAME)
with open(outfile, 'w') as fp:
fp.write(
'#pragma once\n\n'
'#include <QString>\n\n'
'namespace %s\n{\n' % _NAMESPACE
)
resdir = resourcePath()
for f in __resourceFiles():
p = os.path.relpath(f, resdir)
p = p.replace('\\', '/')
name = p.replace('/', '_').upper()
name = name.replace('-', '_')
name = os.path.splitext(name)[0]
name = name.ljust(30, ' ')
fp.write('\tstatic const QString QRC_%s = ":/%s";\n' % (name, p))
fp.write(
'} // namespace %s\n\n' % _NAMESPACE
)
def generateICO():
ffmpeg = getFFMpegExe()
if not ffmpeg:
logging.root.warning('FFMPEG not found :(')
return
f = '%s-logo.png' % _BASENAME
folder = resourcePath('icons')
infilename = '%s/%s' % (folder, f)
outfilename = '%s/%s.ico' % (folder, os.path.splitext(f)[0])
print('Writing: %s' % outfilename)
cmd = [
ffmpeg,
'-loglevel', 'panic',
'-y',
'-i', infilename,
outfilename,
]
cmd = [str(c) for c in cmd]
subprocess.Popen(cmd).communicate()
if __name__ == '__main__':
convertSVGtoPNG()
generateQRC()
generateHeader()
if os.name == 'nt': # Windows
generateICO()
| 25.292929 | 104 | 0.55611 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | AartOdding/nap | tools/napkin/tools/prepareresources.py | 5,008 | Python |
#!/usr/bin/env python3
# Copyright (c) 2012-2021 The PIVX developers
# Copyright (c) 2020-2021 The PENGOLINCOIN developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Simple test checking chain movement after v5 enforcement."""
from test_framework.test_framework import PengolinCoinTestFramework
from test_framework.util import assert_equal
class MiningV5UpgradeTest(PengolinCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[]]
self.setup_clean_chain = True
def run_test(self):
assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'pending')
self.nodes[0].generate(300) # v5 activation height
assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'active')
self.nodes[0].generate(25) # 25 more to check chain movement
assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'active')
assert_equal(self.nodes[0].getblockcount(), 325)
if __name__ == '__main__':
MiningV5UpgradeTest().main()
| 39.933333 | 101 | 0.717863 | [
"MIT"
] | pengolincoin/PengolinCoin-Core | test/functional/mining_v5_upgrade.py | 1,198 | Python |
import uuid
from django.db import models
from django.utils.translation import gettext_lazy
from modelcluster.fields import ParentalKey
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface,
)
from wagtail.core import blocks
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page, TranslatableMixin
from wagtail.documents.blocks import DocumentChooserBlock
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.snippets.models import register_snippet
from wagtail_localize.components import register_translation_component
from wagtail_localize.fields import SynchronizedField, TranslatableField
from wagtail_localize.models import TranslationSource
from wagtail_localize.segments import StringSegmentValue
# Telepath added in Wagtail 2.13
try:
from wagtail.core import telepath
except ImportError:
telepath = False
@register_snippet
class TestSnippet(TranslatableMixin, models.Model):
field = models.TextField(gettext_lazy("field"))
# To test field level validation of snippets
small_charfield = models.CharField(max_length=10, blank=True)
translatable_fields = [
TranslatableField("field"),
TranslatableField("small_charfield"),
]
class TestUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
charfield = models.CharField(max_length=10, blank=True)
@register_snippet
class TestUUIDSnippet(TranslatableMixin, models.Model):
field = models.ForeignKey(TestUUIDModel, on_delete=models.CASCADE)
translatable_fields = [SynchronizedField("field")]
@register_snippet
class NonTranslatableSnippet(models.Model):
field = models.TextField()
class TestStructBlock(blocks.StructBlock):
field_a = blocks.TextBlock()
field_b = blocks.TextBlock()
class TestChooserStructBlock(blocks.StructBlock):
page = blocks.PageChooserBlock()
class TestNestedStreamBlock(blocks.StreamBlock):
block_a = blocks.TextBlock()
block_b = blocks.TextBlock()
block_l = blocks.ListBlock(blocks.CharBlock())
chooser = blocks.PageChooserBlock()
chooser_in_struct = TestChooserStructBlock()
chooser_in_list = blocks.ListBlock(blocks.PageChooserBlock())
class TestNestedChooserStructBlock(blocks.StructBlock):
nested_page = TestChooserStructBlock()
class TestStreamBlockInStructBlock(blocks.StructBlock):
nested_stream = blocks.StreamBlock(
[("page", blocks.PageChooserBlock()), ("checklist", TestChooserStructBlock())]
)
class CustomStructBlock(blocks.StructBlock):
field_a = blocks.TextBlock()
field_b = blocks.TextBlock()
def get_translatable_segments(self, value):
return [
StringSegmentValue(
"foo", "{} / {}".format(value["field_a"], value["field_b"])
)
]
def restore_translated_segments(self, value, segments):
for segment in segments:
if segment.path == "foo":
field_a, field_b = segment.render_text().split("/")
value["field_a"] = field_a.strip()
value["field_b"] = field_b.strip()
return value
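# Round-trip sketch (added for illustration; the values are hypothetical): for a
# block value {"field_a": "Hello", "field_b": "World"} the extractor above yields a
# single StringSegmentValue("foo", "Hello / World"). If a translator returns
# "Bonjour / Monde" for that segment, restore_translated_segments() splits it on
# "/" and writes back field_a="Bonjour", field_b="Monde".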
class CustomBlockWithoutExtractMethod(blocks.Block):
def render_form(self, *args, **kwargs):
"""Placeholder for Wagtail < 2.13"""
return ""
class Meta:
default = None
class ListStructBlock(blocks.StructBlock):
title = blocks.CharBlock(required=False)
items = blocks.ListBlock(blocks.CharBlock)
if telepath:
class CustomBlockWithoutExtractMethodAdapter(telepath.Adapter):
js_constructor = "CustomBlockWithoutExtractMethod"
def js_args(self, block):
return []
telepath.register(
CustomBlockWithoutExtractMethodAdapter(), CustomBlockWithoutExtractMethod
)
class TestStreamBlock(blocks.StreamBlock):
test_charblock = blocks.CharBlock(max_length=255)
test_textblock = blocks.TextBlock(label=gettext_lazy("text block"))
test_emailblock = blocks.EmailBlock()
test_urlblock = blocks.URLBlock()
test_richtextblock = blocks.RichTextBlock()
test_rawhtmlblock = blocks.RawHTMLBlock()
test_blockquoteblock = blocks.BlockQuoteBlock()
test_structblock = TestStructBlock()
test_listblock = blocks.ListBlock(blocks.TextBlock())
test_listblock_in_structblock = ListStructBlock()
test_nestedstreamblock = TestNestedStreamBlock()
test_streamblock_in_structblock = TestStreamBlockInStructBlock()
test_customstructblock = CustomStructBlock()
test_customblockwithoutextractmethod = CustomBlockWithoutExtractMethod()
test_pagechooserblock = blocks.PageChooserBlock()
test_pagechooserblock_with_restricted_types = blocks.PageChooserBlock(
["wagtail_localize_test.TestHomePage", "wagtail_localize_test.TestPage"]
)
test_imagechooserblock = ImageChooserBlock()
test_documentchooserblock = DocumentChooserBlock()
test_snippetchooserblock = SnippetChooserBlock(TestSnippet)
test_nontranslatablesnippetchooserblock = SnippetChooserBlock(
NonTranslatableSnippet
)
test_embedblock = EmbedBlock()
test_chooserstructblock = TestChooserStructBlock()
test_nestedchooserstructblock = TestNestedChooserStructBlock()
class TestCustomField(models.TextField):
def get_translatable_segments(self, value):
if not value:
# Don't disrupt other tests
return []
return [StringSegmentValue("foo", "{} and some extra".format(value))]
class TestPage(Page):
test_charfield = models.CharField(
gettext_lazy("char field"), max_length=255, blank=True, null=True, default=""
)
test_textfield = models.TextField(blank=True)
test_emailfield = models.EmailField(blank=True)
test_slugfield = models.SlugField(blank=True)
test_urlfield = models.URLField(blank=True)
test_richtextfield = RichTextField(blank=True)
test_streamfield = StreamField(TestStreamBlock, blank=True)
test_snippet = models.ForeignKey(
TestSnippet, null=True, blank=True, on_delete=models.SET_NULL
)
test_customfield = TestCustomField(blank=True)
test_synchronized_charfield = models.CharField(max_length=255, blank=True)
test_synchronized_textfield = models.TextField(blank=True)
test_not_overridable_synchronized_textfield = models.TextField(blank=True)
test_synchronized_emailfield = models.EmailField(blank=True)
test_synchronized_slugfield = models.SlugField(blank=True)
test_synchronized_urlfield = models.URLField(blank=True)
test_synchronized_richtextfield = RichTextField(blank=True)
test_synchronized_streamfield = StreamField(TestStreamBlock, blank=True)
test_synchronized_image = models.ForeignKey(
"wagtailimages.Image",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
test_synchronized_document = models.ForeignKey(
"wagtaildocs.Document",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
test_synchronized_snippet = models.ForeignKey(
TestSnippet, null=True, blank=True, on_delete=models.SET_NULL, related_name="+"
)
test_page = models.ForeignKey(
Page, null=True, blank=True, on_delete=models.SET_NULL, related_name="+"
)
test_page_specific_type = models.ForeignKey(
"TestHomePage",
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
test_page_with_restricted_types = models.ForeignKey(
Page, null=True, blank=True, on_delete=models.SET_NULL, related_name="+"
)
test_synchronized_customfield = TestCustomField(blank=True)
translatable_fields = [
TranslatableField("test_charfield"),
TranslatableField("test_textfield"),
TranslatableField("test_emailfield"),
TranslatableField("test_slugfield"),
TranslatableField("test_urlfield"),
TranslatableField("test_richtextfield"),
TranslatableField("test_streamfield"),
TranslatableField("test_snippet"),
TranslatableField("test_childobjects"),
TranslatableField("test_customfield"),
SynchronizedField("test_synchronized_charfield"),
SynchronizedField("test_synchronized_textfield"),
SynchronizedField(
"test_not_overridable_synchronized_textfield", overridable=False
),
SynchronizedField("test_synchronized_emailfield"),
SynchronizedField("test_synchronized_slugfield"),
SynchronizedField("test_synchronized_urlfield"),
SynchronizedField("test_synchronized_richtextfield"),
SynchronizedField("test_synchronized_streamfield"),
SynchronizedField("test_synchronized_image"),
SynchronizedField("test_synchronized_document"),
SynchronizedField("test_synchronized_snippet"),
SynchronizedField("test_synchronized_childobjects"),
SynchronizedField("test_page"),
SynchronizedField("test_page_specific_type"),
SynchronizedField("test_page_with_restricted_types"),
SynchronizedField("test_synchronized_customfield"),
]
content_panels = Page.content_panels + [
FieldPanel("test_charfield"),
FieldPanel("test_textfield"),
FieldPanel("test_emailfield"),
FieldPanel("test_slugfield"),
FieldPanel("test_urlfield"),
FieldPanel("test_richtextfield"),
StreamFieldPanel("test_streamfield"),
FieldPanel("test_snippet"),
InlinePanel("test_childobjects"),
FieldPanel("test_customfield"),
FieldPanel("test_synchronized_charfield"),
FieldPanel("test_synchronized_textfield"),
FieldPanel("test_synchronized_emailfield"),
FieldPanel("test_synchronized_slugfield"),
FieldPanel("test_synchronized_urlfield"),
FieldPanel("test_synchronized_richtextfield"),
StreamFieldPanel("test_synchronized_streamfield"),
FieldPanel("test_synchronized_image"),
FieldPanel("test_synchronized_document"),
FieldPanel("test_synchronized_snippet"),
InlinePanel("test_synchronized_childobjects"),
PageChooserPanel("test_page"),
PageChooserPanel("test_page_specific_type"),
PageChooserPanel(
"test_page_with_restricted_types",
["wagtail_localize_test.TestHomePage", "wagtail_localize_test.TestPage"],
),
FieldPanel("test_synchronized_customfield"),
]
class TestWithTranslationModeDisabledPage(Page):
# Always keep the translation mode off, regardless of the global
# WAGTAIL_LOCALIZE_DEFAULT_TRANSLATION_MODE value
localize_default_translation_mode = "simple"
class TestWithTranslationModeEnabledPage(Page):
# Always keep the translation mode on, regardless of the global
# WAGTAIL_LOCALIZE_DEFAULT_TRANSLATION_MODE value
localize_default_translation_mode = "synced"
class TestModel(TranslatableMixin):
title = models.CharField(max_length=255)
test_charfield = models.CharField(max_length=255, blank=True)
test_textfield = models.TextField(blank=True)
test_emailfield = models.EmailField(blank=True)
translatable_fields = [
TranslatableField("test_charfield"),
TranslatableField("test_textfield"),
TranslatableField("test_emailfield"),
]
class InheritedTestModel(TestModel):
class Meta:
unique_together = None
class TestChildObject(TranslatableMixin, Orderable):
page = ParentalKey(TestPage, related_name="test_childobjects")
field = models.TextField()
translatable_fields = [TranslatableField("field")]
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
# TODO: System check for TranslatableMixin here
class TestSynchronizedChildObject(Orderable):
page = ParentalKey(TestPage, related_name="test_synchronized_childobjects")
field = models.TextField()
translatable_fields = [TranslatableField("field")]
# FIXME: Rename me
class TestNonParentalChildObject(TranslatableMixin, Orderable):
page = models.ForeignKey(
TestPage,
on_delete=models.CASCADE,
related_name="test_nonparentalchildobjects", # FIXME: inconsistent related_name
)
field = models.TextField()
translatable_fields = [TranslatableField("field")]
class TestHomePage(Page):
pass
class TestGenerateTranslatableFieldsPage(Page):
"""
A page type that tests the builtin automatic generation of translatable fields.
"""
test_charfield = models.CharField(max_length=255, blank=True)
test_charfield_with_choices = models.CharField(
max_length=255, blank=True, choices=[("a", "A"), ("b", "B")]
)
test_textfield = models.TextField(blank=True)
test_emailfield = models.EmailField(blank=True)
test_slugfield = models.SlugField(blank=True)
test_urlfield = models.URLField(blank=True)
test_richtextfield = RichTextField(blank=True)
test_streamfield = StreamField(TestStreamBlock, blank=True)
test_snippet = models.ForeignKey(
TestSnippet, null=True, blank=True, on_delete=models.SET_NULL
)
test_nontranslatablesnippet = models.ForeignKey(
NonTranslatableSnippet,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
)
test_customfield = TestCustomField(blank=True)
class TestOverrideTranslatableFieldsPage(TestGenerateTranslatableFieldsPage):
override_translatable_fields = [
SynchronizedField("test_charfield"),
TranslatableField("test_emailfield"),
]
class TranslatableChildObject(TranslatableMixin, Orderable):
page = ParentalKey(
TestGenerateTranslatableFieldsPage,
related_name="test_translatable_childobjects",
)
field = models.TextField()
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
class NonTranslatableChildObject(Orderable):
page = ParentalKey(
TestGenerateTranslatableFieldsPage,
related_name="test_nontranslatable_childobjects",
)
field = models.TextField()
class TestModelWithInvalidForeignKey(TranslatableMixin, models.Model):
fk = models.ForeignKey("wagtailcore.Site", on_delete=models.CASCADE)
# This should raise an error as the model being pointed to is not
# translatable!
translatable_fields = [
TranslatableField("fk"),
]
class PageWithCustomEditHandler(Page):
foo_field = models.TextField()
bar_field = models.TextField()
baz_field = models.TextField()
foo_panels = [
FieldPanel("foo_field"),
]
bar_panels = [
FieldPanel("bar_field"),
FieldPanel("baz_field"),
]
edit_handler = TabbedInterface(
[
ObjectList(bar_panels, heading="Bar"),
ObjectList([InlinePanel("child_objects")], heading="Child objects"),
ObjectList(foo_panels, heading="Foo"),
ObjectList(Page.content_panels, heading="Content"),
ObjectList(Page.promote_panels, heading="Promote"),
ObjectList(Page.settings_panels, heading="Settings"),
]
)
class PageWithCustomEditHandlerChildObject(TranslatableMixin, Orderable):
page = ParentalKey(PageWithCustomEditHandler, related_name="child_objects")
field = models.TextField()
class Meta(TranslatableMixin.Meta, Orderable.Meta):
pass
@register_translation_component(
heading="Custom translation view component",
help_text="This is the component help text",
enable_text="Add custom data",
disable_text="Do not send add custom data",
)
class CustomTranslationData(models.Model):
translation_source = models.ForeignKey(
TranslationSource, on_delete=models.CASCADE, editable=False
)
custom_text_field = models.CharField(max_length=255)
@classmethod
def get_or_create_from_source_and_translation_data(
cls, translation_source, translations, **kwargs
):
custom_data, created = CustomTranslationData.objects.get_or_create(
translation_source=translation_source, **kwargs
)
return custom_data, created
@register_translation_component(
heading="Notes",
enable_text="Add notes",
disable_text="Do not add notes",
)
class CustomButSimpleTranslationData(models.Model):
notes = models.CharField(max_length=255)
| 33.007937 | 88 | 0.726136 | [
"BSD-3-Clause"
] | dinoperovic/wagtail-localize | wagtail_localize/test/models.py | 16,636 | Python |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/BithostCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out IP addresses that host more than one node.'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
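# Example (illustrative, not in the upstream script): if the candidate list holds
# 203.0.113.5:9999 and 203.0.113.5:10000 (same sortkey) plus 198.51.100.7:9999,
# the histogram above maps the first sortkey to two entries and the second to one,
# so only 198.51.100.7:9999 survives the filter.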
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
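# Note on the ASN lookup above (added commentary): the reversed-octet query
# x.y.z.w -> w.z.y.x.origin.asn.cymru.com asks Team Cymru's IP-to-ASN service for a
# TXT record that typically looks like "64496 | 198.51.100.0/24 | US | arin | 2011-08-11"
# (example values only); the code keeps just the leading AS number, then admits at
# most MAX_SEEDS_PER_ASN IPv4 seeds per ASN, while IPv6 and onion entries are
# appended without ASN filtering.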
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.075581 | 186 | 0.567156 | [
"MIT"
] | BitHostCoin/BitHost | contrib/seeds/makeseeds.py | 5,517 | Python |
import fakeredis
import json
import mock
from app import bev
from app.cards import CARDS
class TestStartGame:
def test_start_with_two_players(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'DEAL'
assert g['hand_size'] == 6
assert g['dealer'] != g['cutter']
assert g['dealer'] != g['first_to_score']
assert g['turn'] == g['dealer']
assert g['winning_score'] == 121
assert not g['jokers']
def test_start_with_three_players(self, three_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(three_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'DEAL'
assert g['hand_size'] == 5
assert g['dealer'] != g['cutter']
assert g['dealer'] != g['first_to_score']
assert g['cutter'] != g['first_to_score']
assert g['turn'] == g['dealer']
assert g['winning_score'] == 121
assert not g['jokers']
def test_start_with_jokers(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers', jokers=True)
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'DEAL'
assert g['hand_size'] == 6
assert g['dealer'] != g['cutter']
assert g['dealer'] != g['first_to_score']
assert g['turn'] == g['dealer']
assert g['winning_score'] == 121
assert g['jokers']
def test_start_shorter_game(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers', winning_score=60)
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'DEAL'
assert g['hand_size'] == 6
assert g['dealer'] != g['cutter']
assert g['dealer'] != g['first_to_score']
assert g['turn'] == g['dealer']
assert g['winning_score'] == 60
assert not g['jokers']
class TestDeal:
def test_deal_unique_cards(self, three_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(three_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
bev.deal_hands('cheers')
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'DISCARD'
for player in three_player_game_unstarted['players'].keys():
assert player in g['hands'].keys()
assert len(g['hands'][player]) == g['hand_size']
class TestDiscard:
def test_first_discard(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
bev.deal_hands('cheers')
g = json.loads(fake_redis.get('cheers'))
player_done, all_done = bev.discard('cheers', 'sam', g['hands']['sam'][0])
assert not player_done
assert not all_done
assert g['state'] == 'DISCARD'
def test_first_to_be_done_discarding(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
bev.deal_hands('cheers')
g = json.loads(fake_redis.get('cheers'))
_, _ = bev.discard('cheers', 'sam', g['hands']['sam'][0])
player_done, all_done = bev.discard('cheers', 'sam', g['hands']['sam'][1])
assert player_done
assert not all_done
assert g['state'] == 'DISCARD'
def test_all_have_discarded(self, two_player_game_unstarted):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('cheers', json.dumps(two_player_game_unstarted))
bev.cache = fake_redis
bev.start_game('cheers')
bev.deal_hands('cheers')
g = json.loads(fake_redis.get('cheers'))
_, _ = bev.discard('cheers', 'sam', g['hands']['sam'][0])
player_done, all_done = bev.discard('cheers', 'sam', g['hands']['sam'][1])
assert player_done
assert not all_done
_, _ = bev.discard('cheers', 'diane', g['hands']['diane'][0])
player_done, all_done = bev.discard('cheers', 'diane', g['hands']['diane'][1])
assert player_done
assert all_done
g = json.loads(fake_redis.get('cheers'))
assert g['state'] == 'CUT'
class TestCut:
def test_cut(self, two_player_game_fully_dealt):
fake_redis = fakeredis.FakeRedis()
fake_redis.set('homemovies', json.dumps(two_player_game_fully_dealt))
bev.cache = fake_redis
bev.cut_deck('homemovies')
g = json.loads(fake_redis.get('homemovies'))
assert g['state'] == 'PLAY'
assert g['cut_card']
assert g['turn'] == g['first_to_score']
for player in g['players'].keys(): # ensure the cut card isn't also in anyone's hand
assert g['cut_card'] not in g['hands'][player]
class TestNextPlayer:
def test_next_must_pass(self):
"""
Kathy and Tom each have face cards, tom just played and the total is at 30
Expected: It is now kathy's turn and she must pass
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'tom',
'passed': [],
'run': [],
'total': 30
},
'players': {
'tom': 0,
'kathy': 0
},
'state': 'PLAY',
'turn': 'tom',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['turn'] == 'kathy'
assert g['pegging']['total'] == 30
assert bev.get_player_action('test', g['turn']) == 'PASS'
def test_next_must_play(self):
"""
Kathy and Tom each have aces. Tom just played and the total is at 30
Expected: It is now kathy's turn and she must play
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'tom',
'passed': [],
'run': [],
'total': 30},
'players': {
'tom': 0,
'kathy': 0
},
'state': 'PLAY',
'turn': 'tom',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['turn'] == 'kathy'
assert g['pegging']['total'] == 30
assert bev.get_player_action('test', g['turn']) == 'PLAY'
def test_everyone_has_passed_and_tom_cant_play_again_this_round(self):
"""
Kathy and Tom each have face cards, kathy just passed and the total is at 30
Expected: It is Tom's turn and he must pass.
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'tom',
'passed': ['kathy'],
'run': [],
'total': 30},
'players': {
'tom': 0,
'kathy': 0
},
'scoring_stats':
{'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['tom'] == 0
assert g['turn'] == 'tom'
assert g['pegging']['total'] == 30
assert bev.get_player_action('test', g['turn']) == 'PASS'
@mock.patch('app.award_points', mock.MagicMock(return_value=True))
def test_everyone_has_passed_and_tom_still_has_cards(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'tom',
'passed': ['kathy', 'tom'],
'run': [],
'total': 30},
'players': {
'tom': 0,
'kathy': 0
},
'scoring_stats':
{'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['tom'] == 1
assert g['turn'] == 'kathy'
assert g['pegging']['total'] == 0
assert bev.get_player_action('test', g['turn']) == 'PLAY'
def test_everyone_else_has_passed_and_tom_can_play_again_this_round(self):
"""
        Tom has an Ace, Kathy just passed and the total is at 30
        Expected: It is now Tom's turn to play; he does not receive a point for go
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['6d95c18472']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'tom',
'passed': ['kathy'],
'run': [],
'total': 30},
'players': {
'tom': 0,
'kathy': 0
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['tom'] == 0
assert g['turn'] == 'tom'
assert g['pegging']['total'] == 30
assert bev.get_player_action('test', g['turn']) == 'PLAY'
def test_kathy_hit_thirtyone_still_has_cards(self):
"""
Kathy just hit 31, and still has cards
        Expected: no new points for Kathy, and it is now Tom's turn with a fresh pegging area
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'kathy',
'passed': [],
'run': [],
'total': 31},
'players': {
'tom': 0,
'kathy': 2
},
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 2
assert g['turn'] == 'tom'
assert g['pegging']['total'] == 0
def test_kathy_hit_thirtyone_has_no_cards_left_and_others_do(self):
"""
Kathy just hit 31, and has no cards left. Tom has a card left
        Expected: no new points for Kathy, and it is now Tom's turn with a fresh pegging area
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': [], 'tom': ['95f92b2f0c']},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'kathy',
'passed': [],
'run': [],
'total': 31},
'players': {
'tom': 0,
'kathy': 2
},
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 2
assert g['turn'] == 'tom'
assert g['pegging']['total'] == 0
def test_player_hit_thirtyone_and_no_one_has_cards_left(self):
"""
Kathy just hit 31, and everyone is out of cards
Expected: no new points for kathy, and it is now time to score hands
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'first_to_score': 'tom',
'hands': {'kathy': [], 'tom': []},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'kathy',
'passed': [],
'run': [],
'total': 31},
'players': {
'tom': 0,
'kathy': 2
},
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 2
assert g['pegging']['total'] == 0
assert g['state'] == 'SCORE'
assert g['turn'] == 'tom'
@mock.patch('app.award_points', mock.MagicMock(return_value=True))
def test_no_one_has_cards_left(self):
"""
Kathy just hit 24, and everyone is out of cards
Expected: Kathy gets 1 point for go, and it is now time to score hands
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'first_to_score': 'tom',
'hands': {'kathy': [], 'tom': []},
'pegging': {
'cards': ['75e734d054', '60575e1068', '1d5eb77128'],
'last_played': 'kathy',
'passed': [],
'run': [],
'total': 24},
'players': {
'tom': 0,
'kathy': 2
},
'scoring_stats':
{'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 3
assert g['pegging']['total'] == 0
assert g['state'] == 'SCORE'
assert g['turn'] == 'tom'
class TestPlayScoring:
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_fifteen_two(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['4de6b73ab8'], # eight of hearts
'last_played': 'tom',
'passed': [],
'run': [],
'total': 8},
'players': {
'tom': 0,
'kathy': 0
},
'played_cards': {
'kathy': [],
'tom': []
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 2
assert g['pegging']['total'] == 15
assert set(g['pegging']['cards']) == set(['4de6b73ab8', 'c6f4900f82'])
assert g['hands']['kathy'] == ['6d95c18472']
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_thirtyone(self):
"""
Verify two points for 31
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['4de6b73ab8', 'f6571e162f', 'c88523b677'], # eight, ten, six
'last_played': 'tom',
'passed': [],
'run': [],
'total': 24},
'players': {
'tom': 0,
'kathy': 0
},
'played_cards': {
'kathy': ['f6571e162f'],
'tom': ['4de6b73ab8', 'c88523b677']
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert set(g['pegging']['cards']) == set(['4de6b73ab8', 'f6571e162f', 'c88523b677', 'c6f4900f82'])
assert g['hands']['kathy'] == ['6d95c18472']
assert g['players']['kathy'] == 2
assert g['pegging']['total'] == 31
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_pair(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['32f7615119'], # seven of spades
'last_played': 'tom',
'passed': [],
'run': [],
'total': 7},
'players': {
'tom': 0,
'kathy': 0
},
'played_cards': {
'kathy': [],
'tom': []
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert g['players']['kathy'] == 2
assert g['pegging']['total'] == 14
assert set(g['pegging']['cards']) == set(['32f7615119', 'c6f4900f82'])
assert g['hands']['kathy'] == ['6d95c18472']
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_three_of_a_kind(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['32f7615119', '4f99bf15e5'], # seven of spades, seven of diamonds
'last_played': 'tom',
'passed': [],
'run': [],
'total': 14},
'players': {
'tom': 2,
'kathy': 0
},
'played_cards': {
'kathy': ['32f7615119'],
'tom': ['4f99bf15e5']
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert set(g['pegging']['cards']) == set(['32f7615119', '4f99bf15e5', 'c6f4900f82'])
assert g['hands']['kathy'] == ['6d95c18472']
assert g['pegging']['total'] == 21
assert g['players']['kathy'] == 6
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_four_of_a_kind(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['32f7615119', '4f99bf15e5', 'def8effef6'], # seven of spades, diamonds, hearts
'last_played': 'tom',
'passed': [],
'run': [],
'total': 21},
'players': {
'tom': 6,
'kathy': 2
},
'played_cards': {
'kathy': ['32f7615119'],
'tom': ['4f99bf15e5', 'def8effef6']
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert set(g['pegging']['cards']) == set(['32f7615119', '4f99bf15e5', 'def8effef6', 'c6f4900f82']) # all the sevens
assert g['hands']['kathy'] == ['6d95c18472']
assert g['pegging']['total'] == 28
assert g['players']['kathy'] == 14
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_run_of_three(self):
"""
test run of three scores three points
"""
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'hands': {'kathy': ['6d95c18472', 'c6f4900f82'], 'tom': ['ace1293f8a']},
'pegging': {
'cards': ['4de6b73ab8', 'c88523b677'], # eight, six
'last_played': 'tom',
'passed': [],
'run': [],
'total': 14},
'players': {
'tom': 0,
'kathy': 0
},
'played_cards': {
'kathy': ['32f7615119'],
'tom': ['4f99bf15e5', 'def8effef6']
},
'scoring_stats':
{'tom': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'kathy': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
'state': 'PLAY',
'turn': 'kathy',
'winning_score': 121,
}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
just_won, points, points_source = bev.score_play('test', 'kathy', seven_of_clubs)
assert not just_won
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert set(g['pegging']['cards']) == set(['4de6b73ab8', 'c88523b677', 'c6f4900f82']) # all the sevens
assert g['hands']['kathy'] == ['6d95c18472']
assert g['pegging']['total'] == 21
assert g['players']['kathy'] == 3
def test_run_of_four(self):
pass
def test_run_of_five_and_fifteen_two(self):
pass
def test_fifteen_two_and_a_pair(self):
pass
def test_fifteen_two_and_three_of_a_kind(self):
pass
class TestResetGameDict:
def test_reset(self):
fake_redis = fakeredis.FakeRedis()
game_dict = {
'cards': CARDS,
'crib': ['04a70825ff', '5c6bdd4fee', '9aa045dd99', 'bd4b01946d'],
'cutter': 'brendon',
'dealer': 'jason',
'deck': ['d00bb3f3b7','64fe85d796','fc0f324620','276f33cf69','04f17d1351','f6571e162f','de1c863a7f',
'a482167f2a','ce46b344a3','ae2caea4bb','4dfe41e461','597e4519ac','c88623fa16','e26d0bead3',
'dd3749a1bc','83ef982410','4c8519af34','6d95c18472','b1fb3bec6f','c88523b677','32f7615119',
'd7ca85cf5e','30e1ddb610','85ba715700','a6a3e792b4','1d5eb77128','110e6e5b19','d1c9fde8ef',
'75e734d054','36493dcc05','e356ece3fc','95f92b2f0c','def8effef6','60575e1068','9eba093a9d',
'a20b6dac2c','f696d1f2d3','fa0873dd7d','ff2de622d8','3698fe0420'],
'first_to_score': 'brendon',
'hand_size': 6,
'hands': {
'brendon': [],
'jason': []},
'jokers': True,
'name': 'homemovies',
'ok_with_next_round': ['brendon', 'jason'],
'pegging': {
'cards': ['4de6b73ab8', 'e4fc8b9004', '5e1e7e60ab', 'ace1293f8a', 'd3a2460e93', '56594b3880',
'4f99bf15e5', 'c6f4900f82'],
'passed': [], 'run': [], 'total': 0
},
'play_again': [],
'played_cards': {'brendon': [],
'jason': []},
'players': {'brendon': 12, 'jason': 11},
'scored_hands': ['brendon', 'jason'],
'state': 'SCORE',
'turn': 'jason',
'winning_score': 121}
fake_redis.set('homemovies', json.dumps(game_dict))
bev.cache = fake_redis
bev.reset_game_dict('homemovies')
expected = {
"jokers": True,
"name": 'homemovies',
'players': {'brendon': 0, 'jason': 0},
'scoring_stats':
{'brendon': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}, 'jason': {
'a_play': 0,
'b_hand': 0,
'c_crib': 0
}
},
"state": "INIT",
"winning_score": 121,
}
g = json.loads(fake_redis.get('homemovies'))
        assert g == expected
| 35.030552 | 124 | 0.475429 | [
"MIT"
] | zachcalvert/card-games | cribbage/app/tests/test_bev.py | 29,811 | Python |
"""
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations
from datetime import datetime
from typing import (
TYPE_CHECKING,
Any,
Callable,
Sequence,
TypeVar,
cast,
final,
)
import warnings
import numpy as np
from pandas._libs import (
NaT,
Timedelta,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
NaTType,
Resolution,
Tick,
parsing,
to_offset,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "asi8", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
    Common ops mixin to support a unified interface for datetimelike Index.
"""
_is_numeric_dtype = False
_can_hold_strings = False
_data: DatetimeArray | TimedeltaArray | PeriodArray
freq: BaseOffset | None
freqstr: str | None
_resolution_obj: Resolution
_bool_ops: list[str] = []
_field_ops: list[str] = []
# error: "Callable[[Any], Any]" has no attribute "fget"
hasnans = cache_readonly(
DatetimeLikeArrayMixin._hasnans.fget # type: ignore[attr-defined]
)
@property
def _is_all_dates(self) -> bool:
return True
# ------------------------------------------------------------------------
def equals(self, other: Any) -> bool:
"""
Determines if two Index objects contain the same elements.
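        Examples
        --------
        A minimal illustrative sketch (assumes pandas is importable as ``pd``;
        not exhaustive).
        >>> import pandas as pd
        >>> idx = pd.date_range("2020-01-01", periods=2, freq="D")
        >>> idx.equals(pd.DatetimeIndex(["2020-01-01", "2020-01-02"]))
        True
        >>> idx.equals(pd.Index([1, 2]))
        False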
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return True
_can_hold_na = True
_na_value: NaTType = NaT
"""The expected NA value to use with this index."""
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
return super()._convert_tolerance(tolerance, target)
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str = "NaT",
date_format: str | None = None,
) -> list[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: list[str], na_rep: str = "NaT", date_format: str | None = None
) -> list[str]:
# matches base class except for whitespace padding and date_format
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
# iterating over _attributes prevents us from doing this for PeriodIndex
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq) # e.g. D -> 'D'
attrs.append(("freq", freq))
return attrs
@Appender(Index._summary.__doc__)
def _summary(self, name=None) -> str:
result = super()._summary(name=name)
if self.freq:
result += f"\nFreq: {self.freqstr}"
return result
# --------------------------------------------------------------------
# Indexing Methods
def _can_partial_date_slice(self, reso: Resolution) -> bool:
raise NotImplementedError
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
raise NotImplementedError
def _parse_with_reso(self, label: str):
# overridden by TimedeltaIndex
parsed, reso_str = parsing.parse_time_string(label, self.freq)
reso = Resolution.from_attrname(reso_str)
return parsed, reso
def _get_string_slice(self, key: str):
parsed, reso = self._parse_with_reso(key)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
@final
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
if not self._can_partial_date_slice(reso):
raise ValueError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"""
If label is a string, cast it to scalar type according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None, lib.no_default]
self._deprecated_arg(kind, "kind", "_maybe_cast_slice_bound")
if isinstance(label, str):
try:
parsed, reso = self._parse_with_reso(label)
except ValueError as err:
# DTI -> parsing.DateParseError
# TDI -> 'unit abbreviation w/o a number'
# PI -> string cannot be parsed as datetime-like
raise self._invalid_indexer("slice", label) from err
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
raise self._invalid_indexer("slice", label)
return label
# --------------------------------------------------------------------
# Arithmetic Methods
def shift(self: _T, periods: int = 1, freq=None) -> _T:
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
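        Examples
        --------
        A minimal illustrative sketch (assumes pandas is importable as ``pd``;
        reprs are indicative and may vary slightly across pandas versions).
        >>> import pandas as pd
        >>> idx = pd.date_range("2020-01-01", periods=3, freq="D")
        >>> idx.shift(2)
        DatetimeIndex(['2020-01-03', '2020-01-04', '2020-01-05'], dtype='datetime64[ns]', freq='D')
        >>> idx.shift(1, freq="H")
        DatetimeIndex(['2020-01-01 01:00:00', '2020-01-02 01:00:00',
                       '2020-01-03 01:00:00'],
                      dtype='datetime64[ns]', freq='D')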
"""
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)._simple_new(result, name=self.name)
# --------------------------------------------------------------------
@doc(Index._maybe_cast_listlike_indexer)
def _maybe_cast_listlike_indexer(self, keyarr):
try:
res = self._data._validate_listlike(keyarr, allow_object=True)
except (ValueError, TypeError):
if not isinstance(keyarr, ExtensionArray):
# e.g. we don't want to cast DTA to ndarray[object]
res = com.asarray_tuplesafe(keyarr)
# TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
else:
res = keyarr
return Index(res, dtype=res.dtype)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
"""
Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
but not PeriodIndex
"""
_data: DatetimeArray | TimedeltaArray
_comparables = ["name", "freq"]
_attributes = ["name", "freq"]
# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique
_join_precedence = 10
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self._name)
def is_type_compatible(self, kind: str) -> bool:
warnings.warn(
f"{type(self).__name__}.is_type_compatible is deprecated and will be "
"removed in a future version.",
FutureWarning,
stacklevel=2,
)
return kind in self._data._infer_matches
@property
def values(self) -> np.ndarray:
# NB: For Datetime64TZ this is lossy
return self._data._ndarray
# --------------------------------------------------------------------
# Set Operation Methods
@cache_readonly
def _as_range_index(self) -> RangeIndex:
# Convert our i8 representations to RangeIndex
# Caller is responsible for checking isinstance(self.freq, Tick)
freq = cast(Tick, self.freq)
tick = freq.delta.value
rng = range(self[0].value, self[-1].value + tick, tick)
return RangeIndex(rng)
def _can_range_setop(self, other):
return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
def _wrap_range_setop(self, other, res_i8):
new_freq = None
if not len(res_i8):
# RangeIndex defaults to step=1, which we don't want.
new_freq = self.freq
elif isinstance(res_i8, RangeIndex):
new_freq = to_offset(Timedelta(res_i8.step))
res_i8 = res_i8
# TODO: we cannot just do
# type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
# because test_setops_preserve_freq fails with _validate_frequency raising.
# This raising is incorrect, as 'on_freq' is incorrect. This will
# be fixed by GH#41493
res_values = res_i8.values.view(self._data._ndarray.dtype)
result = type(self._data)._simple_new(
res_values, dtype=self.dtype, freq=new_freq
)
return self._wrap_setop_result(other, result)
def _range_intersect(self, other, sort):
# Dispatch to RangeIndex intersection logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.intersection(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _range_union(self, other, sort):
# Dispatch to RangeIndex union logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.union(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _intersection(self, other: Index, sort=False) -> Index:
"""
intersection specialized to the case with matching dtypes and both non-empty.
"""
other = cast("DatetimeTimedeltaMixin", other)
if self._can_range_setop(other):
return self._range_intersect(other, sort=sort)
if not self._can_fast_intersect(other):
result = Index._intersection(self, other, sort=sort)
# We need to invalidate the freq because Index._intersection
# uses _shallow_copy on a view of self._data, which will preserve
# self.freq if we're not careful.
# At this point we should have result.dtype == self.dtype
# and type(result) is type(self._data)
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq("infer")
else:
return self._fast_intersect(other, sort)
def _fast_intersect(self, other, sort):
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
        # and ends at the smaller of the two indexes' last elements
end = min(left[-1], right[-1])
start = right[0]
if end < start:
result = self[:0]
else:
lslice = slice(*left.slice_locs(start, end))
result = left._values[lslice]
return result
def _can_fast_intersect(self: _T, other: _T) -> bool:
# Note: we only get here with len(self) > 0 and len(other) > 0
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
        # this, along with matching freqs, ensures that we "line up",
# so intersection will preserve freq
# Note we are assuming away Ticks, as those go through _range_intersect
# GH#42104
return self.freq.n == 1
def _can_fast_union(self: _T, other: _T) -> bool:
# Assumes that type(self) == type(other), as per the annotation
# The ability to fast_union also implies that `freq` should be
# retained on union.
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
# TODO: do union on the reversed indexes?
return False
if len(self) == 0 or len(other) == 0:
# only reached via union_many
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
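    # Illustrative sketch (not part of the upstream implementation): two
    # month-start indexes that share a freq and are exactly adjacent can be
    # fast-unioned, so the freq survives the union:
    #   >>> left = pd.date_range("2020-01-01", periods=3, freq="MS")
    #   >>> right = pd.date_range("2020-04-01", periods=3, freq="MS")
    #   >>> left.union(right).freq
    #   <MonthBegin>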
def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT:
# Caller is responsible for ensuring self and other are non-empty
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
elif sort is False:
# TDIs are not in the "correct" order and we don't want
# to sort but want to remove overlaps
left, right = self, other
left_start = left[0]
loc = right.searchsorted(left_start, side="left")
right_chunk = right._values[:loc]
dates = concat_compat((left._values, right_chunk))
result = type(self)._simple_new(dates, name=self.name)
return result
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side="right")
right_chunk = right._values[loc:]
dates = concat_compat([left._values, right_chunk])
# The can_fast_union check ensures that the result.freq
# should match self.freq
dates = type(self._data)(dates, freq=self.freq)
result = type(self)._simple_new(dates)
return result
else:
return left
def _union(self, other, sort):
# We are called by `union`, which is responsible for this validation
assert isinstance(other, type(self))
assert self.dtype == other.dtype
if self._can_range_setop(other):
return self._range_union(other, sort=sort)
if self._can_fast_union(other):
result = self._fast_union(other, sort=sort)
# in the case with sort=None, the _can_fast_union check ensures
# that result.freq == self.freq
return result
else:
return super()._union(other, sort)._with_freq("infer")
# --------------------------------------------------------------------
# Join Methods
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
"""
freq = None
if self._can_fast_union(other):
freq = self.freq
return freq
def _wrap_joined_index(self, joined, other):
assert other.dtype == self.dtype, (other.dtype, self.dtype)
result = super()._wrap_joined_index(joined, other)
result._data._freq = self._get_join_freq(other)
return result
def _get_engine_target(self) -> np.ndarray:
# engine methods and libjoin methods need dt64/td64 values cast to i8
return self._data._ndarray.view("i8")
def _from_join_target(self, result: np.ndarray):
# view e.g. i8 back to M8[ns]
result = result.view(self._data._ndarray.dtype)
return self._data._from_backing_data(result)
# --------------------------------------------------------------------
# List-like Methods
def _get_delete_freq(self, loc: int | slice | Sequence[int]):
"""
Find the `freq` for self.delete(loc).
"""
freq = None
if self.freq is not None:
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
# error: Incompatible types in assignment (expression has
# type "Union[slice, ndarray]", variable has type
# "Union[int, slice, Sequence[int]]")
loc = lib.maybe_indices_to_slice( # type: ignore[assignment]
np.asarray(loc, dtype=np.intp), len(self)
)
if isinstance(loc, slice) and loc.step in (1, None):
if loc.start in (0, None) or loc.stop in (len(self), None):
freq = self.freq
return freq
def _get_insert_freq(self, loc: int, item):
"""
Find the `freq` for self.insert(loc, item).
"""
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if self.freq is not None:
# freq can be preserved on edge cases
if self.size:
if item is NaT:
pass
elif (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
else:
# Adding a single item to an empty index may preserve freq
if isinstance(self.freq, Tick):
# all TimedeltaIndex cases go through here; is_on_offset
# would raise TypeError
freq = self.freq
elif self.freq.is_on_offset(item):
freq = self.freq
return freq
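    # Illustrative sketch (not part of the upstream implementation): freq is
    # only kept when the inserted item extends the regular pattern at an edge.
    #   >>> idx = pd.date_range("2020-01-01", periods=3, freq="MS")
    #   >>> idx.insert(3, pd.Timestamp("2020-04-01")).freq    # appends one step past the end
    #   <MonthBegin>
    #   >>> print(idx.insert(1, pd.Timestamp("2020-01-15")).freq)  # breaks the pattern
    #   None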
@doc(NDArrayBackedExtensionIndex.delete)
def delete(self, loc):
result = super().delete(loc)
result._data._freq = self._get_delete_freq(loc)
return result
@doc(NDArrayBackedExtensionIndex.insert)
def insert(self, loc: int, item):
result = super().insert(loc, item)
if isinstance(result, type(self)):
# i.e. parent class method did not cast
result._data._freq = self._get_insert_freq(loc, item)
return result
# --------------------------------------------------------------------
# NDArray-Like Methods
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take((), kwargs)
indices = np.asarray(indices, dtype=np.intp)
result = NDArrayBackedExtensionIndex.take(
self, indices, axis, allow_fill, fill_value, **kwargs
)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
freq = self._data._get_getitem_freq(maybe_slice)
result._data._freq = freq
return result
| 33.65309 | 87 | 0.580151 | [
"BSD-3-Clause"
] | DiligentDolphin/pandas | pandas/core/indexes/datetimelike.py | 23,961 | Python |
# -*- coding: utf-8 -*-
"""
__author__ = "Jani Yli-Kantola"
__copyright__ = ""
__credits__ = ["Harri Hirvonsalo", "Aleksi Palomäki"]
__license__ = "MIT"
__version__ = "1.3.0"
__maintainer__ = "Jani Yli-Kantola"
__contact__ = "https://github.com/HIIT/mydata-stack"
__status__ = "Development"
"""
from app.helpers import get_custom_logger
# create logger
from app.mod_api_auth.services import clear_apikey_sqlite_db
from app.mod_blackbox.services import clear_blackbox_sqlite_db
from app.mod_database.controllers import get_db_statistics
from app.mod_database.helpers import drop_table_content
logger = get_custom_logger(__name__)
def clear_mysql_db():
"""
Clear MySQL Database
:return: true
"""
logger.info("Clearing MySQL Database")
try:
drop_table_content()
except Exception as exp:
logger.error("Could not clear MySQL Database: " + repr(exp))
raise
else:
logger.info("MySQL Database cleared")
return True
def clear_blackbox_db():
"""
Clear black box database
:return: true
"""
logger.info("Clearing Blackbox Database")
try:
clear_blackbox_sqlite_db()
except Exception as exp:
logger.error("Could not clear Blackbox Database: " + repr(exp))
raise
else:
logger.info("Blackbox Database cleared")
return True
def clear_api_key_db():
"""
Clear API Key database
:return: true
"""
logger.info("##########")
logger.info("Clearing ApiKey Database")
try:
clear_apikey_sqlite_db()
except Exception as exp:
logger.error("Could not clear ApiKey Database: " + repr(exp))
raise
else:
logger.info("ApiKey Database cleared")
return True
def system_check():
"""
Check system functionality
:return: dict
"""
logger.info("Checking system functionality")
try:
status_dict = {
"type": "StatusReport",
"attributes": {
"title": "System running as intended",
"db_row_counts": get_db_statistics()
}
}
except Exception as exp:
logger.error("System not running as intended: " + repr(exp))
raise
else:
logger.info("ApiKey Database cleared")
return status_dict
| 24.357895 | 71 | 0.637424 | [
"MIT"
] | TamSzaGot/mydata-sdk | Account/app/mod_system/controller.py | 2,315 | Python |
import http
import json
from openbrokerapi import errors
from openbrokerapi.catalog import ServicePlan
from openbrokerapi.service_broker import Binding, BindDetails, BindResource, VolumeMount, SharedDevice, BindState, \
Service
from tests import BrokerTestCase
expected_credentials = {"uri": "mysql://mysqluser:pass@mysqlhost:3306/dbname",
"username": "mysqluser",
"password": "pass",
"host": "mysqlhost",
"port": 3306,
"database": "dbname"}
class BindTest(BrokerTestCase):
def setUp(self):
self.broker.catalog.return_value = [
Service(
id='service-guid-here',
name='',
description='',
bindable=True,
plans=[
ServicePlan('plan-guid-here', name='', description='')
])
]
def test_bind_called_with_the_right_values(self):
self.broker.bind.return_value = Binding(
credentials=expected_credentials
)
self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {
"app_guid": "app-guid-here",
"route": "route-here"
},
"parameters": {
"parameter1": 1
}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
actual_instance_id, actual_binding_id, actual_details, async_allowed = self.broker.bind.call_args[0]
self.assertEqual(actual_instance_id, "here-instance_id")
self.assertEqual(actual_binding_id, "here-binding_id")
self.assertIsInstance(actual_details, BindDetails)
self.assertEqual(actual_details.service_id, "service-guid-here")
self.assertEqual(actual_details.plan_id, "plan-guid-here")
self.assertEqual(actual_details.parameters, dict(parameter1=1))
self.assertIsInstance(actual_details.bind_resource, BindResource)
self.assertEqual(actual_details.bind_resource.app_guid, "app-guid-here")
self.assertEqual(actual_details.bind_resource.route, "route-here")
self.assertFalse(async_allowed)
def test_bind_called_just_with_required_fields(self):
self.broker.bind.return_value = Binding(
credentials=expected_credentials
)
self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here"
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
actual_instance_id, actual_binding_id, actual_details, async_allowed = self.broker.bind.call_args[0]
self.assertEqual(actual_instance_id, "here-instance_id")
self.assertEqual(actual_binding_id, "here-binding_id")
self.assertIsInstance(actual_details, BindDetails)
self.assertEqual(actual_details.service_id, "service-guid-here")
self.assertEqual(actual_details.plan_id, "plan-guid-here")
self.assertIsNone(actual_details.app_guid)
self.assertIsNone(actual_details.parameters)
self.assertIsNone(actual_details.bind_resource)
self.assertFalse(async_allowed)
def test_bind_ignores_unknown_parameters(self):
self.broker.bind.return_value = Binding(
credentials=expected_credentials
)
self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"unknown": "unknown",
"bind_resource": {
"unknown": "unknown"
},
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
actual_instance_id, actual_binding_id, actual_details, async_allowed = self.broker.bind.call_args[0]
self.assertEqual(actual_instance_id, "here-instance_id")
self.assertEqual(actual_binding_id, "here-binding_id")
self.assertIsInstance(actual_details, BindDetails)
self.assertEqual(actual_details.service_id, "service-guid-here")
self.assertEqual(actual_details.plan_id, "plan-guid-here")
self.assertIsNone(actual_details.app_guid)
self.assertIsNone(actual_details.parameters)
self.assertIsNotNone(actual_details.bind_resource)
self.assertFalse(async_allowed)
def test_returns_201_if_binding_has_been_created(self):
self.broker.bind.return_value = Binding(
credentials=expected_credentials
)
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {
"app_guid": "app-guid-here"
}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.CREATED)
self.assertEqual(response.json, dict(
credentials=expected_credentials
))
def test_returns_202_for_async_binding(self):
self.broker.bind.return_value = Binding(
state=BindState.IS_ASYNC,
operation='bind'
)
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id&accepts_incomplete=true",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {
"app_guid": "app-guid-here"
}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(http.HTTPStatus.ACCEPTED, response.status_code)
self.assertEqual(response.json, {'operation': 'bind'})
def test_supports_volume_mounts(self):
self.broker.bind.return_value = Binding(
volume_mounts=[
VolumeMount(
driver="",
container_dir="",
mode="",
device_type="",
device=SharedDevice(
volume_id="",
mount_config=dict(config1="1")
)
)
]
)
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {
"app_guid": "app-guid-here"
}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.CREATED)
self.assertEqual(response.json, dict(
volume_mounts=[
dict(
driver="",
container_dir="",
mode="",
device_type="",
device=dict(
volume_id="",
mount_config=dict(config1="1")
)
)
]
))
def test_returns_409_if_binding_already_exists(self):
self.broker.bind.side_effect = errors.ErrBindingAlreadyExists()
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {
"app_guid": "app-guid-here"
}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.CONFLICT)
self.assertEqual(response.json, dict())
def test_returns_422_if_app_guid_is_required_but_not_given(self):
self.broker.bind.side_effect = errors.ErrAppGuidNotProvided()
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.UNPROCESSABLE_ENTITY)
self.assertEqual(response.json, dict(
error="RequiresApp",
description="This service supports generation of credentials through binding an application only."
))
def test_returns_401_if_request_does_not_contain_auth_header(self):
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json'
})
self.assertEqual(response.status_code, http.HTTPStatus.UNAUTHORIZED)
def test_returns_400_if_request_does_not_contain_content_type_header(self):
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({}),
headers={
'X-Broker-Api-Version': '2.13',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.BAD_REQUEST)
self.assertEqual(response.json, dict(description='Improper Content-Type header. Expecting "application/json"'))
def test_returns_400_if_request_does_not_contain_valid_json_body(self):
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data='I am not a json object',
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.BAD_REQUEST)
self.assertEqual(response.json, dict(description='Improper Content-Type header. Expecting "application/json"'))
def test_returns_200_if_identical_binding_already_exists(self):
self.broker.bind.return_value = Binding(state=BindState.IDENTICAL_ALREADY_EXISTS)
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.OK)
self.assertEqual(response.json, dict())
def test_returns_422_if_instance_is_in_use(self):
self.broker.bind.side_effect = errors.ErrConcurrentInstanceAccess()
response = self.client.put(
"/v2/service_instances/here-instance_id/service_bindings/here-binding_id",
data=json.dumps({
"service_id": "service-guid-here",
"plan_id": "plan-guid-here",
"bind_resource": {}
}),
headers={
'X-Broker-Api-Version': '2.13',
'Content-Type': 'application/json',
'Authorization': self.auth_header
})
self.assertEqual(response.status_code, http.HTTPStatus.UNPROCESSABLE_ENTITY)
self.assertEqual(response.json, dict(
description='The Service Broker does not support concurrent requests that mutate the same resource.',
error='ConcurrencyError'
))
| 38.561254 | 119 | 0.572442 | [
"MIT"
] | eruvanos/openbrokerapi | tests/test_bind.py | 13,535 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gym.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from gym.common.protobuf import vnf_br_pb2 as vnf__br__pb2
from gym.common.protobuf import vnf_bd_pb2 as vnf__bd__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='gym.proto',
package='gym',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\tgym.proto\x12\x03gym\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x0cvnf_br.proto\x1a\x0cvnf_bd.proto\"`\n\tApparatus\x12\x19\n\x06\x61gents\x18\x01 \x03(\x0b\x32\t.gym.Info\x12\x1b\n\x08monitors\x18\x02 \x03(\x0b\x32\t.gym.Info\x12\x1b\n\x08managers\x18\x03 \x03(\x0b\x32\t.gym.Info\"E\n\tArtifacts\x12\x1a\n\x07probers\x18\x06 \x03(\x0b\x32\t.gym.Tool\x12\x1c\n\tlisteners\x18\x07 \x03(\x0b\x32\t.gym.Tool\"\xc5\x05\n\x04Info\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x0c\n\x04role\x18\x02 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x03 \x01(\t\x12*\n\x0b\x65nvironment\x18\x04 \x01(\x0b\x32\x15.gym.Info.Environment\x12-\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12!\n\tartifacts\x18\x06 \x01(\x0b\x32\x0e.gym.Artifacts\x12!\n\tapparatus\x18\x07 \x01(\x0b\x32\x0e.gym.Apparatus\x12\x10\n\x08\x63ontacts\x18\x08 \x03(\t\x1a\xdc\x03\n\x0b\x45nvironment\x12\x0e\n\x06system\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x12\x0f\n\x07release\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x11\n\tprocessor\x18\x05 \x01(\t\x12+\n\x03\x63pu\x18\x06 \x03(\x0b\x32\x1e.gym.Info.Environment.CpuEntry\x12\x31\n\x06memory\x18\x07 \x03(\x0b\x32!.gym.Info.Environment.MemoryEntry\x12-\n\x04\x64isk\x18\x08 \x03(\x0b\x32\x1f.gym.Info.Environment.DiskEntry\x12\x33\n\x07network\x18\t \x03(\x0b\x32\".gym.Info.Environment.NetworkEntry\x1a*\n\x08\x43puEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a-\n\x0bMemoryEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a+\n\tDiskEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a.\n\x0cNetworkEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"X\n\x05Sched\x12\x0c\n\x04\x66rom\x18\x01 \x01(\r\x12\r\n\x05until\x18\x02 \x01(\r\x12\x10\n\x08\x64uration\x18\x03 \x01(\r\x12\x10\n\x08interval\x18\x04 \x01(\r\x12\x0e\n\x06repeat\x18\x05 \x01(\r\"\xa1\x01\n\x06\x41\x63tion\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x10\n\x08instance\x18\x02 \x01(\x05\x12\x0c\n\x04name\x18\x03 \x01(\t\x12#\n\x04\x61rgs\x18\x04 \x03(\x0b\x32\x15.gym.Action.ArgsEntry\x12\x19\n\x05sched\x18\x05 \x01(\x0b\x32\n.gym.Sched\x1a+\n\tArgsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"F\n\x0bInstruction\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05trial\x18\x02 \x01(\x05\x12\x1c\n\x07\x61\x63tions\x18\x03 \x03(\x0b\x32\x0b.gym.Action\"\xc3\x04\n\nEvaluation\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x10\n\x08instance\x18\x02 \x01(\x05\x12\x0e\n\x06repeat\x18\x03 \x01(\x05\x12&\n\x06source\x18\x04 \x01(\x0b\x32\x16.gym.Evaluation.Source\x12-\n\x07metrics\x18\x05 \x03(\x0b\x32\x1c.gym.Evaluation.MetricsEntry\x12,\n\ttimestamp\x18\x06 \x01(\x0b\x32\x19.gym.Evaluation.Timestamp\x12\r\n\x05\x65rror\x18\x07 \x01(\t\x1aO\n\x06Source\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x0c\n\x04\x63\x61ll\x18\x05 \x01(\t\x1ax\n\x06Metric\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0c\n\x04unit\x18\x03 \x01(\t\x12\x10\n\x06scalar\x18\x04 \x01(\x01H\x00\x12)\n\x06series\x18\x05 \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x42\x07\n\x05value\x1a`\n\tTimestamp\x12)\n\x05start\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12(\n\x04stop\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x46\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.gym.Evaluation.Metric:\x02\x38\x01\"\xb5\x02\n\x08Snapshot\x12\n\n\x02id\x18\x01 \x01(\x05\x12\r\n\x05trial\x18\x02 \x01(\x05\x12$\n\x06origin\x18\x03 \x01(\x0b\x32\x14.gym.Snapshot.Origin\x12\x33\n\x0b\x65valuations\x18\x04 \x03(\x0b\x32\x1e.gym.Snapshot.EvaluationsEntry\x12-\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x65rror\x18\x06 \x01(\t\x1a\x30\n\x06Origin\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04role\x18\x02 \x01(\t\x12\x0c\n\x04host\x18\x03 \x01(\t\x1a\x43\n\x10\x45valuationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x1e\n\x05value\x18\x02 \x01(\x0b\x32\x0f.gym.Evaluation:\x02\x38\x01\"\x88\x02\n\x04Tool\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12-\n\nparameters\x18\x03 \x03(\x0b\x32\x19.gym.Tool.ParametersEntry\x12\'\n\x07metrics\x18\x04 \x03(\x0b\x32\x16.gym.Tool.MetricsEntry\x12\x19\n\x05sched\x18\x05 \x01(\x0b\x32\n.gym.Sched\x12\x10\n\x08instance\x18\x06 \x01(\x05\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe0\x01\n\x04Task\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0e\n\x06trials\x18\x02 \x01(\x05\x12\x0c\n\x04test\x18\x03 \x01(\x05\x12\x1f\n\x06\x61gents\x18\x04 \x03(\x0b\x32\x0f.gym.Task.Agent\x12#\n\x08monitors\x18\x05 \x03(\x0b\x32\x11.gym.Task.Monitor\x1a\x31\n\x05\x41gent\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x1a\n\x07probers\x18\x02 \x03(\x0b\x32\t.gym.Tool\x1a\x35\n\x07Monitor\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x1c\n\tlisteners\x18\x03 \x03(\x0b\x32\t.gym.Tool\"\xd0\x01\n\x06Report\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04test\x18\x02 \x01(\x05\x12-\n\tsnapshots\x18\x03 \x03(\x0b\x32\x1a.gym.Report.SnapshotsEntry\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\r\n\x05\x65rror\x18\x05 \x01(\t\x1a?\n\x0eSnapshotsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.gym.Snapshot:\x02\x38\x01\"=\n\x06Layout\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x19\n\x05vnfbr\x18\x02 \x01(\x0b\x32\n.gym.VnfBr\x12\x0c\n\x04\x66\x65\x61t\x18\x03 \x01(\t\"^\n\x06Result\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x19\n\x05vnfbr\x18\x02 \x01(\x0b\x32\n.gym.VnfBr\x12-\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"n\n\x06\x44\x65ploy\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x10\n\x08workflow\x18\x02 \x01(\t\x12\x1f\n\x08scenario\x18\x03 \x01(\x0b\x32\r.gym.Scenario\x12%\n\x0b\x65nvironment\x18\x04 \x01(\x0b\x32\x10.gym.Environment\"=\n\x05\x42uilt\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0b\n\x03\x61\x63k\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x0c\n\x04info\x18\x04 \x01(\x0c\"\x98\x03\n\x05Stats\x12\x13\n\x0b\x65nvironment\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\x12,\n\x0cmeasurements\x18\x03 \x03(\x0b\x32\x16.gym.Stats.Measurement\x1a\xbb\x02\n\x0bMeasurement\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x04tags\x18\x02 \x03(\x0b\x32 .gym.Stats.Measurement.TagsEntry\x12\x32\n\x06\x66ields\x18\x03 \x03(\x0b\x32\".gym.Stats.Measurement.FieldsEntry\x1a@\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0c\n\x04unit\x18\x03 \x01(\t\x12\r\n\x05value\x18\x04 \x01(\t\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1aK\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 
\x01(\x0b\x32\x1c.gym.Stats.Measurement.Field:\x02\x38\x01\"\x8d\x01\n\x05State\x12\x0e\n\x06source\x18\x01 \x01(\t\x12$\n\x08messages\x18\x02 \x03(\x0b\x32\x12.gym.State.Content\x12&\n\x02ts\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a&\n\x07\x43ontent\x12\x0c\n\x04info\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"`\n\x06Status\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0c\n\x04info\x18\x03 \x01(\x0c\x12-\n\ttimestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2O\n\x06Player\x12\x1d\n\x05Greet\x12\t.gym.Info\x1a\t.gym.Info\x12&\n\nCallLayout\x12\x0b.gym.Layout\x1a\x0b.gym.Result2L\n\x07Manager\x12\x1d\n\x05Greet\x12\t.gym.Info\x1a\t.gym.Info\x12\"\n\x08\x43\x61llTask\x12\t.gym.Task\x1a\x0b.gym.Report2Z\n\x05\x41gent\x12\x1d\n\x05Greet\x12\t.gym.Info\x1a\t.gym.Info\x12\x32\n\x0f\x43\x61llInstruction\x12\x10.gym.Instruction\x1a\r.gym.Snapshot2\\\n\x07Monitor\x12\x1d\n\x05Greet\x12\t.gym.Info\x1a\t.gym.Info\x12\x32\n\x0f\x43\x61llInstruction\x12\x10.gym.Instruction\x1a\r.gym.Snapshot2\'\n\x05Infra\x12\x1e\n\x03Run\x12\x0b.gym.Deploy\x1a\n.gym.Built2L\n\x03\x43LI\x12!\n\x06Inform\x12\n.gym.State\x1a\x0b.gym.Status\x12\"\n\x07\x43ollect\x12\n.gym.Stats\x1a\x0b.gym.Statusb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,vnf__br__pb2.DESCRIPTOR,vnf__bd__pb2.DESCRIPTOR,])
_APPARATUS = _descriptor.Descriptor(
name='Apparatus',
full_name='gym.Apparatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='agents', full_name='gym.Apparatus.agents', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='monitors', full_name='gym.Apparatus.monitors', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='managers', full_name='gym.Apparatus.managers', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=109,
serialized_end=205,
)
_ARTIFACTS = _descriptor.Descriptor(
name='Artifacts',
full_name='gym.Artifacts',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='probers', full_name='gym.Artifacts.probers', index=0,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='listeners', full_name='gym.Artifacts.listeners', index=1,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=207,
serialized_end=276,
)
_INFO_ENVIRONMENT_CPUENTRY = _descriptor.Descriptor(
name='CpuEntry',
full_name='gym.Info.Environment.CpuEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Info.Environment.CpuEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Info.Environment.CpuEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=806,
serialized_end=848,
)
_INFO_ENVIRONMENT_MEMORYENTRY = _descriptor.Descriptor(
name='MemoryEntry',
full_name='gym.Info.Environment.MemoryEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Info.Environment.MemoryEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Info.Environment.MemoryEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=850,
serialized_end=895,
)
_INFO_ENVIRONMENT_DISKENTRY = _descriptor.Descriptor(
name='DiskEntry',
full_name='gym.Info.Environment.DiskEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Info.Environment.DiskEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Info.Environment.DiskEntry.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=897,
serialized_end=940,
)
_INFO_ENVIRONMENT_NETWORKENTRY = _descriptor.Descriptor(
name='NetworkEntry',
full_name='gym.Info.Environment.NetworkEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Info.Environment.NetworkEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Info.Environment.NetworkEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=942,
serialized_end=988,
)
_INFO_ENVIRONMENT = _descriptor.Descriptor(
name='Environment',
full_name='gym.Info.Environment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='system', full_name='gym.Info.Environment.system', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='gym.Info.Environment.host', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='release', full_name='gym.Info.Environment.release', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='gym.Info.Environment.version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='processor', full_name='gym.Info.Environment.processor', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cpu', full_name='gym.Info.Environment.cpu', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='memory', full_name='gym.Info.Environment.memory', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='disk', full_name='gym.Info.Environment.disk', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='network', full_name='gym.Info.Environment.network', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_INFO_ENVIRONMENT_CPUENTRY, _INFO_ENVIRONMENT_MEMORYENTRY, _INFO_ENVIRONMENT_DISKENTRY, _INFO_ENVIRONMENT_NETWORKENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=512,
serialized_end=988,
)
_INFO = _descriptor.Descriptor(
name='Info',
full_name='gym.Info',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='gym.Info.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='role', full_name='gym.Info.role', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='address', full_name='gym.Info.address', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='environment', full_name='gym.Info.environment', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Info.timestamp', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='artifacts', full_name='gym.Info.artifacts', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='apparatus', full_name='gym.Info.apparatus', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='contacts', full_name='gym.Info.contacts', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_INFO_ENVIRONMENT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=279,
serialized_end=988,
)
_SCHED = _descriptor.Descriptor(
name='Sched',
full_name='gym.Sched',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='from', full_name='gym.Sched.from', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='until', full_name='gym.Sched.until', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='gym.Sched.duration', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='interval', full_name='gym.Sched.interval', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repeat', full_name='gym.Sched.repeat', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=990,
serialized_end=1078,
)
_ACTION_ARGSENTRY = _descriptor.Descriptor(
name='ArgsEntry',
full_name='gym.Action.ArgsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Action.ArgsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Action.ArgsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1199,
serialized_end=1242,
)
_ACTION = _descriptor.Descriptor(
name='Action',
full_name='gym.Action',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Action.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='instance', full_name='gym.Action.instance', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='gym.Action.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='args', full_name='gym.Action.args', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sched', full_name='gym.Action.sched', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_ACTION_ARGSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1081,
serialized_end=1242,
)
_INSTRUCTION = _descriptor.Descriptor(
name='Instruction',
full_name='gym.Instruction',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Instruction.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trial', full_name='gym.Instruction.trial', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='actions', full_name='gym.Instruction.actions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1244,
serialized_end=1314,
)
_EVALUATION_SOURCE = _descriptor.Descriptor(
name='Source',
full_name='gym.Evaluation.Source',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Evaluation.Source.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='gym.Evaluation.Source.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='gym.Evaluation.Source.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='gym.Evaluation.Source.version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='call', full_name='gym.Evaluation.Source.call', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1525,
serialized_end=1604,
)
_EVALUATION_METRIC = _descriptor.Descriptor(
name='Metric',
full_name='gym.Evaluation.Metric',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gym.Evaluation.Metric.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='gym.Evaluation.Metric.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='unit', full_name='gym.Evaluation.Metric.unit', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scalar', full_name='gym.Evaluation.Metric.scalar', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='series', full_name='gym.Evaluation.Metric.series', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='gym.Evaluation.Metric.value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1606,
serialized_end=1726,
)
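# Note (added comment): the oneofs entry above corresponds to a proto3
# `oneof value` in Evaluation.Metric; its two members ('scalar', a double, and
# 'series', a message-typed field) are attached to the oneof further below,
# where the descriptor cross-references are resolved.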
_EVALUATION_TIMESTAMP = _descriptor.Descriptor(
name='Timestamp',
full_name='gym.Evaluation.Timestamp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='gym.Evaluation.Timestamp.start', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='stop', full_name='gym.Evaluation.Timestamp.stop', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1728,
serialized_end=1824,
)
_EVALUATION_METRICSENTRY = _descriptor.Descriptor(
name='MetricsEntry',
full_name='gym.Evaluation.MetricsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Evaluation.MetricsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Evaluation.MetricsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1826,
serialized_end=1896,
)
_EVALUATION = _descriptor.Descriptor(
name='Evaluation',
full_name='gym.Evaluation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Evaluation.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='instance', full_name='gym.Evaluation.instance', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='repeat', full_name='gym.Evaluation.repeat', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source', full_name='gym.Evaluation.source', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='gym.Evaluation.metrics', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Evaluation.timestamp', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.Evaluation.error', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_EVALUATION_SOURCE, _EVALUATION_METRIC, _EVALUATION_TIMESTAMP, _EVALUATION_METRICSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1317,
serialized_end=1896,
)
_SNAPSHOT_ORIGIN = _descriptor.Descriptor(
name='Origin',
full_name='gym.Snapshot.Origin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Snapshot.Origin.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='role', full_name='gym.Snapshot.Origin.role', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='host', full_name='gym.Snapshot.Origin.host', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2091,
serialized_end=2139,
)
_SNAPSHOT_EVALUATIONSENTRY = _descriptor.Descriptor(
name='EvaluationsEntry',
full_name='gym.Snapshot.EvaluationsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Snapshot.EvaluationsEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Snapshot.EvaluationsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2141,
serialized_end=2208,
)
_SNAPSHOT = _descriptor.Descriptor(
name='Snapshot',
full_name='gym.Snapshot',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Snapshot.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trial', full_name='gym.Snapshot.trial', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='origin', full_name='gym.Snapshot.origin', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='evaluations', full_name='gym.Snapshot.evaluations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Snapshot.timestamp', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.Snapshot.error', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_SNAPSHOT_ORIGIN, _SNAPSHOT_EVALUATIONSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1899,
serialized_end=2208,
)
_TOOL_PARAMETERSENTRY = _descriptor.Descriptor(
name='ParametersEntry',
full_name='gym.Tool.ParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Tool.ParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Tool.ParametersEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2378,
serialized_end=2427,
)
_TOOL_METRICSENTRY = _descriptor.Descriptor(
name='MetricsEntry',
full_name='gym.Tool.MetricsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Tool.MetricsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Tool.MetricsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2429,
serialized_end=2475,
)
_TOOL = _descriptor.Descriptor(
name='Tool',
full_name='gym.Tool',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Tool.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='gym.Tool.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='parameters', full_name='gym.Tool.parameters', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='gym.Tool.metrics', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sched', full_name='gym.Tool.sched', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='instance', full_name='gym.Tool.instance', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TOOL_PARAMETERSENTRY, _TOOL_METRICSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2211,
serialized_end=2475,
)
_TASK_AGENT = _descriptor.Descriptor(
name='Agent',
full_name='gym.Task.Agent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='gym.Task.Agent.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='probers', full_name='gym.Task.Agent.probers', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2598,
serialized_end=2647,
)
_TASK_MONITOR = _descriptor.Descriptor(
name='Monitor',
full_name='gym.Task.Monitor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='gym.Task.Monitor.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='listeners', full_name='gym.Task.Monitor.listeners', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2649,
serialized_end=2702,
)
_TASK = _descriptor.Descriptor(
name='Task',
full_name='gym.Task',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Task.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='trials', full_name='gym.Task.trials', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='test', full_name='gym.Task.test', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='agents', full_name='gym.Task.agents', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='monitors', full_name='gym.Task.monitors', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TASK_AGENT, _TASK_MONITOR, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2478,
serialized_end=2702,
)
_REPORT_SNAPSHOTSENTRY = _descriptor.Descriptor(
name='SnapshotsEntry',
full_name='gym.Report.SnapshotsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Report.SnapshotsEntry.key', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Report.SnapshotsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2850,
serialized_end=2913,
)
_REPORT = _descriptor.Descriptor(
name='Report',
full_name='gym.Report',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Report.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='test', full_name='gym.Report.test', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='snapshots', full_name='gym.Report.snapshots', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Report.timestamp', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.Report.error', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_REPORT_SNAPSHOTSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2705,
serialized_end=2913,
)
_LAYOUT = _descriptor.Descriptor(
name='Layout',
full_name='gym.Layout',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Layout.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vnfbr', full_name='gym.Layout.vnfbr', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='feat', full_name='gym.Layout.feat', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2915,
serialized_end=2976,
)
_RESULT = _descriptor.Descriptor(
name='Result',
full_name='gym.Result',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Result.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='vnfbr', full_name='gym.Result.vnfbr', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Result.timestamp', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2978,
serialized_end=3072,
)
_DEPLOY = _descriptor.Descriptor(
name='Deploy',
full_name='gym.Deploy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Deploy.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='workflow', full_name='gym.Deploy.workflow', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scenario', full_name='gym.Deploy.scenario', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='environment', full_name='gym.Deploy.environment', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3074,
serialized_end=3184,
)
_BUILT = _descriptor.Descriptor(
name='Built',
full_name='gym.Built',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Built.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ack', full_name='gym.Built.ack', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.Built.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='info', full_name='gym.Built.info', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3186,
serialized_end=3247,
)
_STATS_MEASUREMENT_FIELD = _descriptor.Descriptor(
name='Field',
full_name='gym.Stats.Measurement.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gym.Stats.Measurement.Field.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='type', full_name='gym.Stats.Measurement.Field.type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='unit', full_name='gym.Stats.Measurement.Field.unit', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Stats.Measurement.Field.value', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3472,
serialized_end=3536,
)
_STATS_MEASUREMENT_TAGSENTRY = _descriptor.Descriptor(
name='TagsEntry',
full_name='gym.Stats.Measurement.TagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Stats.Measurement.TagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Stats.Measurement.TagsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3538,
serialized_end=3581,
)
_STATS_MEASUREMENT_FIELDSENTRY = _descriptor.Descriptor(
name='FieldsEntry',
full_name='gym.Stats.Measurement.FieldsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='gym.Stats.Measurement.FieldsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='gym.Stats.Measurement.FieldsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3583,
serialized_end=3658,
)
_STATS_MEASUREMENT = _descriptor.Descriptor(
name='Measurement',
full_name='gym.Stats.Measurement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gym.Stats.Measurement.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tags', full_name='gym.Stats.Measurement.tags', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fields', full_name='gym.Stats.Measurement.fields', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_STATS_MEASUREMENT_FIELD, _STATS_MEASUREMENT_TAGSENTRY, _STATS_MEASUREMENT_FIELDSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3343,
serialized_end=3658,
)
_STATS = _descriptor.Descriptor(
name='Stats',
full_name='gym.Stats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='environment', full_name='gym.Stats.environment', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='source', full_name='gym.Stats.source', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='measurements', full_name='gym.Stats.measurements', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_STATS_MEASUREMENT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3250,
serialized_end=3658,
)
_STATE_CONTENT = _descriptor.Descriptor(
name='Content',
full_name='gym.State.Content',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='info', full_name='gym.State.Content.info', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.State.Content.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3764,
serialized_end=3802,
)
_STATE = _descriptor.Descriptor(
name='State',
full_name='gym.State',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='gym.State.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='messages', full_name='gym.State.messages', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ts', full_name='gym.State.ts', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_STATE_CONTENT, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3661,
serialized_end=3802,
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='gym.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='gym.Status.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='gym.Status.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='info', full_name='gym.Status.info', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='gym.Status.timestamp', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3804,
serialized_end=3900,
)
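# Note (added comment): the assignments below resolve cross-references that
# could not be set while the descriptors were being constructed: message-typed
# fields receive their message_type, nested and map-entry descriptors receive
# their containing_type, and the members of Evaluation.Metric's 'value' oneof
# are attached to it. Imported types (google.protobuf Timestamp/Struct and the
# vnf_br / vnf_bd messages) are referenced through their generated modules.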
_APPARATUS.fields_by_name['agents'].message_type = _INFO
_APPARATUS.fields_by_name['monitors'].message_type = _INFO
_APPARATUS.fields_by_name['managers'].message_type = _INFO
_ARTIFACTS.fields_by_name['probers'].message_type = _TOOL
_ARTIFACTS.fields_by_name['listeners'].message_type = _TOOL
_INFO_ENVIRONMENT_CPUENTRY.containing_type = _INFO_ENVIRONMENT
_INFO_ENVIRONMENT_MEMORYENTRY.containing_type = _INFO_ENVIRONMENT
_INFO_ENVIRONMENT_DISKENTRY.containing_type = _INFO_ENVIRONMENT
_INFO_ENVIRONMENT_NETWORKENTRY.containing_type = _INFO_ENVIRONMENT
_INFO_ENVIRONMENT.fields_by_name['cpu'].message_type = _INFO_ENVIRONMENT_CPUENTRY
_INFO_ENVIRONMENT.fields_by_name['memory'].message_type = _INFO_ENVIRONMENT_MEMORYENTRY
_INFO_ENVIRONMENT.fields_by_name['disk'].message_type = _INFO_ENVIRONMENT_DISKENTRY
_INFO_ENVIRONMENT.fields_by_name['network'].message_type = _INFO_ENVIRONMENT_NETWORKENTRY
_INFO_ENVIRONMENT.containing_type = _INFO
_INFO.fields_by_name['environment'].message_type = _INFO_ENVIRONMENT
_INFO.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_INFO.fields_by_name['artifacts'].message_type = _ARTIFACTS
_INFO.fields_by_name['apparatus'].message_type = _APPARATUS
_ACTION_ARGSENTRY.containing_type = _ACTION
_ACTION.fields_by_name['args'].message_type = _ACTION_ARGSENTRY
_ACTION.fields_by_name['sched'].message_type = _SCHED
_INSTRUCTION.fields_by_name['actions'].message_type = _ACTION
_EVALUATION_SOURCE.containing_type = _EVALUATION
_EVALUATION_METRIC.fields_by_name['series'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_EVALUATION_METRIC.containing_type = _EVALUATION
_EVALUATION_METRIC.oneofs_by_name['value'].fields.append(
_EVALUATION_METRIC.fields_by_name['scalar'])
_EVALUATION_METRIC.fields_by_name['scalar'].containing_oneof = _EVALUATION_METRIC.oneofs_by_name['value']
_EVALUATION_METRIC.oneofs_by_name['value'].fields.append(
_EVALUATION_METRIC.fields_by_name['series'])
_EVALUATION_METRIC.fields_by_name['series'].containing_oneof = _EVALUATION_METRIC.oneofs_by_name['value']
_EVALUATION_TIMESTAMP.fields_by_name['start'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVALUATION_TIMESTAMP.fields_by_name['stop'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_EVALUATION_TIMESTAMP.containing_type = _EVALUATION
_EVALUATION_METRICSENTRY.fields_by_name['value'].message_type = _EVALUATION_METRIC
_EVALUATION_METRICSENTRY.containing_type = _EVALUATION
_EVALUATION.fields_by_name['source'].message_type = _EVALUATION_SOURCE
_EVALUATION.fields_by_name['metrics'].message_type = _EVALUATION_METRICSENTRY
_EVALUATION.fields_by_name['timestamp'].message_type = _EVALUATION_TIMESTAMP
_SNAPSHOT_ORIGIN.containing_type = _SNAPSHOT
_SNAPSHOT_EVALUATIONSENTRY.fields_by_name['value'].message_type = _EVALUATION
_SNAPSHOT_EVALUATIONSENTRY.containing_type = _SNAPSHOT
_SNAPSHOT.fields_by_name['origin'].message_type = _SNAPSHOT_ORIGIN
_SNAPSHOT.fields_by_name['evaluations'].message_type = _SNAPSHOT_EVALUATIONSENTRY
_SNAPSHOT.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TOOL_PARAMETERSENTRY.containing_type = _TOOL
_TOOL_METRICSENTRY.containing_type = _TOOL
_TOOL.fields_by_name['parameters'].message_type = _TOOL_PARAMETERSENTRY
_TOOL.fields_by_name['metrics'].message_type = _TOOL_METRICSENTRY
_TOOL.fields_by_name['sched'].message_type = _SCHED
_TASK_AGENT.fields_by_name['probers'].message_type = _TOOL
_TASK_AGENT.containing_type = _TASK
_TASK_MONITOR.fields_by_name['listeners'].message_type = _TOOL
_TASK_MONITOR.containing_type = _TASK
_TASK.fields_by_name['agents'].message_type = _TASK_AGENT
_TASK.fields_by_name['monitors'].message_type = _TASK_MONITOR
_REPORT_SNAPSHOTSENTRY.fields_by_name['value'].message_type = _SNAPSHOT
_REPORT_SNAPSHOTSENTRY.containing_type = _REPORT
_REPORT.fields_by_name['snapshots'].message_type = _REPORT_SNAPSHOTSENTRY
_REPORT.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_LAYOUT.fields_by_name['vnfbr'].message_type = vnf__br__pb2._VNFBR
_RESULT.fields_by_name['vnfbr'].message_type = vnf__br__pb2._VNFBR
_RESULT.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DEPLOY.fields_by_name['scenario'].message_type = vnf__bd__pb2._SCENARIO
_DEPLOY.fields_by_name['environment'].message_type = vnf__br__pb2._ENVIRONMENT
_STATS_MEASUREMENT_FIELD.containing_type = _STATS_MEASUREMENT
_STATS_MEASUREMENT_TAGSENTRY.containing_type = _STATS_MEASUREMENT
_STATS_MEASUREMENT_FIELDSENTRY.fields_by_name['value'].message_type = _STATS_MEASUREMENT_FIELD
_STATS_MEASUREMENT_FIELDSENTRY.containing_type = _STATS_MEASUREMENT
_STATS_MEASUREMENT.fields_by_name['tags'].message_type = _STATS_MEASUREMENT_TAGSENTRY
_STATS_MEASUREMENT.fields_by_name['fields'].message_type = _STATS_MEASUREMENT_FIELDSENTRY
_STATS_MEASUREMENT.containing_type = _STATS
_STATS.fields_by_name['measurements'].message_type = _STATS_MEASUREMENT
_STATE_CONTENT.containing_type = _STATE
_STATE.fields_by_name['messages'].message_type = _STATE_CONTENT
_STATE.fields_by_name['ts'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_STATUS.fields_by_name['timestamp'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
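# Note (added comment): registering each top-level message descriptor on the
# file DESCRIPTOR and then registering the file with the default symbol
# database makes these types discoverable by their full names (e.g. through
# the protobuf symbol database / message factory APIs).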
DESCRIPTOR.message_types_by_name['Apparatus'] = _APPARATUS
DESCRIPTOR.message_types_by_name['Artifacts'] = _ARTIFACTS
DESCRIPTOR.message_types_by_name['Info'] = _INFO
DESCRIPTOR.message_types_by_name['Sched'] = _SCHED
DESCRIPTOR.message_types_by_name['Action'] = _ACTION
DESCRIPTOR.message_types_by_name['Instruction'] = _INSTRUCTION
DESCRIPTOR.message_types_by_name['Evaluation'] = _EVALUATION
DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT
DESCRIPTOR.message_types_by_name['Tool'] = _TOOL
DESCRIPTOR.message_types_by_name['Task'] = _TASK
DESCRIPTOR.message_types_by_name['Report'] = _REPORT
DESCRIPTOR.message_types_by_name['Layout'] = _LAYOUT
DESCRIPTOR.message_types_by_name['Result'] = _RESULT
DESCRIPTOR.message_types_by_name['Deploy'] = _DEPLOY
DESCRIPTOR.message_types_by_name['Built'] = _BUILT
DESCRIPTOR.message_types_by_name['Stats'] = _STATS
DESCRIPTOR.message_types_by_name['State'] = _STATE
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
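# Note (added comment): from here on, the concrete Python message classes are
# created with the GeneratedProtocolMessageType metaclass. Each class is bound
# to its Descriptor, and nested messages (map entries, Info.Environment,
# Evaluation.Metric, and so on) appear as attributes of their enclosing class.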
Apparatus = _reflection.GeneratedProtocolMessageType('Apparatus', (_message.Message,), {
'DESCRIPTOR' : _APPARATUS,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Apparatus)
})
_sym_db.RegisterMessage(Apparatus)
Artifacts = _reflection.GeneratedProtocolMessageType('Artifacts', (_message.Message,), {
'DESCRIPTOR' : _ARTIFACTS,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Artifacts)
})
_sym_db.RegisterMessage(Artifacts)
Info = _reflection.GeneratedProtocolMessageType('Info', (_message.Message,), {
'Environment' : _reflection.GeneratedProtocolMessageType('Environment', (_message.Message,), {
'CpuEntry' : _reflection.GeneratedProtocolMessageType('CpuEntry', (_message.Message,), {
'DESCRIPTOR' : _INFO_ENVIRONMENT_CPUENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info.Environment.CpuEntry)
})
,
'MemoryEntry' : _reflection.GeneratedProtocolMessageType('MemoryEntry', (_message.Message,), {
'DESCRIPTOR' : _INFO_ENVIRONMENT_MEMORYENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info.Environment.MemoryEntry)
})
,
'DiskEntry' : _reflection.GeneratedProtocolMessageType('DiskEntry', (_message.Message,), {
'DESCRIPTOR' : _INFO_ENVIRONMENT_DISKENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info.Environment.DiskEntry)
})
,
'NetworkEntry' : _reflection.GeneratedProtocolMessageType('NetworkEntry', (_message.Message,), {
'DESCRIPTOR' : _INFO_ENVIRONMENT_NETWORKENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info.Environment.NetworkEntry)
})
,
'DESCRIPTOR' : _INFO_ENVIRONMENT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info.Environment)
})
,
'DESCRIPTOR' : _INFO,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Info)
})
_sym_db.RegisterMessage(Info)
_sym_db.RegisterMessage(Info.Environment)
_sym_db.RegisterMessage(Info.Environment.CpuEntry)
_sym_db.RegisterMessage(Info.Environment.MemoryEntry)
_sym_db.RegisterMessage(Info.Environment.DiskEntry)
_sym_db.RegisterMessage(Info.Environment.NetworkEntry)
Sched = _reflection.GeneratedProtocolMessageType('Sched', (_message.Message,), {
'DESCRIPTOR' : _SCHED,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Sched)
})
_sym_db.RegisterMessage(Sched)
Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), {
'ArgsEntry' : _reflection.GeneratedProtocolMessageType('ArgsEntry', (_message.Message,), {
'DESCRIPTOR' : _ACTION_ARGSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Action.ArgsEntry)
})
,
'DESCRIPTOR' : _ACTION,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Action)
})
_sym_db.RegisterMessage(Action)
_sym_db.RegisterMessage(Action.ArgsEntry)
Instruction = _reflection.GeneratedProtocolMessageType('Instruction', (_message.Message,), {
'DESCRIPTOR' : _INSTRUCTION,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Instruction)
})
_sym_db.RegisterMessage(Instruction)
Evaluation = _reflection.GeneratedProtocolMessageType('Evaluation', (_message.Message,), {
'Source' : _reflection.GeneratedProtocolMessageType('Source', (_message.Message,), {
'DESCRIPTOR' : _EVALUATION_SOURCE,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Evaluation.Source)
})
,
'Metric' : _reflection.GeneratedProtocolMessageType('Metric', (_message.Message,), {
'DESCRIPTOR' : _EVALUATION_METRIC,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Evaluation.Metric)
})
,
'Timestamp' : _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), {
'DESCRIPTOR' : _EVALUATION_TIMESTAMP,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Evaluation.Timestamp)
})
,
'MetricsEntry' : _reflection.GeneratedProtocolMessageType('MetricsEntry', (_message.Message,), {
'DESCRIPTOR' : _EVALUATION_METRICSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Evaluation.MetricsEntry)
})
,
'DESCRIPTOR' : _EVALUATION,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Evaluation)
})
_sym_db.RegisterMessage(Evaluation)
_sym_db.RegisterMessage(Evaluation.Source)
_sym_db.RegisterMessage(Evaluation.Metric)
_sym_db.RegisterMessage(Evaluation.Timestamp)
_sym_db.RegisterMessage(Evaluation.MetricsEntry)
Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), {
'Origin' : _reflection.GeneratedProtocolMessageType('Origin', (_message.Message,), {
'DESCRIPTOR' : _SNAPSHOT_ORIGIN,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Snapshot.Origin)
})
,
'EvaluationsEntry' : _reflection.GeneratedProtocolMessageType('EvaluationsEntry', (_message.Message,), {
'DESCRIPTOR' : _SNAPSHOT_EVALUATIONSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Snapshot.EvaluationsEntry)
})
,
'DESCRIPTOR' : _SNAPSHOT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Snapshot)
})
_sym_db.RegisterMessage(Snapshot)
_sym_db.RegisterMessage(Snapshot.Origin)
_sym_db.RegisterMessage(Snapshot.EvaluationsEntry)
Tool = _reflection.GeneratedProtocolMessageType('Tool', (_message.Message,), {
'ParametersEntry' : _reflection.GeneratedProtocolMessageType('ParametersEntry', (_message.Message,), {
'DESCRIPTOR' : _TOOL_PARAMETERSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Tool.ParametersEntry)
})
,
'MetricsEntry' : _reflection.GeneratedProtocolMessageType('MetricsEntry', (_message.Message,), {
'DESCRIPTOR' : _TOOL_METRICSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Tool.MetricsEntry)
})
,
'DESCRIPTOR' : _TOOL,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Tool)
})
_sym_db.RegisterMessage(Tool)
_sym_db.RegisterMessage(Tool.ParametersEntry)
_sym_db.RegisterMessage(Tool.MetricsEntry)
Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), {
'Agent' : _reflection.GeneratedProtocolMessageType('Agent', (_message.Message,), {
'DESCRIPTOR' : _TASK_AGENT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Task.Agent)
})
,
'Monitor' : _reflection.GeneratedProtocolMessageType('Monitor', (_message.Message,), {
'DESCRIPTOR' : _TASK_MONITOR,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Task.Monitor)
})
,
'DESCRIPTOR' : _TASK,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Task)
})
_sym_db.RegisterMessage(Task)
_sym_db.RegisterMessage(Task.Agent)
_sym_db.RegisterMessage(Task.Monitor)
Report = _reflection.GeneratedProtocolMessageType('Report', (_message.Message,), {
'SnapshotsEntry' : _reflection.GeneratedProtocolMessageType('SnapshotsEntry', (_message.Message,), {
'DESCRIPTOR' : _REPORT_SNAPSHOTSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Report.SnapshotsEntry)
})
,
'DESCRIPTOR' : _REPORT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Report)
})
_sym_db.RegisterMessage(Report)
_sym_db.RegisterMessage(Report.SnapshotsEntry)
Layout = _reflection.GeneratedProtocolMessageType('Layout', (_message.Message,), {
'DESCRIPTOR' : _LAYOUT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Layout)
})
_sym_db.RegisterMessage(Layout)
Result = _reflection.GeneratedProtocolMessageType('Result', (_message.Message,), {
'DESCRIPTOR' : _RESULT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Result)
})
_sym_db.RegisterMessage(Result)
Deploy = _reflection.GeneratedProtocolMessageType('Deploy', (_message.Message,), {
'DESCRIPTOR' : _DEPLOY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Deploy)
})
_sym_db.RegisterMessage(Deploy)
Built = _reflection.GeneratedProtocolMessageType('Built', (_message.Message,), {
'DESCRIPTOR' : _BUILT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Built)
})
_sym_db.RegisterMessage(Built)
Stats = _reflection.GeneratedProtocolMessageType('Stats', (_message.Message,), {
'Measurement' : _reflection.GeneratedProtocolMessageType('Measurement', (_message.Message,), {
'Field' : _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _STATS_MEASUREMENT_FIELD,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Stats.Measurement.Field)
})
,
'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), {
'DESCRIPTOR' : _STATS_MEASUREMENT_TAGSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Stats.Measurement.TagsEntry)
})
,
'FieldsEntry' : _reflection.GeneratedProtocolMessageType('FieldsEntry', (_message.Message,), {
'DESCRIPTOR' : _STATS_MEASUREMENT_FIELDSENTRY,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Stats.Measurement.FieldsEntry)
})
,
'DESCRIPTOR' : _STATS_MEASUREMENT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Stats.Measurement)
})
,
'DESCRIPTOR' : _STATS,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Stats)
})
_sym_db.RegisterMessage(Stats)
_sym_db.RegisterMessage(Stats.Measurement)
_sym_db.RegisterMessage(Stats.Measurement.Field)
_sym_db.RegisterMessage(Stats.Measurement.TagsEntry)
_sym_db.RegisterMessage(Stats.Measurement.FieldsEntry)
State = _reflection.GeneratedProtocolMessageType('State', (_message.Message,), {
'Content' : _reflection.GeneratedProtocolMessageType('Content', (_message.Message,), {
'DESCRIPTOR' : _STATE_CONTENT,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.State.Content)
})
,
'DESCRIPTOR' : _STATE,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.State)
})
_sym_db.RegisterMessage(State)
_sym_db.RegisterMessage(State.Content)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'gym_pb2'
# @@protoc_insertion_point(class_scope:gym.Status)
})
_sym_db.RegisterMessage(Status)
_INFO_ENVIRONMENT_CPUENTRY._options = None
_INFO_ENVIRONMENT_MEMORYENTRY._options = None
_INFO_ENVIRONMENT_DISKENTRY._options = None
_INFO_ENVIRONMENT_NETWORKENTRY._options = None
_ACTION_ARGSENTRY._options = None
_EVALUATION_METRICSENTRY._options = None
_SNAPSHOT_EVALUATIONSENTRY._options = None
_TOOL_PARAMETERSENTRY._options = None
_TOOL_METRICSENTRY._options = None
_REPORT_SNAPSHOTSENTRY._options = None
_STATS_MEASUREMENT_TAGSENTRY._options = None
_STATS_MEASUREMENT_FIELDSENTRY._options = None
_PLAYER = _descriptor.ServiceDescriptor(
name='Player',
full_name='gym.Player',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3902,
serialized_end=3981,
methods=[
_descriptor.MethodDescriptor(
name='Greet',
full_name='gym.Player.Greet',
index=0,
containing_service=None,
input_type=_INFO,
output_type=_INFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CallLayout',
full_name='gym.Player.CallLayout',
index=1,
containing_service=None,
input_type=_LAYOUT,
output_type=_RESULT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_PLAYER)
DESCRIPTOR.services_by_name['Player'] = _PLAYER
_MANAGER = _descriptor.ServiceDescriptor(
name='Manager',
full_name='gym.Manager',
file=DESCRIPTOR,
index=1,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=3983,
serialized_end=4059,
methods=[
_descriptor.MethodDescriptor(
name='Greet',
full_name='gym.Manager.Greet',
index=0,
containing_service=None,
input_type=_INFO,
output_type=_INFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CallTask',
full_name='gym.Manager.CallTask',
index=1,
containing_service=None,
input_type=_TASK,
output_type=_REPORT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MANAGER)
DESCRIPTOR.services_by_name['Manager'] = _MANAGER
_AGENT = _descriptor.ServiceDescriptor(
name='Agent',
full_name='gym.Agent',
file=DESCRIPTOR,
index=2,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4061,
serialized_end=4151,
methods=[
_descriptor.MethodDescriptor(
name='Greet',
full_name='gym.Agent.Greet',
index=0,
containing_service=None,
input_type=_INFO,
output_type=_INFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CallInstruction',
full_name='gym.Agent.CallInstruction',
index=1,
containing_service=None,
input_type=_INSTRUCTION,
output_type=_SNAPSHOT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AGENT)
DESCRIPTOR.services_by_name['Agent'] = _AGENT
_MONITOR = _descriptor.ServiceDescriptor(
name='Monitor',
full_name='gym.Monitor',
file=DESCRIPTOR,
index=3,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4153,
serialized_end=4245,
methods=[
_descriptor.MethodDescriptor(
name='Greet',
full_name='gym.Monitor.Greet',
index=0,
containing_service=None,
input_type=_INFO,
output_type=_INFO,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CallInstruction',
full_name='gym.Monitor.CallInstruction',
index=1,
containing_service=None,
input_type=_INSTRUCTION,
output_type=_SNAPSHOT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_MONITOR)
DESCRIPTOR.services_by_name['Monitor'] = _MONITOR
_INFRA = _descriptor.ServiceDescriptor(
name='Infra',
full_name='gym.Infra',
file=DESCRIPTOR,
index=4,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4247,
serialized_end=4286,
methods=[
_descriptor.MethodDescriptor(
name='Run',
full_name='gym.Infra.Run',
index=0,
containing_service=None,
input_type=_DEPLOY,
output_type=_BUILT,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_INFRA)
DESCRIPTOR.services_by_name['Infra'] = _INFRA
_CLI = _descriptor.ServiceDescriptor(
name='CLI',
full_name='gym.CLI',
file=DESCRIPTOR,
index=5,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=4288,
serialized_end=4364,
methods=[
_descriptor.MethodDescriptor(
name='Inform',
full_name='gym.CLI.Inform',
index=0,
containing_service=None,
input_type=_STATE,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Collect',
full_name='gym.CLI.Collect',
index=1,
containing_service=None,
input_type=_STATS,
output_type=_STATUS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_CLI)
DESCRIPTOR.services_by_name['CLI'] = _CLI
# @@protoc_insertion_point(module_scope)
| 40.885233 | 8,284 | 0.743127 | [
"Apache-2.0"
] | raphaelvrosa/gym | gym/common/protobuf/gym_pb2.py | 106,874 | Python |
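# --- Editor's note: hedged usage sketch for the generated gym_pb2.py module above ---
# The file is standard protoc output: every message class is built through
# _reflection.GeneratedProtocolMessageType and registered with _sym_db, and the six
# service descriptors (Player, Manager, Agent, Monitor, Infra, CLI) are attached to
# DESCRIPTOR. A minimal round-trip over one registered message; the import path simply
# mirrors the repository path and assumes the package is installed.
from gym.common.protobuf import gym_pb2

report = gym_pb2.Report()
payload = report.SerializeToString()           # wire-format bytes
parsed = gym_pb2.Report.FromString(payload)    # parse them back
assert parsed == report

player = gym_pb2.DESCRIPTOR.services_by_name['Player']
print([method.name for method in player.methods])   # -> ['Greet', 'CallLayout']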
# led_hello.py - blink external LED to test GPIO pins
# (c) BotBook.com - Karvinen, Karvinen, Valtokari
"led_hello.py - light a LED using Raspberry Pi GPIO"
# Copyright 2013 http://Botbook.com
import time # <1>
import os # <2>
def writeFile(filename, contents): # <3>
with open(filename, 'w') as f: # <4>
f.write(contents) # <5>
# main
print "Blinking LED on GPIO 27 once..." # <6>
if not os.path.isfile("/sys/class/gpio/gpio27/direction"): # <7>
writeFile("/sys/class/gpio/export", "27") # <8>
writeFile("/sys/class/gpio/gpio27/direction", "out") # <9>
writeFile("/sys/class/gpio/gpio27/value", "1") # <10>
time.sleep(2) # seconds # <11>
writeFile("/sys/class/gpio/gpio27/value", "0") # <12>
| 25.392857 | 64 | 0.655415 | [
"MIT"
] | AlexRogalskiy/Duino | getting-started-code-101/raspberrypi/led_hello/led_hello.py | 711 | Python |
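# --- Editor's note: hedged sketch, not part of led_hello.py above ---
# The script drives GPIO 27 through the legacy sysfs interface: export the pin, set its
# direction to "out", then write "1"/"0" to its value file. The same pattern folded into
# a reusable helper (pin number and timing are illustrative, not from the original):
import os
import time

def pulse_gpio(pin, seconds=2.0):
    base = "/sys/class/gpio/gpio%d" % pin
    if not os.path.isfile(base + "/direction"):      # export the pin on first use
        with open("/sys/class/gpio/export", "w") as f:
            f.write(str(pin))
    with open(base + "/direction", "w") as f:
        f.write("out")
    with open(base + "/value", "w") as f:            # drive the pin high ...
        f.write("1")
    time.sleep(seconds)
    with open(base + "/value", "w") as f:            # ... then low again
        f.write("0")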
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Nets.UNetBatchNorm import UNetBatchNorm
import tensorflow as tf
import numpy as np
from sklearn.metrics import mean_squared_error
from datetime import datetime
from optparse import OptionParser
from Data.ImageTransform import ListTransform
from Data.DataGenClass import DataGen3, DataGenMulti, DataGen3reduce
from Nets.DataReadDecode import read_and_decode
import pdb
from skimage.io import imsave
from os.path import join
import os
from utils import CheckOrCreate
class UNetDistance(UNetBatchNorm):
def __init__(
self,
TF_RECORDS,
LEARNING_RATE=0.01,
K=0.96,
BATCH_SIZE=10,
IMAGE_SIZE=28,
NUM_CHANNELS=1,
NUM_TEST=10000,
STEPS=2000,
LRSTEP=200,
DECAY_EMA=0.9999,
N_PRINT = 100,
LOG="/tmp/net",
SEED=42,
DEBUG=True,
WEIGHT_DECAY=0.00005,
LOSS_FUNC=tf.nn.l2_loss,
N_FEATURES=16,
N_EPOCH=1,
N_THREADS=1,
MEAN_FILE=None,
DROPOUT=0.5):
self.LEARNING_RATE = LEARNING_RATE
self.K = K
self.BATCH_SIZE = BATCH_SIZE
self.IMAGE_SIZE = IMAGE_SIZE
self.NUM_CHANNELS = NUM_CHANNELS
self.N_FEATURES = N_FEATURES
# self.NUM_TEST = NUM_TEST
self.STEPS = STEPS
self.N_PRINT = N_PRINT
self.LRSTEP = LRSTEP
self.DECAY_EMA = DECAY_EMA
self.LOG = LOG
self.SEED = SEED
self.N_EPOCH = N_EPOCH
self.N_THREADS = N_THREADS
self.DROPOUT = DROPOUT
if MEAN_FILE is not None:
MEAN_ARRAY = tf.constant(np.load(MEAN_FILE), dtype=tf.float32) # (3)
self.MEAN_ARRAY = tf.reshape(MEAN_ARRAY, [1, 1, 3])
self.SUB_MEAN = True
else:
self.SUB_MEAN = False
self.sess = tf.InteractiveSession()
self.sess.as_default()
self.var_to_reg = []
self.var_to_sum = []
self.TF_RECORDS = TF_RECORDS
self.init_queue(TF_RECORDS)
self.init_vars()
self.init_model_architecture()
self.init_training_graph()
self.Saver()
self.DEBUG = DEBUG
self.loss_func = LOSS_FUNC
self.weight_decay = WEIGHT_DECAY
def init_queue(self, tfrecords_filename):
self.filename_queue = tf.train.string_input_producer(
[tfrecords_filename], num_epochs=10)
with tf.device('/cpu:0'):
self.image, self.annotation = read_and_decode(self.filename_queue,
self.IMAGE_SIZE[0],
self.IMAGE_SIZE[1],
self.BATCH_SIZE,
self.N_THREADS,
True,
self.NUM_CHANNELS)
#self.annotation = tf.divide(self.annotation, 255.)
print("Queue initialized")
def input_node_f(self):
if self.SUB_MEAN:
self.images_queue = self.image - self.MEAN_ARRAY
else:
self.images_queue = self.image
self.image_PH = tf.placeholder_with_default(self.images_queue, shape=[None,
None,
None,
3])
return self.image_PH
def label_node_f(self):
self.labels_queue = self.annotation
self.labels_PH = tf.placeholder_with_default(self.labels_queue, shape=[None,
None,
None,
1])
return self.labels_PH
def init_training_graph(self):
with tf.name_scope('Evaluation'):
# self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
with tf.name_scope("logits/"):
self.logits2 = tf.nn.conv2d(self.last, self.logits_weight, strides=[1,1,1,1], padding="VALID")
self.logits = tf.nn.bias_add(self.logits2, self.logits_biases)
self.predictions = self.logits
#self.predictions = tf.squeeze(self.logits, [3])
#softmax = tf.nn.softmax(self.logits)
#print softmax.get_shape()
#self.predictions = tf.slice(softmax, [0, 0, 0, 0], [-1, -1, -1, 1])
with tf.name_scope('Loss'):
self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.logits, self.train_labels_node))
#self.loss = tf.reduce_mean(tf.losses.mean_squared_error(self.predictions, self.train_labels_node))
tf.summary.scalar("mean_squared_error", self.loss)
self.predictions = tf.squeeze(self.predictions, [3])
self.train_prediction = self.predictions
self.test_prediction = self.predictions
tf.global_variables_initializer().run()
print('Computational graph initialised')
def init_vars(self):
#### had to add is_training, self.reuse
self.is_training = tf.placeholder_with_default(True, shape=[])
####
self.input_node = self.input_node_f()
self.train_labels_node = self.label_node_f()
n_features = self.N_FEATURES
self.conv1_1weights = self.weight_xavier(3, self.NUM_CHANNELS, n_features, "conv1_1/")
self.conv1_1biases = self.biases_const_f(0.1, n_features, "conv1_1/")
self.conv1_2weights = self.weight_xavier(3, n_features, n_features, "conv1_2/")
self.conv1_2biases = self.biases_const_f(0.1, n_features, "conv1_2/")
self.conv1_3weights = self.weight_xavier(3, 2 * n_features, n_features, "conv1_3/")
self.conv1_3biases = self.biases_const_f(0.1, n_features, "conv1_3/")
self.conv1_4weights = self.weight_xavier(3, n_features, n_features, "conv1_4/")
self.conv1_4biases = self.biases_const_f(0.1, n_features, "conv1_4/")
self.conv2_1weights = self.weight_xavier(3, n_features, 2 * n_features, "conv2_1/")
self.conv2_1biases = self.biases_const_f(0.1, 2 * n_features, "conv2_1/")
self.conv2_2weights = self.weight_xavier(3, 2 * n_features, 2 * n_features, "conv2_2/")
self.conv2_2biases = self.biases_const_f(0.1, 2 * n_features, "conv2_2/")
self.conv2_3weights = self.weight_xavier(3, 4 * n_features, 2 * n_features, "conv2_3/")
self.conv2_3biases = self.biases_const_f(0.1, 2 * n_features, "conv2_3/")
self.conv2_4weights = self.weight_xavier(3, 2 * n_features, 2 * n_features, "conv2_4/")
self.conv2_4biases = self.biases_const_f(0.1, 2 * n_features, "conv2_4/")
self.conv3_1weights = self.weight_xavier(3, 2 * n_features, 4 * n_features, "conv3_1/")
self.conv3_1biases = self.biases_const_f(0.1, 4 * n_features, "conv3_1/")
self.conv3_2weights = self.weight_xavier(3, 4 * n_features, 4 * n_features, "conv3_2/")
self.conv3_2biases = self.biases_const_f(0.1, 4 * n_features, "conv3_2/")
self.conv3_3weights = self.weight_xavier(3, 8 * n_features, 4 * n_features, "conv3_3/")
self.conv3_3biases = self.biases_const_f(0.1, 4 * n_features, "conv3_3/")
self.conv3_4weights = self.weight_xavier(3, 4 * n_features, 4 * n_features, "conv3_4/")
self.conv3_4biases = self.biases_const_f(0.1, 4 * n_features, "conv3_4/")
self.conv4_1weights = self.weight_xavier(3, 4 * n_features, 8 * n_features, "conv4_1/")
self.conv4_1biases = self.biases_const_f(0.1, 8 * n_features, "conv4_1/")
self.conv4_2weights = self.weight_xavier(3, 8 * n_features, 8 * n_features, "conv4_2/")
self.conv4_2biases = self.biases_const_f(0.1, 8 * n_features, "conv4_2/")
self.conv4_3weights = self.weight_xavier(3, 16 * n_features, 8 * n_features, "conv4_3/")
self.conv4_3biases = self.biases_const_f(0.1, 8 * n_features, "conv4_3/")
self.conv4_4weights = self.weight_xavier(3, 8 * n_features, 8 * n_features, "conv4_4/")
self.conv4_4biases = self.biases_const_f(0.1, 8 * n_features, "conv4_4/")
self.conv5_1weights = self.weight_xavier(3, 8 * n_features, 16 * n_features, "conv5_1/")
self.conv5_1biases = self.biases_const_f(0.1, 16 * n_features, "conv5_1/")
self.conv5_2weights = self.weight_xavier(3, 16 * n_features, 16 * n_features, "conv5_2/")
self.conv5_2biases = self.biases_const_f(0.1, 16 * n_features, "conv5_2/")
self.tconv5_4weights = self.weight_xavier(2, 8 * n_features, 16 * n_features, "tconv5_4/")
self.tconv5_4biases = self.biases_const_f(0.1, 8 * n_features, "tconv5_4/")
self.tconv4_3weights = self.weight_xavier(2, 4 * n_features, 8 * n_features, "tconv4_3/")
self.tconv4_3biases = self.biases_const_f(0.1, 4 * n_features, "tconv4_3/")
self.tconv3_2weights = self.weight_xavier(2, 2 * n_features, 4 * n_features, "tconv3_2/")
self.tconv3_2biases = self.biases_const_f(0.1, 2 * n_features, "tconv3_2/")
self.tconv2_1weights = self.weight_xavier(2, n_features, 2 * n_features, "tconv2_1/")
self.tconv2_1biases = self.biases_const_f(0.1, n_features, "tconv2_1/")
self.logits_weight = self.weight_xavier(1, n_features, 1, "logits/")
self.logits_biases = self.biases_const_f(0.1, 1, "logits/")
self.keep_prob = tf.Variable(self.DROPOUT, name="dropout_prob")
def init_model_architecture(self):
self.conv1_1 = self.conv_layer_f(self.input_node, self.conv1_1weights, "conv1_1/")
self.relu1_1 = self.relu_layer_f(self.conv1_1, self.conv1_1biases, "conv1_1/")
self.conv1_2 = self.conv_layer_f(self.relu1_1, self.conv1_2weights, "conv1_2/")
self.relu1_2 = self.relu_layer_f(self.conv1_2, self.conv1_2biases, "conv1_2/")
self.pool1_2 = self.max_pool(self.relu1_2, name="pool1_2")
self.conv2_1 = self.conv_layer_f(self.pool1_2, self.conv2_1weights, "conv2_1/")
self.relu2_1 = self.relu_layer_f(self.conv2_1, self.conv2_1biases, "conv2_1/")
self.conv2_2 = self.conv_layer_f(self.relu2_1, self.conv2_2weights, "conv2_2/")
self.relu2_2 = self.relu_layer_f(self.conv2_2, self.conv2_2biases, "conv2_2/")
self.pool2_3 = self.max_pool(self.relu2_2, name="pool2_3")
self.conv3_1 = self.conv_layer_f(self.pool2_3, self.conv3_1weights, "conv3_1/")
self.relu3_1 = self.relu_layer_f(self.conv3_1, self.conv3_1biases, "conv3_1/")
self.conv3_2 = self.conv_layer_f(self.relu3_1, self.conv3_2weights, "conv3_2/")
self.relu3_2 = self.relu_layer_f(self.conv3_2, self.conv3_2biases, "conv3_2/")
self.pool3_4 = self.max_pool(self.relu3_2, name="pool3_4")
self.conv4_1 = self.conv_layer_f(self.pool3_4, self.conv4_1weights, "conv4_1/")
self.relu4_1 = self.relu_layer_f(self.conv4_1, self.conv4_1biases, "conv4_1/")
self.conv4_2 = self.conv_layer_f(self.relu4_1, self.conv4_2weights, "conv4_2/")
self.relu4_2 = self.relu_layer_f(self.conv4_2, self.conv4_2biases, "conv4_2/")
self.pool4_5 = self.max_pool(self.relu4_2, name="pool4_5")
self.conv5_1 = self.conv_layer_f(self.pool4_5, self.conv5_1weights, "conv5_1/")
self.relu5_1 = self.relu_layer_f(self.conv5_1, self.conv5_1biases, "conv5_1/")
self.conv5_2 = self.conv_layer_f(self.relu5_1, self.conv5_2weights, "conv5_2/")
self.relu5_2 = self.relu_layer_f(self.conv5_2, self.conv5_2biases, "conv5_2/")
self.tconv5_4 = self.transposeconv_layer_f(self.relu5_2, self.tconv5_4weights, "tconv5_4/")
self.trelu5_4 = self.relu_layer_f(self.tconv5_4, self.tconv5_4biases, "tconv5_4/")
self.bridge4 = self.CropAndMerge(self.relu4_2, self.trelu5_4, "bridge4")
self.conv4_3 = self.conv_layer_f(self.bridge4, self.conv4_3weights, "conv4_3/")
self.relu4_3 = self.relu_layer_f(self.conv4_3, self.conv4_3biases, "conv4_3/")
self.conv4_4 = self.conv_layer_f(self.relu4_3, self.conv4_4weights, "conv4_4/")
self.relu4_4 = self.relu_layer_f(self.conv4_4, self.conv4_4biases, "conv4_4/")
self.tconv4_3 = self.transposeconv_layer_f(self.relu4_4, self.tconv4_3weights, "tconv4_3/")
self.trelu4_3 = self.relu_layer_f(self.tconv4_3, self.tconv4_3biases, "tconv4_3/")
self.bridge3 = self.CropAndMerge(self.relu3_2, self.trelu4_3, "bridge3")
self.conv3_3 = self.conv_layer_f(self.bridge3, self.conv3_3weights, "conv3_3/")
self.relu3_3 = self.relu_layer_f(self.conv3_3, self.conv3_3biases, "conv3_3/")
self.conv3_4 = self.conv_layer_f(self.relu3_3, self.conv3_4weights, "conv3_4/")
self.relu3_4 = self.relu_layer_f(self.conv3_4, self.conv3_4biases, "conv3_4/")
self.tconv3_2 = self.transposeconv_layer_f(self.relu3_4, self.tconv3_2weights, "tconv3_2/")
self.trelu3_2 = self.relu_layer_f(self.tconv3_2, self.tconv3_2biases, "tconv3_2/")
self.bridge2 = self.CropAndMerge(self.relu2_2, self.trelu3_2, "bridge2")
self.conv2_3 = self.conv_layer_f(self.bridge2, self.conv2_3weights, "conv2_3/")
self.relu2_3 = self.relu_layer_f(self.conv2_3, self.conv2_3biases, "conv2_3/")
self.conv2_4 = self.conv_layer_f(self.relu2_3, self.conv2_4weights, "conv2_4/")
self.relu2_4 = self.relu_layer_f(self.conv2_4, self.conv2_4biases, "conv2_4/")
self.tconv2_1 = self.transposeconv_layer_f(self.relu2_4, self.tconv2_1weights, "tconv2_1/")
self.trelu2_1 = self.relu_layer_f(self.tconv2_1, self.tconv2_1biases, "tconv2_1/")
self.bridge1 = self.CropAndMerge(self.relu1_2, self.trelu2_1, "bridge1")
self.conv1_3 = self.conv_layer_f(self.bridge1, self.conv1_3weights, "conv1_3/")
self.relu1_3 = self.relu_layer_f(self.conv1_3, self.conv1_3biases, "conv1_3/")
self.conv1_4 = self.conv_layer_f(self.relu1_3, self.conv1_4weights, "conv1_4/")
self.relu1_4 = self.relu_layer_f(self.conv1_4, self.conv1_4biases, "conv1_4/")
self.last = self.relu1_4
print('Model architecture initialised')
def error_rate(self, predictions, labels, iter):
error = mean_squared_error(labels.flatten(), predictions.flatten())
return error
def Validation(self, DG_TEST, step):
if DG_TEST is None:
print "no validation"
else:
n_test = DG_TEST.length
n_batch = int(np.ceil(float(n_test) / self.BATCH_SIZE))
l = 0.
for i in range(n_batch):
Xval, Yval = DG_TEST.Batch(0, self.BATCH_SIZE)
#Yval = Yval / 255.
feed_dict = {self.input_node: Xval,
self.train_labels_node: Yval,
self.is_training: False}
l_tmp, pred, s = self.sess.run([self.loss,
self.predictions,
self.merged_summary],
feed_dict=feed_dict)
l += l_tmp
for j in range(self.BATCH_SIZE):
CheckOrCreate('step_{}'.format(step))
xval_name = join('step_{}'.format(step), "X_val_{}_{}.png".format(i, j))
yval_name = join('step_{}'.format(step), "Y_val_{}_{}.png".format(i, j))
pred_name = join('step_{}'.format(step), "pred_{}_{}.png".format(i, j))
bin_name = join('step_{}'.format(step), "Y_bin_{}_{}.png".format(i, j))
imsave(xval_name, (Xval[j, 92:-92, 92:-92]).astype(np.uint8))
imsave(yval_name, Yval[j, :, :, 0].astype(np.uint8))
imsave(pred_name, pred[j].astype(np.uint8))
GT = Yval[j, :, :, 0].copy()
GT[GT > 0] = 255
imsave(bin_name, GT.astype(np.uint8))
l = l / n_batch
summary = tf.Summary()
summary.value.add(tag="TestMan/Loss", simple_value=l)
self.summary_test_writer.add_summary(summary, step)
self.summary_test_writer.add_summary(s, step)
print(' Validation loss: %.1f' % l)
self.saver.save(self.sess, self.LOG + '/' + "model.ckpt", step)
def train(self, DGTest):
epoch = self.STEPS * self.BATCH_SIZE // self.N_EPOCH
self.Saver()
trainable_var = tf.trainable_variables()
self.LearningRateSchedule(self.LEARNING_RATE, self.K, epoch)
self.optimization(trainable_var)
self.ExponentialMovingAverage(trainable_var, self.DECAY_EMA)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
self.sess.run(init_op)
self.regularize_model()
self.Saver()
self.summary_test_writer = tf.summary.FileWriter(self.LOG + '/test',
graph=self.sess.graph)
self.summary_writer = tf.summary.FileWriter(self.LOG + '/train', graph=self.sess.graph)
self.merged_summary = tf.summary.merge_all()
steps = self.STEPS
print "self.global step", int(self.global_step.eval())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
begin = int(self.global_step.eval())
print "begin", begin
for step in range(begin, steps + begin):
# self.optimizer is replaced by self.training_op for the exponential moving decay
_, l, lr, predictions, batch_labels, s = self.sess.run(
[self.training_op, self.loss, self.learning_rate,
self.train_prediction, self.train_labels_node,
self.merged_summary])
if step % self.N_PRINT == 0:
i = datetime.now()
print i.strftime('%Y/%m/%d %H:%M:%S: \n ')
self.summary_writer.add_summary(s, step)
print(' Step %d of %d' % (step, steps))
print(' Learning rate: %.5f \n') % lr
print(' Mini-batch loss: %.5f \n ') % l
print(' Max value: %.5f \n ') % np.max(predictions)
self.Validation(DGTest, step)
coord.request_stop()
coord.join(threads)
def predict(self, tensor):
feed_dict = {self.input_node: tensor,
self.is_training: False}
pred = self.sess.run(self.predictions,
feed_dict=feed_dict)
return pred
if __name__== "__main__":
parser = OptionParser()
# parser.add_option("--gpu", dest="gpu", default="0", type="string",
# help="Input file (raw data)")
parser.add_option("--tf_record", dest="TFRecord", type="string",
help="Where to find the TFrecord file")
parser.add_option("--path", dest="path", type="string",
help="Where to collect the patches")
parser.add_option("--log", dest="log",
help="log dir")
parser.add_option("--learning_rate", dest="lr", type="float",
help="learning_rate")
parser.add_option("--batch_size", dest="bs", type="int",
help="batch size")
parser.add_option("--epoch", dest="epoch", type="int",
help="number of epochs")
parser.add_option("--n_features", dest="n_features", type="int",
help="number of channels on first layers")
parser.add_option("--weight_decay", dest="weight_decay", type="float",
help="weight decay value")
parser.add_option("--dropout", dest="dropout", type="float",
default=0.5, help="dropout value to apply to the FC layers.")
parser.add_option("--mean_file", dest="mean_file", type="str",
help="where to find the mean file to substract to the original image.")
parser.add_option('--n_threads', dest="THREADS", type=int, default=100,
help="number of threads to use for the preprocessing.")
(options, args) = parser.parse_args()
TFRecord = options.TFRecord
N_FEATURES = options.n_features
WEIGHT_DECAY = options.weight_decay
DROPOUT = options.dropout
MEAN_FILE = options.mean_file
N_THREADS = options.THREADS
LEARNING_RATE = options.lr
if int(str(LEARNING_RATE)[-1]) > 7:
lr_str = "1E-{}".format(str(LEARNING_RATE)[-1])
else:
lr_str = "{0:.8f}".format(LEARNING_RATE).rstrip("0")
SAVE_DIR = options.log + "/" + "{}".format(N_FEATURES) + "_" +"{0:.8f}".format(WEIGHT_DECAY).rstrip("0") + "_" + lr_str
HEIGHT = 224
WIDTH = 224
BATCH_SIZE = options.bs
LRSTEP = "4epoch"
SUMMARY = True
S = SUMMARY
N_EPOCH = options.epoch
PATH = options.path
HEIGHT = 212
WIDTH = 212
SIZE = (HEIGHT, WIDTH)
N_TRAIN_SAVE = 10
CROP = 4
transform_list, transform_list_test = ListTransform(n_elastic=0)
DG_TRAIN = DataGenMulti(PATH, split='train', crop = CROP, size=(HEIGHT, WIDTH),
transforms=transform_list, num="test", UNet=True, mean_file=None)
test_patient = ["test"]
DG_TRAIN.SetPatient(test_patient)
N_ITER_MAX = N_EPOCH * DG_TRAIN.length // BATCH_SIZE
DG_TEST = DataGenMulti(PATH, split="test", crop = 1, size=(500, 500), num="test",
transforms=transform_list_test, UNet=True, mean_file=MEAN_FILE)
DG_TEST.SetPatient(test_patient)
model = UNetDistance(TFRecord, LEARNING_RATE=LEARNING_RATE,
BATCH_SIZE=BATCH_SIZE,
IMAGE_SIZE=SIZE,
NUM_CHANNELS=3,
STEPS=N_ITER_MAX,
LRSTEP=LRSTEP,
N_PRINT=N_TRAIN_SAVE,
LOG=SAVE_DIR,
SEED=42,
WEIGHT_DECAY=WEIGHT_DECAY,
N_FEATURES=N_FEATURES,
N_EPOCH=N_EPOCH,
N_THREADS=N_THREADS,
MEAN_FILE=MEAN_FILE,
DROPOUT=DROPOUT)
    model.train(DG_TEST)
| 41.148746 | 123 | 0.592483 | [
"MIT"
] | PeterJackNaylor/DRFNS | src_DummyDataSet/UNet_UNNormalized.py | 22,961 | Python |
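# --- Editor's note: hedged sketch, not part of UNet_UNNormalized.py above ---
# UNetDistance stacks "valid" 3x3 convolutions, so each output map is smaller than its
# input; the Xval[:, 92:-92, 92:-92] crop in Validation() implies a total border of
# 2 * 92 = 184 pixels. A small self-contained check of that size bookkeeping (the
# helper is illustrative and mirrors the 4-level, 2-convs-per-level layout above):
def unet_valid_output_size(input_size, depth=4, convs_per_level=2, kernel=3):
    """Spatial size of a valid-padding UNet output for a square input."""
    trim = (kernel - 1) * convs_per_level    # pixels lost to the convs at one level
    size = input_size
    for _ in range(depth):                   # contracting path: convs then 2x2 max-pool
        size = (size - trim) // 2
    size -= trim                             # bottleneck convs
    for _ in range(depth):                   # expanding path: 2x2 up-conv then convs
        size = size * 2 - trim
    return size

assert unet_valid_output_size(396) == 212    # 212 + 184 = 396, matching the 92 px crop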
import pytest
from plenum.common.constants import AUDIT_LEDGER_ID, AUDIT_TXN_VIEW_NO, AUDIT_TXN_PP_SEQ_NO, AUDIT_TXN_PRIMARIES
from plenum.common.messages.node_messages import Checkpoint, CheckpointState
from plenum.test.checkpoints.helper import cp_digest
from plenum.test.test_node import getNonPrimaryReplicas, getAllReplicas, \
getPrimaryReplica
from plenum.test.view_change.helper import ensure_view_change_complete
CHK_FREQ = 5
@pytest.fixture(scope="module")
def view_setup(looper, txnPoolNodeSet):
for i in range(2):
ensure_view_change_complete(looper, txnPoolNodeSet)
for node in txnPoolNodeSet:
assert node.viewNo == 2
@pytest.fixture(scope="module")
def view_change_in_progress(view_setup, txnPoolNodeSet):
# Initiate view change to the next view
for node in txnPoolNodeSet:
node.view_changer.propagate_primary = False
node.view_changer.view_no += 1
node.view_changer.view_change_in_progress = True
node.view_changer.previous_master_primary = node.master_primary_name
node.view_changer.set_defaults()
for inst_id, replica in node.replicas.items():
replica.primaryName = None
@pytest.fixture(scope="function")
def clear_checkpoints(txnPoolNodeSet):
for node in txnPoolNodeSet:
for inst_id, replica in node.replicas.items():
replica._checkpointer._reset_checkpoints()
replica._checkpointer._stashed_recvd_checkpoints.clear()
def test_checkpoints_removed_on_master_replica_after_catchup_during_view_change(
chkFreqPatched, txnPoolNodeSet, view_change_in_progress, clear_checkpoints):
master_replicas = getAllReplicas(txnPoolNodeSet, 0)
replica = master_replicas[-1]
others = master_replicas[:-1]
node = replica.node
node.master_replica.last_ordered_3pc = (2, 12)
replica._checkpointer._checkpoint_state[(6, 10)] = CheckpointState(seqNo=10,
digests=[],
digest=cp_digest(6, 10),
receivedDigests={r.name: cp_digest(6, 10) for r in others},
isStable=True)
replica._checkpointer._checkpoint_state[(11, 15)] = CheckpointState(seqNo=12,
digests=['digest-11', 'digest-12'],
digest=None,
receivedDigests={},
isStable=False)
replica._checkpointer._stashed_recvd_checkpoints[2] = {}
replica._checkpointer._stashed_recvd_checkpoints[2][(11, 15)] = {}
for r in others:
replica._checkpointer._stashed_recvd_checkpoints[2][(11, 15)][r.name] = \
Checkpoint(instId=0,
viewNo=2,
seqNoStart=11,
seqNoEnd=15,
digest=cp_digest(11, 15))
replica._checkpointer._stashed_recvd_checkpoints[2][(16, 20)] = {}
for r in others:
replica._checkpointer._stashed_recvd_checkpoints[2][(16, 20)][r.name] = \
Checkpoint(instId=0,
viewNo=2,
seqNoStart=16,
seqNoEnd=20,
digest=cp_digest(16, 20))
replica._checkpointer._stashed_recvd_checkpoints[2][(21, 25)] = {}
replica._checkpointer._stashed_recvd_checkpoints[2][(21, 25)][others[0].name] = \
Checkpoint(instId=0,
viewNo=2,
seqNoStart=21,
seqNoEnd=25,
digest=cp_digest(21, 25))
# Simulate catch-up completion
node.ledgerManager.last_caught_up_3PC = (2, 20)
audit_ledger = node.getLedger(AUDIT_LEDGER_ID)
txn_with_last_seq_no = {'txn': {'data': {AUDIT_TXN_VIEW_NO: 2,
AUDIT_TXN_PP_SEQ_NO: 20,
AUDIT_TXN_PRIMARIES: ['Gamma', 'Delta']}}}
audit_ledger.get_last_committed_txn = lambda *args: txn_with_last_seq_no
node.allLedgersCaughtUp()
assert len(replica._checkpointer._checkpoint_state) == 0
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 1
assert 2 in replica._checkpointer._stashed_recvd_checkpoints
assert len(replica._checkpointer._stashed_recvd_checkpoints[2]) == 1
assert (21, 25) in replica._checkpointer._stashed_recvd_checkpoints[2]
assert len(replica._checkpointer._stashed_recvd_checkpoints[2][(21, 25)]) == 1
def test_checkpoints_removed_on_backup_replica_after_catchup_during_view_change(
chkFreqPatched, txnPoolNodeSet, view_change_in_progress, clear_checkpoints):
backup_replicas = getAllReplicas(txnPoolNodeSet, 1)
replica = backup_replicas[-1]
others = backup_replicas[:-1]
node = replica.node
node.master_replica.last_ordered_3pc = (2, 12)
replica._checkpointer._checkpoint_state[(6, 10)] = CheckpointState(seqNo=10,
digests=[],
digest=cp_digest(6, 10),
receivedDigests={r.name: cp_digest(6, 10) for r in others},
isStable=True)
replica._checkpointer._checkpoint_state[(11, 15)] = CheckpointState(seqNo=13,
digests=['digest-11', 'digest-12', 'digest-13'],
digest=None,
receivedDigests={},
isStable=False)
replica._checkpointer._stashed_recvd_checkpoints[2] = {}
replica._checkpointer._stashed_recvd_checkpoints[2][(11, 15)] = {}
for r in others:
replica._checkpointer._stashed_recvd_checkpoints[2][(11, 15)][r.name] = \
Checkpoint(instId=1,
viewNo=2,
seqNoStart=11,
seqNoEnd=15,
digest=cp_digest(11, 15))
replica._checkpointer._stashed_recvd_checkpoints[2][(16, 20)] = {}
for r in others:
replica._checkpointer._stashed_recvd_checkpoints[2][(16, 20)][r.name] = \
Checkpoint(instId=1,
viewNo=2,
seqNoStart=16,
seqNoEnd=20,
digest=cp_digest(16, 20))
replica._checkpointer._stashed_recvd_checkpoints[2][(21, 25)] = {}
replica._checkpointer._stashed_recvd_checkpoints[2][(21, 25)][next(iter(others)).name] = \
Checkpoint(instId=1,
viewNo=2,
seqNoStart=21,
seqNoEnd=25,
digest=cp_digest(21, 25))
# Simulate catch-up completion
node.ledgerManager.last_caught_up_3PC = (2, 20)
audit_ledger = node.getLedger(AUDIT_LEDGER_ID)
txn_with_last_seq_no = {'txn': {'data': {AUDIT_TXN_VIEW_NO: 2,
AUDIT_TXN_PP_SEQ_NO: 20,
AUDIT_TXN_PRIMARIES: ['Gamma', 'Delta']}}}
audit_ledger.get_last_committed_txn = lambda *args: txn_with_last_seq_no
node.allLedgersCaughtUp()
assert len(replica._checkpointer._checkpoint_state) == 0
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
| 43.97076 | 112 | 0.590637 | [
"Apache-2.0"
] | Toktar/indy-plenum | plenum/test/checkpoints/test_checkpoints_removal_after_catchup_during_view_change.py | 7,519 | Python |
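# --- Editor's note: hedged sketch, not part of the checkpoint test module above ---
# The assertions above depend on how received checkpoints are stashed:
# view number -> (seqNoStart, seqNoEnd) window -> sender replica name -> Checkpoint.
# A toy instance of that nesting, built with the same message type and digest helper
# (the sender name 'Alpha:0' is illustrative):
from plenum.common.messages.node_messages import Checkpoint
from plenum.test.checkpoints.helper import cp_digest

stashed = {
    2: {                                     # view number
        (21, 25): {                          # checkpoint window
            'Alpha:0': Checkpoint(instId=0, viewNo=2, seqNoStart=21,
                                  seqNoEnd=25, digest=cp_digest(21, 25)),
        },
    },
}
assert stashed[2][(21, 25)]['Alpha:0'].seqNoEnd == 25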
import urllib3
import random
import string
from util.conf import CROWD_SETTINGS
from util.api.crowd_clients import CrowdRestClient
from util.project_paths import CROWD_USERS
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
USERS = "users"
DEFAULT_USER_PASSWORD = 'password'
DEFAULT_USER_PREFIX = 'performance_'
USER_SEARCH_CQL = f'name={DEFAULT_USER_PREFIX}*'
ERROR_LIMIT = 10
USERS_COUNT = 100000
def generate_random_string(length=20):
return "".join([random.choice(string.ascii_lowercase) for _ in range(length)])
def __get_users(crowd_api, count):
cur_perf_users = crowd_api.users_search_parallel(cql=USER_SEARCH_CQL, max_results=count)
if len(cur_perf_users) >= count:
print(f'{USERS_COUNT} performance test users were found')
return cur_perf_users
else:
raise SystemExit(f'Your Atlassian Crowd instance does not have enough users. '
f'Current users count {len(cur_perf_users)} out of {count}.')
def __create_data_set(crowd_api):
dataset = dict()
dataset[USERS] = __get_users(crowd_api, USERS_COUNT)
print(f'Users count: {len(dataset[USERS])}')
return dataset
def write_test_data_to_files(dataset):
users = [f"{user},{DEFAULT_USER_PASSWORD}" for user in dataset[USERS]]
__write_to_file(CROWD_USERS, users)
def __write_to_file(file_path, items):
with open(file_path, 'w') as f:
for item in items:
f.write(f"{item}\n")
def main():
print("Started preparing data")
url = CROWD_SETTINGS.server_url
print("Server url: ", url)
client = CrowdRestClient(url, CROWD_SETTINGS.application_name,
CROWD_SETTINGS.application_password, verify=CROWD_SETTINGS.secure)
dataset = __create_data_set(client)
write_test_data_to_files(dataset)
print("Finished preparing data")
if __name__ == "__main__":
main()
| 25.824324 | 95 | 0.720565 | [
"Apache-2.0"
] | 213hans/dc-app-performance-toolkit | app/util/data_preparation/crowd_prepare_data.py | 1,911 | Python |
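# --- Editor's note: hedged sketch, not part of crowd_prepare_data.py above ---
# write_test_data_to_files() emits one "<username>,<password>" pair per line into the
# CROWD_USERS file, so a consumer (for instance a load-test script) only needs to split
# each line on the first comma. A minimal self-contained reader (path is illustrative):
def read_user_credentials(path):
    """Yield (username, password) tuples from a users file written as above."""
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line:
                username, password = line.split(',', 1)
                yield username, password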
"""Tests for classes defining properties of ground domains, e.g. ZZ, QQ, ZZ[x] ... """
from sympy import S, sqrt, sin, oo, nan, Poly, Integer, Rational
from sympy.abc import x, y, z
from sympy.polys.domains import (ZZ, QQ, RR, CC, FF, GF,
PolynomialRing, FractionField, EX)
from sympy.polys.rings import ring
from sympy.polys.fields import field
from sympy.polys.domains.modularinteger import ModularIntegerFactory
from sympy.polys.polyerrors import (
UnificationFailed,
GeneratorsNeeded,
GeneratorsError,
CoercionFailed,
NotInvertible,
DomainError)
from sympy.utilities.pytest import raises, XFAIL
ALG = QQ.algebraic_field(sqrt(2), sqrt(3))
def unify(K0, K1):
return K0.unify(K1)
def test_Domain_unify():
F3 = GF(3)
assert unify(F3, F3) == F3
assert unify(F3, ZZ) == ZZ
assert unify(F3, QQ) == QQ
assert unify(F3, ALG) == ALG
assert unify(F3, RR) == RR
assert unify(F3, CC) == CC
assert unify(F3, ZZ[x]) == ZZ[x]
assert unify(F3, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(F3, EX) == EX
assert unify(ZZ, F3) == ZZ
assert unify(ZZ, ZZ) == ZZ
assert unify(ZZ, QQ) == QQ
assert unify(ZZ, ALG) == ALG
assert unify(ZZ, RR) == RR
assert unify(ZZ, CC) == CC
assert unify(ZZ, ZZ[x]) == ZZ[x]
assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ, EX) == EX
assert unify(QQ, F3) == QQ
assert unify(QQ, ZZ) == QQ
assert unify(QQ, QQ) == QQ
assert unify(QQ, ALG) == ALG
assert unify(QQ, RR) == RR
assert unify(QQ, CC) == CC
assert unify(QQ, ZZ[x]) == QQ[x]
assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ, EX) == EX
assert unify(RR, F3) == RR
assert unify(RR, ZZ) == RR
assert unify(RR, QQ) == RR
assert unify(RR, ALG) == RR
assert unify(RR, RR) == RR
assert unify(RR, CC) == CC
assert unify(RR, ZZ[x]) == RR[x]
assert unify(RR, ZZ.frac_field(x)) == RR.frac_field(x)
assert unify(RR, EX) == EX
assert unify(CC, F3) == CC
assert unify(CC, ZZ) == CC
assert unify(CC, QQ) == CC
assert unify(CC, ALG) == CC
assert unify(CC, RR) == CC
assert unify(CC, CC) == CC
assert unify(CC, ZZ[x]) == CC[x]
assert unify(CC, ZZ.frac_field(x)) == CC.frac_field(x)
assert unify(CC, EX) == EX
assert unify(ZZ[x], F3) == ZZ[x]
assert unify(ZZ[x], ZZ) == ZZ[x]
assert unify(ZZ[x], QQ) == QQ[x]
assert unify(ZZ[x], ALG) == ALG[x]
assert unify(ZZ[x], RR) == RR[x]
assert unify(ZZ[x], CC) == CC[x]
assert unify(ZZ[x], ZZ[x]) == ZZ[x]
assert unify(ZZ[x], ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ[x], EX) == EX
assert unify(ZZ.frac_field(x), F3) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x), ALG) == ALG.frac_field(x)
assert unify(ZZ.frac_field(x), RR) == RR.frac_field(x)
assert unify(ZZ.frac_field(x), CC) == CC.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ[x]) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), EX) == EX
assert unify(EX, F3) == EX
assert unify(EX, ZZ) == EX
assert unify(EX, QQ) == EX
assert unify(EX, ALG) == EX
assert unify(EX, RR) == EX
assert unify(EX, CC) == EX
assert unify(EX, ZZ[x]) == EX
assert unify(EX, ZZ.frac_field(x)) == EX
assert unify(EX, EX) == EX
def test_Domain_unify_composite():
assert unify(ZZ.poly_ring(x), ZZ) == ZZ.poly_ring(x)
assert unify(ZZ.poly_ring(x), QQ) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), ZZ) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), QQ) == QQ.poly_ring(x)
assert unify(ZZ, ZZ.poly_ring(x)) == ZZ.poly_ring(x)
assert unify(QQ, ZZ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ, QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ, QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ.poly_ring(x, y), ZZ) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), ZZ) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
assert unify(ZZ, ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
assert unify(QQ, ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), QQ) == QQ.frac_field(x)
assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ, QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ, QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
assert unify(ZZ, ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ, ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x)) == ZZ.poly_ring(x)
assert unify(ZZ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), ZZ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(QQ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x)) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x), ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(QQ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x, z)) == ZZ.poly_ring(x, y, z)
assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ.frac_field(x)) == QQ.frac_field(x)
assert unify(QQ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x), ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(QQ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), ZZ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(ZZ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(ZZ.poly_ring(x), QQ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
assert unify(QQ.poly_ring(x), QQ.frac_field(x)) == QQ.frac_field(x)
assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.poly_ring(x), QQ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.poly_ring(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.poly_ring(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(ZZ.frac_field(x), QQ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(QQ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
assert unify(QQ.frac_field(x), QQ.poly_ring(x)) == QQ.frac_field(x)
assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x, y), QQ.poly_ring(x)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(ZZ.frac_field(x), QQ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
assert unify(QQ.frac_field(x), QQ.poly_ring(x, y)) == QQ.frac_field(x, y)
assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
assert unify(QQ.frac_field(x, y), QQ.poly_ring(x, z)) == QQ.frac_field(x, y, z)
def test_Domain_unify_algebraic():
sqrt5 = QQ.algebraic_field(sqrt(5))
sqrt7 = QQ.algebraic_field(sqrt(7))
sqrt57 = QQ.algebraic_field(sqrt(5), sqrt(7))
assert sqrt5.unify(sqrt7) == sqrt57
assert sqrt5.unify(sqrt5[x, y]) == sqrt5[x, y]
assert sqrt5[x, y].unify(sqrt5) == sqrt5[x, y]
assert sqrt5.unify(sqrt5.frac_field(x, y)) == sqrt5.frac_field(x, y)
assert sqrt5.frac_field(x, y).unify(sqrt5) == sqrt5.frac_field(x, y)
assert sqrt5.unify(sqrt7[x, y]) == sqrt57[x, y]
assert sqrt5[x, y].unify(sqrt7) == sqrt57[x, y]
assert sqrt5.unify(sqrt7.frac_field(x, y)) == sqrt57.frac_field(x, y)
assert sqrt5.frac_field(x, y).unify(sqrt7) == sqrt57.frac_field(x, y)
def test_Domain_unify_with_symbols():
raises(UnificationFailed, lambda: ZZ[x, y].unify_with_symbols(ZZ, (y, z)))
raises(UnificationFailed, lambda: ZZ.unify_with_symbols(ZZ[x, y], (y, z)))
def test_Domain__contains__():
assert (0 in EX) is True
assert (0 in ZZ) is True
assert (0 in QQ) is True
assert (0 in RR) is True
assert (0 in CC) is True
assert (0 in ALG) is True
assert (0 in ZZ[x, y]) is True
assert (0 in QQ[x, y]) is True
assert (0 in RR[x, y]) is True
assert (-7 in EX) is True
assert (-7 in ZZ) is True
assert (-7 in QQ) is True
assert (-7 in RR) is True
assert (-7 in CC) is True
assert (-7 in ALG) is True
assert (-7 in ZZ[x, y]) is True
assert (-7 in QQ[x, y]) is True
assert (-7 in RR[x, y]) is True
assert (17 in EX) is True
assert (17 in ZZ) is True
assert (17 in QQ) is True
assert (17 in RR) is True
assert (17 in CC) is True
assert (17 in ALG) is True
assert (17 in ZZ[x, y]) is True
assert (17 in QQ[x, y]) is True
assert (17 in RR[x, y]) is True
assert (-S(1)/7 in EX) is True
assert (-S(1)/7 in ZZ) is False
assert (-S(1)/7 in QQ) is True
assert (-S(1)/7 in RR) is True
assert (-S(1)/7 in CC) is True
assert (-S(1)/7 in ALG) is True
assert (-S(1)/7 in ZZ[x, y]) is False
assert (-S(1)/7 in QQ[x, y]) is True
assert (-S(1)/7 in RR[x, y]) is True
assert (S(3)/5 in EX) is True
assert (S(3)/5 in ZZ) is False
assert (S(3)/5 in QQ) is True
assert (S(3)/5 in RR) is True
assert (S(3)/5 in CC) is True
assert (S(3)/5 in ALG) is True
assert (S(3)/5 in ZZ[x, y]) is False
assert (S(3)/5 in QQ[x, y]) is True
assert (S(3)/5 in RR[x, y]) is True
assert (3.0 in EX) is True
assert (3.0 in ZZ) is True
assert (3.0 in QQ) is True
assert (3.0 in RR) is True
assert (3.0 in CC) is True
assert (3.0 in ALG) is True
assert (3.0 in ZZ[x, y]) is True
assert (3.0 in QQ[x, y]) is True
assert (3.0 in RR[x, y]) is True
assert (3.14 in EX) is True
assert (3.14 in ZZ) is False
assert (3.14 in QQ) is True
assert (3.14 in RR) is True
assert (3.14 in CC) is True
assert (3.14 in ALG) is True
assert (3.14 in ZZ[x, y]) is False
assert (3.14 in QQ[x, y]) is True
assert (3.14 in RR[x, y]) is True
assert (oo in EX) is True
assert (oo in ZZ) is False
assert (oo in QQ) is False
assert (oo in RR) is True
assert (oo in CC) is True
assert (oo in ALG) is False
assert (oo in ZZ[x, y]) is False
assert (oo in QQ[x, y]) is False
assert (oo in RR[x, y]) is True
assert (-oo in EX) is True
assert (-oo in ZZ) is False
assert (-oo in QQ) is False
assert (-oo in RR) is True
assert (-oo in CC) is True
assert (-oo in ALG) is False
assert (-oo in ZZ[x, y]) is False
assert (-oo in QQ[x, y]) is False
assert (-oo in RR[x, y]) is True
assert (sqrt(7) in EX) is True
assert (sqrt(7) in ZZ) is False
assert (sqrt(7) in QQ) is False
assert (sqrt(7) in RR) is True
assert (sqrt(7) in CC) is True
assert (sqrt(7) in ALG) is False
assert (sqrt(7) in ZZ[x, y]) is False
assert (sqrt(7) in QQ[x, y]) is False
assert (sqrt(7) in RR[x, y]) is True
assert (2*sqrt(3) + 1 in EX) is True
assert (2*sqrt(3) + 1 in ZZ) is False
assert (2*sqrt(3) + 1 in QQ) is False
assert (2*sqrt(3) + 1 in RR) is True
assert (2*sqrt(3) + 1 in CC) is True
assert (2*sqrt(3) + 1 in ALG) is True
assert (2*sqrt(3) + 1 in ZZ[x, y]) is False
assert (2*sqrt(3) + 1 in QQ[x, y]) is False
assert (2*sqrt(3) + 1 in RR[x, y]) is True
assert (sin(1) in EX) is True
assert (sin(1) in ZZ) is False
assert (sin(1) in QQ) is False
assert (sin(1) in RR) is True
assert (sin(1) in CC) is True
assert (sin(1) in ALG) is False
assert (sin(1) in ZZ[x, y]) is False
assert (sin(1) in QQ[x, y]) is False
assert (sin(1) in RR[x, y]) is True
assert (x**2 + 1 in EX) is True
assert (x**2 + 1 in ZZ) is False
assert (x**2 + 1 in QQ) is False
assert (x**2 + 1 in RR) is False
assert (x**2 + 1 in CC) is False
assert (x**2 + 1 in ALG) is False
assert (x**2 + 1 in ZZ[x]) is True
assert (x**2 + 1 in QQ[x]) is True
assert (x**2 + 1 in RR[x]) is True
assert (x**2 + 1 in ZZ[x, y]) is True
assert (x**2 + 1 in QQ[x, y]) is True
assert (x**2 + 1 in RR[x, y]) is True
assert (x**2 + y**2 in EX) is True
assert (x**2 + y**2 in ZZ) is False
assert (x**2 + y**2 in QQ) is False
assert (x**2 + y**2 in RR) is False
assert (x**2 + y**2 in CC) is False
assert (x**2 + y**2 in ALG) is False
assert (x**2 + y**2 in ZZ[x]) is False
assert (x**2 + y**2 in QQ[x]) is False
assert (x**2 + y**2 in RR[x]) is False
assert (x**2 + y**2 in ZZ[x, y]) is True
assert (x**2 + y**2 in QQ[x, y]) is True
assert (x**2 + y**2 in RR[x, y]) is True
assert (S(3)/2*x/(y + 1) - z in QQ[x, y, z]) is False
def test_Domain_get_ring():
    assert ZZ.has_assoc_Ring is True
    assert QQ.has_assoc_Ring is True
    assert ZZ[x].has_assoc_Ring is True
    assert QQ[x].has_assoc_Ring is True
    assert ZZ[x, y].has_assoc_Ring is True
    assert QQ[x, y].has_assoc_Ring is True
    assert ZZ.frac_field(x).has_assoc_Ring is True
    assert QQ.frac_field(x).has_assoc_Ring is True
    assert ZZ.frac_field(x, y).has_assoc_Ring is True
    assert QQ.frac_field(x, y).has_assoc_Ring is True

    assert EX.has_assoc_Ring is False
    assert RR.has_assoc_Ring is False
    assert ALG.has_assoc_Ring is False

    assert ZZ.get_ring() == ZZ
    assert QQ.get_ring() == ZZ
    assert ZZ[x].get_ring() == ZZ[x]
    assert QQ[x].get_ring() == QQ[x]
    assert ZZ[x, y].get_ring() == ZZ[x, y]
    assert QQ[x, y].get_ring() == QQ[x, y]
    assert ZZ.frac_field(x).get_ring() == ZZ[x]
    assert QQ.frac_field(x).get_ring() == QQ[x]
    assert ZZ.frac_field(x, y).get_ring() == ZZ[x, y]
    assert QQ.frac_field(x, y).get_ring() == QQ[x, y]

    assert EX.get_ring() == EX

    raises(DomainError, lambda: RR.get_ring())
    raises(DomainError, lambda: ALG.get_ring())


def test_Domain_get_field():
    assert EX.has_assoc_Field is True
    assert ZZ.has_assoc_Field is True
    assert QQ.has_assoc_Field is True
    assert RR.has_assoc_Field is True
    assert ALG.has_assoc_Field is True
    assert ZZ[x].has_assoc_Field is True
    assert QQ[x].has_assoc_Field is True
    assert ZZ[x, y].has_assoc_Field is True
    assert QQ[x, y].has_assoc_Field is True

    assert EX.get_field() == EX
    assert ZZ.get_field() == QQ
    assert QQ.get_field() == QQ
    assert RR.get_field() == RR
    assert ALG.get_field() == ALG
    assert ZZ[x].get_field() == ZZ.frac_field(x)
    assert QQ[x].get_field() == QQ.frac_field(x)
    assert ZZ[x, y].get_field() == ZZ.frac_field(x, y)
    assert QQ[x, y].get_field() == QQ.frac_field(x, y)


def test_Domain_get_exact():
    assert EX.get_exact() == EX
    assert ZZ.get_exact() == ZZ
    assert QQ.get_exact() == QQ
    assert RR.get_exact() == QQ
    assert ALG.get_exact() == ALG
    assert ZZ[x].get_exact() == ZZ[x]
    assert QQ[x].get_exact() == QQ[x]
    assert ZZ[x, y].get_exact() == ZZ[x, y]
    assert QQ[x, y].get_exact() == QQ[x, y]
    assert ZZ.frac_field(x).get_exact() == ZZ.frac_field(x)
    assert QQ.frac_field(x).get_exact() == QQ.frac_field(x)
    assert ZZ.frac_field(x, y).get_exact() == ZZ.frac_field(x, y)
    assert QQ.frac_field(x, y).get_exact() == QQ.frac_field(x, y)


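# convert() coerces outside values into a domain, e.g. a binary float becomes the
# exact rational with the same value; poly_ring()/frac_field() require generators.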
def test_Domain_convert():
    assert QQ.convert(10e-52) == QQ(1684996666696915, 1684996666696914987166688442938726917102321526408785780068975640576)

    R, x = ring("x", ZZ)
    assert ZZ.convert(x - x) == 0
    assert ZZ.convert(x - x, R.to_domain()) == 0


def test_PolynomialRing__init():
    raises(GeneratorsNeeded, lambda: ZZ.poly_ring())


def test_FractionField__init():
    raises(GeneratorsNeeded, lambda: ZZ.frac_field())


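# inject() extends a domain with additional generators; injecting a generator that
# is already present raises GeneratorsError.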
def test_inject():
    assert ZZ.inject(x, y, z) == ZZ[x, y, z]
    assert ZZ[x].inject(y, z) == ZZ[x, y, z]
    assert ZZ.frac_field(x).inject(y, z) == ZZ.frac_field(x, y, z)
    raises(GeneratorsError, lambda: ZZ[x].inject(x))


def test_Domain_map():
    seq = ZZ.map([1, 2, 3, 4])
    assert all(ZZ.of_type(elt) for elt in seq)

    seq = ZZ.map([[1, 2, 3, 4]])
    assert all(ZZ.of_type(elt) for elt in seq[0]) and len(seq) == 1


def test_Domain___eq__():
    assert (ZZ[x, y] == ZZ[x, y]) is True
    assert (QQ[x, y] == QQ[x, y]) is True

    assert (ZZ[x, y] == QQ[x, y]) is False
    assert (QQ[x, y] == ZZ[x, y]) is False

    assert (ZZ.frac_field(x, y) == ZZ.frac_field(x, y)) is True
    assert (QQ.frac_field(x, y) == QQ.frac_field(x, y)) is True

    assert (ZZ.frac_field(x, y) == QQ.frac_field(x, y)) is False
    assert (QQ.frac_field(x, y) == ZZ.frac_field(x, y)) is False


def test_Domain__algebraic_field():
    alg = ZZ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ

    alg = QQ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ

    alg = alg.algebraic_field(sqrt(3))
    assert alg.ext.minpoly == Poly(x**4 - 10*x**2 + 1)
    assert alg.dom == QQ


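# Converting a fraction-field element back into a polynomial ring: a genuine
# polynomial denominator (e.g. x + 1) cannot be cleared, so from_FractionField
# returns None for it.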
def test_PolynomialRing_from_FractionField():
    F, x, y = field("x,y", ZZ)
    R, X, Y = ring("x,y", ZZ)

    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2

    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2

    F, x, y = field("x,y", QQ)
    R, X, Y = ring("x,y", QQ)

    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2

    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2


def test_FractionField_from_PolynomialRing():
    R, x, y = ring("x,y", QQ)
    F, X, Y = field("x,y", ZZ)

    f = 3*x**2 + 5*y**2
    g = x**2/3 + y**2/5

    assert F.to_domain().from_PolynomialRing(f, R.to_domain()) == 3*X**2 + 5*Y**2
    assert F.to_domain().from_PolynomialRing(g, R.to_domain()) == (5*X**2 + 3*Y**2)/15


def test_FF_of_type():
    assert FF(3).of_type(FF(3)(1)) is True
    assert FF(5).of_type(FF(5)(3)) is True
    assert FF(5).of_type(FF(7)(3)) is False


def test___eq__():
    assert not QQ[x] == ZZ[x]
    assert not QQ.frac_field(x) == ZZ.frac_field(x)


def test_RealField_from_sympy():
    assert RR.convert(S(0)) == RR.dtype(0)
    assert RR.convert(S(0.0)) == RR.dtype(0.0)
    assert RR.convert(S(1)) == RR.dtype(1)
    assert RR.convert(S(1.0)) == RR.dtype(1.0)
    assert RR.convert(sin(1)) == RR.dtype(sin(1).evalf())
    assert RR.convert(oo) == RR("+inf")
    assert RR.convert(-oo) == RR("-inf")
    raises(CoercionFailed, lambda: RR.convert(x))


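# FF(p) is the prime field GF(p): elements wrap modulo p and support mixed
# arithmetic and comparisons with plain ints; a zero or non-integer modulus
# is rejected.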
def test_ModularInteger():
    F3 = FF(3)

    a = F3(0)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(1)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(3)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(4)
    assert isinstance(a, F3.dtype) and a == 1

    a = F3(F3(0))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(1))
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(F3(2))
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(F3(3))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(4))
    assert isinstance(a, F3.dtype) and a == 1

    a = -F3(1)
    assert isinstance(a, F3.dtype) and a == 2
    a = -F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = 2 + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = 3 - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = 2*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = 2/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = 1 % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1

    a = F3(2)**0
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)**1
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(2)**2
    assert isinstance(a, F3.dtype) and a == 1

    assert bool(F3(3)) is False
    assert bool(F3(4)) is True

    F5 = FF(5)

    a = F5(1)**(-1)
    assert isinstance(a, F5.dtype) and a == 1
    a = F5(2)**(-1)
    assert isinstance(a, F5.dtype) and a == 3
    a = F5(3)**(-1)
    assert isinstance(a, F5.dtype) and a == 2
    a = F5(4)**(-1)
    assert isinstance(a, F5.dtype) and a == 4

    assert (F5(1) < F5(2)) is True
    assert (F5(1) <= F5(2)) is True
    assert (F5(1) > F5(2)) is False
    assert (F5(1) >= F5(2)) is False

    assert (F5(3) < F5(2)) is False
    assert (F5(3) <= F5(2)) is False
    assert (F5(3) > F5(2)) is True
    assert (F5(3) >= F5(2)) is True

    assert (F5(1) < F5(7)) is True
    assert (F5(1) <= F5(7)) is True
    assert (F5(1) > F5(7)) is False
    assert (F5(1) >= F5(7)) is False

    assert (F5(3) < F5(7)) is False
    assert (F5(3) <= F5(7)) is False
    assert (F5(3) > F5(7)) is True
    assert (F5(3) >= F5(7)) is True

    assert (F5(1) < 2) is True
    assert (F5(1) <= 2) is True
    assert (F5(1) > 2) is False
    assert (F5(1) >= 2) is False

    assert (F5(3) < 2) is False
    assert (F5(3) <= 2) is False
    assert (F5(3) > 2) is True
    assert (F5(3) >= 2) is True

    assert (F5(1) < 7) is True
    assert (F5(1) <= 7) is True
    assert (F5(1) > 7) is False
    assert (F5(1) >= 7) is False

    assert (F5(3) < 7) is False
    assert (F5(3) <= 7) is False
    assert (F5(3) > 7) is True
    assert (F5(3) >= 7) is True

    raises(NotInvertible, lambda: F5(0)**(-1))
    raises(NotInvertible, lambda: F5(5)**(-1))

    raises(ValueError, lambda: FF(0))
    raises(ValueError, lambda: FF(2.1))


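# int() collapses a rational to a plain integer, discarding the fractional part,
# even for very large numerators and denominators.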
def test_QQ_int():
    assert int(QQ(2**2000, 3**1250)) == 455431
    assert int(QQ(2**100, 3)) == 422550200076076467165567735125
| 35.662602 | 122 | 0.605228 | ["MIT"] | 18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/domains/tests/test_domains.py | 26,319 | Python |