filename (stringlengths 13–19) | text (stringlengths 134–1.04M)
---|---
the-stack_106_13648
|
import json
from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views import View
from authorization.decorator import permission_needed
class UserView(View):
def get(self, request, *args, **kwargs):
user = request.fb_user
print(user)
return JsonResponse({
'stranger': not user.isAuthenticated,
'anonim': user.isAnonymous,
'authenticated': user.isAuthenticated and not user.isAnonymous and not user.isAdmin,
'admin': user.isAdmin,
'name': user.name
})
@method_decorator(permission_needed(lambda request: request.fb_user.isAnonymous, 'Log in to change your name',
'Anonymous accounts can\'t change name'))
def put(self, request, *args, **kwargs):
decoded = json.loads(request.body)
if 'name' not in decoded or type(decoded['name']) != str:
return JsonResponse({'error': 'name parameter not found'}, status=400)
user = request.fb_user
user.name = decoded['name']
user.save()
return JsonResponse({'name': user.name})
@method_decorator(permission_needed(lambda request: request.fb_user.isAnonymous, 'Log in to delete your account',
'Anonymous accounts can\'t delete themselves'))
def delete(self, request, *args, **kwargs):
request.fb_user.delete()
return JsonResponse({'success': 'Deleted successfully'})
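# --- Illustrative sketch, not part of the original file ---
# One way this class-based view might be wired into a Django URLconf; the 'user/' route
# and the views module path are assumptions made only for this example.
# from django.urls import path
# from .views import UserView
#
# urlpatterns = [
#     path('user/', UserView.as_view()),
# ]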
|
the-stack_106_13649
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Train the ASR model."""
import argparse
import copy
import cProfile
import logging
import os
from setproctitle import setproctitle
import shutil
import sys
import time
import torch
from tqdm import tqdm
from neural_sp.bin.args_asr import parse_args_train
from neural_sp.bin.model_name import set_asr_model_name
from neural_sp.bin.train_utils import (
compute_susampling_factor,
load_checkpoint,
load_config,
save_config,
set_logger,
set_save_path
)
from neural_sp.datasets.asr import build_dataloader
from neural_sp.models.data_parallel import CustomDataParallel
from neural_sp.models.data_parallel import CPUWrapperASR
from neural_sp.models.lm.build import build_lm
from neural_sp.models.seq2seq.speech2text import Speech2Text
from neural_sp.trainers.lr_scheduler import LRScheduler
from neural_sp.trainers.optimizer import set_optimizer
from neural_sp.trainers.reporter import Reporter
from neural_sp.utils import mkdir_join
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
logger = logging.getLogger(__name__)
def main():
args = parse_args_train(sys.argv[1:])
args_init = copy.deepcopy(args)
args_teacher = copy.deepcopy(args)
# Load a conf file
if args.resume:
conf = load_config(os.path.join(os.path.dirname(args.resume), 'conf.yml'))
for k, v in conf.items():
if k != 'resume':
setattr(args, k, v)
recog_params = vars(args)
args = compute_susampling_factor(args)
# for multi-GPUs
if args.n_gpus >= 1:
batch_size = args.batch_size * args.n_gpus
accum_grad_n_steps = max(1, args.accum_grad_n_steps // args.n_gpus)
else:
batch_size = args.batch_size
accum_grad_n_steps = args.accum_grad_n_steps
# Load dataloader
train_set = build_dataloader(args=args,
tsv_path=args.train_set,
tsv_path_sub1=args.train_set_sub1,
tsv_path_sub2=args.train_set_sub2,
batch_size=batch_size,
n_epochs=args.n_epochs,
sort_by='input',
short2long=args.sort_short2long,
sort_stop_epoch=args.sort_stop_epoch,
num_workers=args.n_gpus,
pin_memory=True,
alignment_dir=args.train_alignment)
dev_set = build_dataloader(args=args,
tsv_path=args.dev_set,
tsv_path_sub1=args.dev_set_sub1,
tsv_path_sub2=args.dev_set_sub2,
batch_size=batch_size,
num_workers=args.n_gpus,
pin_memory=True,
alignment_dir=args.dev_alignment)
eval_sets = [build_dataloader(args=args,
tsv_path=s,
batch_size=1,
is_test=True) for s in args.eval_sets]
args.vocab = train_set.vocab
args.vocab_sub1 = train_set.vocab_sub1
args.vocab_sub2 = train_set.vocab_sub2
args.input_dim = train_set.input_dim
# Set save path
if args.resume:
save_path = os.path.dirname(args.resume)
dir_name = os.path.basename(save_path)
else:
dir_name = set_asr_model_name(args)
if args.mbr_training:
assert args.asr_init
save_path = mkdir_join(os.path.dirname(args.asr_init), dir_name)
else:
save_path = mkdir_join(args.model_save_dir, '_'.join(
os.path.basename(args.train_set).split('.')[:-1]), dir_name)
save_path = set_save_path(save_path) # avoid overwriting
# Set logger
set_logger(os.path.join(save_path, 'train.log'), stdout=args.stdout)
# Load a LM conf file for LM fusion & LM initialization
if not args.resume and args.external_lm:
lm_conf = load_config(os.path.join(os.path.dirname(args.external_lm), 'conf.yml'))
args.lm_conf = argparse.Namespace()
for k, v in lm_conf.items():
setattr(args.lm_conf, k, v)
assert args.unit == args.lm_conf.unit
assert args.vocab == args.lm_conf.vocab
# Model setting
model = Speech2Text(args, save_path, train_set.idx2token[0])
if not args.resume:
# Save the conf file as a yaml file
save_config(vars(args), os.path.join(save_path, 'conf.yml'))
if args.external_lm:
save_config(args.lm_conf, os.path.join(save_path, 'conf_lm.yml'))
# Save the nlsyms, dictionary, and wp_model
if args.nlsyms:
shutil.copy(args.nlsyms, os.path.join(save_path, 'nlsyms.txt'))
for sub in ['', '_sub1', '_sub2']:
if getattr(args, 'dict' + sub):
shutil.copy(getattr(args, 'dict' + sub), os.path.join(save_path, 'dict' + sub + '.txt'))
if getattr(args, 'unit' + sub) == 'wp':
shutil.copy(getattr(args, 'wp_model' + sub), os.path.join(save_path, 'wp' + sub + '.model'))
for k, v in sorted(vars(args).items(), key=lambda x: x[0]):
logger.info('%s: %s' % (k, str(v)))
# Count total parameters
for n in sorted(list(model.num_params_dict.keys())):
n_params = model.num_params_dict[n]
logger.info("%s %d" % (n, n_params))
logger.info("Total %.2f M parameters" % (model.total_parameters / 1000000))
logger.info('torch version: %s' % str(torch.__version__))
logger.info(model)
# Initialize with pre-trained model's parameters
if args.asr_init:
# Load the ASR model (full model)
conf_init = load_config(os.path.join(os.path.dirname(args.asr_init), 'conf.yml'))
for k, v in conf_init.items():
setattr(args_init, k, v)
model_init = Speech2Text(args_init)
load_checkpoint(args.asr_init, model_init)
# Overwrite parameters
param_dict = dict(model_init.named_parameters())
for n, p in model.named_parameters():
if n in param_dict.keys() and p.size() == param_dict[n].size():
if args.asr_init_enc_only and 'enc' not in n:
continue
p.data = param_dict[n].data
logger.info('Overwrite %s' % n)
# Set optimizer
if args.resume:
resume_epoch = int(args.resume.split('-')[-1])
optimizer = set_optimizer(model, 'sgd' if resume_epoch > args.convert_to_sgd_epoch else args.optimizer,
args.lr, args.weight_decay)
else:
resume_epoch = 0
optimizer = set_optimizer(model, args.optimizer, args.lr, args.weight_decay)
# Wrap optimizer by learning rate scheduler
is_transformer = 'former' in args.enc_type or 'former' in args.dec_type
scheduler = LRScheduler(optimizer, args.lr,
decay_type=args.lr_decay_type,
decay_start_epoch=args.lr_decay_start_epoch,
decay_rate=args.lr_decay_rate,
decay_patient_n_epochs=args.lr_decay_patient_n_epochs,
early_stop_patient_n_epochs=args.early_stop_patient_n_epochs,
lower_better=args.metric not in ['accuracy', 'bleu'],
warmup_start_lr=args.warmup_start_lr,
warmup_n_steps=args.warmup_n_steps,
peak_lr=0.05 / (getattr(args, 'transformer_enc_d_model', 0) **
0.5) if 'conformer' in args.enc_type else 1e6,
model_size=getattr(args, 'transformer_enc_d_model',
getattr(args, 'transformer_dec_d_model', 0)),
factor=args.lr_factor,
noam=args.optimizer == 'noam',
save_checkpoints_topk=10 if is_transformer else 1)
if args.resume:
# Restore the last saved model
load_checkpoint(args.resume, model, scheduler)
# Resume between convert_to_sgd_epoch -1 and convert_to_sgd_epoch
if resume_epoch == args.convert_to_sgd_epoch:
scheduler.convert_to_sgd(model, args.lr, args.weight_decay,
decay_type='always', decay_rate=0.5)
# Load the teacher ASR model
teacher = None
if args.teacher:
assert os.path.isfile(args.teacher), 'There is no checkpoint.'
conf_teacher = load_config(os.path.join(os.path.dirname(args.teacher), 'conf.yml'))
for k, v in conf_teacher.items():
setattr(args_teacher, k, v)
# Setting for knowledge distillation
args_teacher.ss_prob = 0
args.lsm_prob = 0
teacher = Speech2Text(args_teacher)
load_checkpoint(args.teacher, teacher)
# Load the teacher LM
teacher_lm = None
if args.teacher_lm:
assert os.path.isfile(args.teacher_lm), 'There is no checkpoint.'
conf_lm = load_config(os.path.join(os.path.dirname(args.teacher_lm), 'conf.yml'))
args_lm = argparse.Namespace()
for k, v in conf_lm.items():
setattr(args_lm, k, v)
teacher_lm = build_lm(args_lm)
load_checkpoint(args.teacher_lm, teacher_lm)
# GPU setting
use_apex = args.train_dtype in ["O0", "O1", "O2", "O3"]
amp = None
if args.n_gpus >= 1:
model.cudnn_setting(deterministic=not (is_transformer or args.cudnn_benchmark),
benchmark=not is_transformer and args.cudnn_benchmark)
model.cuda()
# Mixed precision training setting
if use_apex:
from apex import amp
model, scheduler.optimizer = amp.initialize(model, scheduler.optimizer,
opt_level=args.train_dtype)
from neural_sp.models.seq2seq.decoders.ctc import CTC
amp.register_float_function(CTC, "loss_fn")
# NOTE: see https://github.com/espnet/espnet/pull/1779
amp.init()
if args.resume:
load_checkpoint(args.resume, amp=amp)
model = CustomDataParallel(model, device_ids=list(range(0, args.n_gpus)))
if teacher is not None:
teacher.cuda()
if teacher_lm is not None:
teacher_lm.cuda()
else:
model = CPUWrapperASR(model)
# Set process name
logger.info('PID: %s' % os.getpid())
logger.info('HOSTNAME: %s' % os.uname()[1])
logger.info('#GPU: %d' % torch.cuda.device_count())
setproctitle(args.job_name if args.job_name else dir_name)
# Set reporter
reporter = Reporter(save_path)
if args.mtl_per_batch:
# NOTE: from easier to harder tasks
tasks = []
if 1 - args.bwd_weight - args.ctc_weight - args.sub1_weight - args.sub2_weight > 0:
tasks += ['ys']
if args.bwd_weight > 0:
tasks = ['ys.bwd'] + tasks
if args.ctc_weight > 0:
tasks = ['ys.ctc'] + tasks
if args.mbr_ce_weight > 0:
tasks = ['ys.mbr'] + tasks
for sub in ['sub1', 'sub2']:
if getattr(args, 'train_set_' + sub):
if getattr(args, sub + '_weight') - getattr(args, 'ctc_weight_' + sub) > 0:
tasks = ['ys_' + sub] + tasks
if getattr(args, 'ctc_weight_' + sub) > 0:
tasks = ['ys_' + sub + '.ctc'] + tasks
else:
tasks = ['all']
if getattr(args, 'ss_start_epoch', 0) <= resume_epoch:
model.module.trigger_scheduled_sampling()
if getattr(args, 'mocha_quantity_loss_start_epoch', 0) <= resume_epoch:
model.module.trigger_quantity_loss()
start_time_train = time.time()
start_time_epoch = time.time()
start_time_step = time.time()
accum_n_steps = 0
n_steps = scheduler.n_steps * accum_grad_n_steps
epoch_detail_prev = 0
for ep in range(resume_epoch, args.n_epochs):
pbar_epoch = tqdm(total=len(train_set))
session_prev = None
for batch_train, is_new_epoch in train_set:
# Compute loss in the training set
if args.discourse_aware and batch_train['sessions'][0] != session_prev:
model.module.reset_session()
session_prev = batch_train['sessions'][0]
accum_n_steps += 1
# Change mini-batch depending on task
if accum_n_steps == 1:
loss_train = 0 # average over gradient accumulation
for task in tasks:
loss, observation = model(batch_train, task=task,
teacher=teacher, teacher_lm=teacher_lm)
loss = loss / accum_grad_n_steps
reporter.add(observation)
if use_apex:
with amp.scale_loss(loss, scheduler.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
loss.detach() # Truncate the graph
if accum_n_steps >= accum_grad_n_steps or is_new_epoch:
if args.clip_grad_norm > 0:
total_norm = torch.nn.utils.clip_grad_norm_(
model.module.parameters(), args.clip_grad_norm)
reporter.add_tensorboard_scalar('total_norm', total_norm)
scheduler.step()
scheduler.zero_grad()
accum_n_steps = 0
# NOTE: parameters are forcibly updated at the end of every epoch
loss_train += loss.item()
del loss
pbar_epoch.update(len(batch_train['utt_ids']))
reporter.add_tensorboard_scalar('learning_rate', scheduler.lr)
# NOTE: loss/acc/ppl are already added in the model
reporter.step()
n_steps += 1
# NOTE: n_steps is different from the step counter in Noam Optimizer
if n_steps % args.print_step == 0:
# Compute loss in the dev set
batch_dev = iter(dev_set).next(batch_size=1 if 'transducer' in args.dec_type else None)[0]
# Change mini-batch depending on task
for task in tasks:
loss, observation = model(batch_dev, task=task, is_eval=True)
reporter.add(observation, is_eval=True)
loss_dev = loss.item()
del loss
reporter.step(is_eval=True)
duration_step = time.time() - start_time_step
if args.input_type == 'speech':
xlen = max(len(x) for x in batch_train['xs'])
ylen = max(len(y) for y in batch_train['ys'])
elif args.input_type == 'text':
xlen = max(len(x) for x in batch_train['ys'])
ylen = max(len(y) for y in batch_train['ys_sub1'])
logger.info("step:%d(ep:%.2f) loss:%.3f(%.3f)/lr:%.7f/bs:%d/xlen:%d/ylen:%d (%.2f min)" %
(n_steps, scheduler.n_epochs + train_set.epoch_detail,
loss_train, loss_dev,
scheduler.lr, len(batch_train['utt_ids']),
xlen, ylen, duration_step / 60))
start_time_step = time.time()
# Save figures of loss and accuracy
if n_steps % (args.print_step * 10) == 0:
reporter.snapshot()
model.module.plot_attention()
model.module.plot_ctc()
# Evaluate model every 0.1 epoch during MBR training
if args.mbr_training:
if int(train_set.epoch_detail * 10) != int(epoch_detail_prev * 10):
# dev
evaluate([model.module], dev_set, recog_params, args,
int(train_set.epoch_detail * 10) / 10, logger)
# Save the model
scheduler.save_checkpoint(
model, save_path, remove_old=False, amp=amp,
epoch_detail=train_set.epoch_detail)
epoch_detail_prev = train_set.epoch_detail
if is_new_epoch:
break
# Save checkpoint and evaluate model per epoch
duration_epoch = time.time() - start_time_epoch
logger.info('========== EPOCH:%d (%.2f min) ==========' %
(scheduler.n_epochs + 1, duration_epoch / 60))
if scheduler.n_epochs + 1 < args.eval_start_epoch:
scheduler.epoch() # lr decay
reporter.epoch() # plot
# Save the model
scheduler.save_checkpoint(
model, save_path, remove_old=not is_transformer and args.remove_old_checkpoints, amp=amp)
else:
start_time_eval = time.time()
# dev
metric_dev = evaluate([model.module], dev_set, recog_params, args,
scheduler.n_epochs + 1, logger)
scheduler.epoch(metric_dev) # lr decay
reporter.epoch(metric_dev, name=args.metric) # plot
if scheduler.is_topk or is_transformer:
# Save the model
scheduler.save_checkpoint(
model, save_path, remove_old=not is_transformer and args.remove_old_checkpoints, amp=amp)
# test
if scheduler.is_topk:
for eval_set in eval_sets:
evaluate([model.module], eval_set, recog_params, args,
scheduler.n_epochs, logger)
duration_eval = time.time() - start_time_eval
logger.info('Evaluation time: %.2f min' % (duration_eval / 60))
# Early stopping
if scheduler.is_early_stop:
break
# Convert to fine-tuning stage
if scheduler.n_epochs == args.convert_to_sgd_epoch:
scheduler.convert_to_sgd(model, args.lr, args.weight_decay,
decay_type='always', decay_rate=0.5)
if scheduler.n_epochs >= args.n_epochs:
break
if getattr(args, 'ss_start_epoch', 0) == (ep + 1):
model.module.trigger_scheduled_sampling()
if getattr(args, 'mocha_quantity_loss_start_epoch', 0) == (ep + 1):
model.module.trigger_quantity_loss()
start_time_step = time.time()
start_time_epoch = time.time()
duration_train = time.time() - start_time_train
logger.info('Total time: %.2f hour' % (duration_train / 3600))
reporter.tf_writer.close()
pbar_epoch.close()
return save_path
def evaluate(models, dataloader, recog_params, args, epoch, logger):
if args.metric == 'edit_distance':
if args.unit in ['word', 'word_char']:
from neural_sp.evaluators.word import eval_word
metric = eval_word(models, dataloader, recog_params, epoch=epoch)[0]
logger.info('WER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, metric))
elif args.unit == 'wp':
from neural_sp.evaluators.wordpiece import eval_wordpiece
metric, cer = eval_wordpiece(models, dataloader, recog_params, epoch=epoch)
logger.info('WER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, metric))
logger.info('CER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, cer))
elif 'char' in args.unit:
from neural_sp.evaluators.character import eval_char
wer, cer = eval_char(models, dataloader, recog_params, epoch=epoch)
logger.info('WER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, wer))
logger.info('CER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, cer))
if dataloader.corpus in ['aishell1']:
metric = cer
else:
metric = wer
elif 'phone' in args.unit:
from neural_sp.evaluators.phone import eval_phone
metric = eval_phone(models, dataloader, recog_params, epoch=epoch)
logger.info('PER (%s, ep:%d): %.2f %%' % (dataloader.set, epoch, metric))
elif args.metric == 'ppl':
from neural_sp.evaluators.ppl import eval_ppl
metric = eval_ppl(models, dataloader, batch_size=args.batch_size)[0]
logger.info('PPL (%s, ep:%d): %.3f' % (dataloader.set, epoch, metric))
elif args.metric == 'loss':
from neural_sp.evaluators.ppl import eval_ppl
metric = eval_ppl(models, dataloader, batch_size=args.batch_size)[1]
logger.info('Loss (%s, ep:%d): %.5f' % (dataloader.set, epoch, metric))
elif args.metric == 'accuracy':
from neural_sp.evaluators.accuracy import eval_accuracy
metric = eval_accuracy(models, dataloader, batch_size=args.batch_size)
logger.info('Accuracy (%s, ep:%d): %.3f' % (dataloader.set, epoch, metric))
elif args.metric == 'bleu':
from neural_sp.evaluators.wordpiece_bleu import eval_wordpiece_bleu
metric = eval_wordpiece_bleu(models, dataloader, recog_params, epoch=epoch)
logger.info('BLEU (%s, ep:%d): %.3f' % (dataloader.set, epoch, metric))
else:
raise NotImplementedError(args.metric)
return metric
if __name__ == '__main__':
# Setting for profiling
pr = cProfile.Profile()
save_path = pr.runcall(main)
pr.dump_stats(os.path.join(save_path, 'train.profile'))
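# --- Illustrative sketch, not part of the original script ---
# The training loop above scales each micro-batch loss by accum_grad_n_steps, calls
# backward() on every iteration, and only steps the optimizer/scheduler once enough
# gradients have accumulated. The helper below shows that pattern in plain PyTorch;
# model, dataloader, and optimizer are placeholders assumed only for this example.
def _gradient_accumulation_sketch(model, dataloader, optimizer,
                                  accum_grad_n_steps=4, clip_grad_norm=5.0):
    accum_n_steps = 0
    for batch in dataloader:
        accum_n_steps += 1
        loss = model(batch)                     # forward pass returning a scalar loss
        (loss / accum_grad_n_steps).backward()  # scale so accumulated gradients average out
        if accum_n_steps >= accum_grad_n_steps:
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm)
            optimizer.step()
            optimizer.zero_grad()
            accum_n_steps = 0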
|
the-stack_106_13650
|
class Query(object):
def __init__(self, query, q_type, delimeter):
"""
<str> query : Query in string format from the command line.
<str> q_type : Option from the command line that represents the type of query.
<str> delimeter : Delimiter describing how the query string is formatted.
"""
if q_type == '-d':
query = query.split(delimeter)
self.query = {
"year": int(query[0]),
"month": int(query[1]),
"day": int(query[2])
}
def items(self):
return self.query.items()
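# --- Illustrative example, not part of the original file ---
# How this class appears intended to be used with the '-d' option; the query string
# and delimiter are made up for this example.
# >>> q = Query('2019-07-21', '-d', '-')
# >>> dict(q.items())
# {'year': 2019, 'month': 7, 'day': 21}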
|
the-stack_106_13655
|
from .utils import ShellParser
from ..exceptions import UnknownMethod, ShellError
class Parser(ShellParser):
"""Extract text from pdf files using either the ``pdftotext`` method
(default) or the ``pdfminer`` method.
"""
def extract(self, filename, method='', **kwargs):
if method == '' or method == 'pdftotext':
try:
return self.extract_pdftotext(filename, **kwargs)
except ShellError as ex:
# If pdftotext isn't installed and the pdftotext method
# wasn't specified, then gracefully fallback to using
# pdfminer instead.
if method == '' and ex.is_not_installed():
return self.extract_pdfminer(filename, **kwargs)
else:
raise ex
elif method == 'pdfminer':
return self.extract_pdfminer(filename, **kwargs)
else:
raise UnknownMethod(method)
def extract_pdftotext(self, filename, **kwargs):
"""Extract text from pdfs using the pdftotext command line utility."""
if 'layout' in kwargs:
args = ['pdftotext', '-layout', filename, '-']
else:
args = ['pdftotext', filename, '-']
stdout, _ = self.run(args)
return stdout
def extract_pdfminer(self, filename, **kwargs):
"""Extract text from pdfs using pdfminer."""
stdout, _ = self.run(['pdf2txt.py', filename])
return stdout
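# --- Illustrative sketch, not part of the original file ---
# Typical usage based on the methods above; the Parser instantiation context and the
# file name are assumptions for this example.
# parser = Parser()
# text = parser.extract('report.pdf')                       # pdftotext, falls back to pdfminer
# text_layout = parser.extract('report.pdf', layout=True)   # keep the original page layout
# text_miner = parser.extract('report.pdf', method='pdfminer')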
|
the-stack_106_13656
|
"""Misc util objects"""
from operator import attrgetter, itemgetter
import inspect
import re
import itertools
import functools
import types
from typing import Mapping, Iterable
class Literal:
"""An object to indicate that the value should be considered literally.
>>> t = Literal(42)
>>> t.get_val()
42
>>> t()
42
"""
def __init__(self, val):
self.val = val
def get_val(self):
"""Get the value wrapped by Literal instance.
One might want to use ``literal.get_val()`` instead of ``literal()`` to get the
value a ``Literal`` is wrapping, because ``.get_val`` is more explicit.
That said, with a bit of hesitation, we allow the ``literal()`` form as well
since it is useful in situations where we need to use a callback function to
get a value.
"""
return self.val
__call__ = get_val
def dflt_idx_preprocessor(obj, idx):
if isinstance(idx, str) and str.isdigit(idx):
idx = int(idx)
if isinstance(idx, int) or isinstance(obj, Mapping):
return obj[idx]
elif hasattr(obj, idx):
return getattr(obj, idx)
else:
raise KeyError(f"Couldn't extract a {idx} from object {obj}")
def path_extractor(tree, path, getter=dflt_idx_preprocessor, *, path_sep='.'):
"""Get items from a tree-structured object from a sequence of tree-traversal indices.
:param tree: The object you want to extract values from:
Can be any object you want, as long as the indices listed by path and how to get
the items indexed are well specified by ``path`` and ``getter``.
:param path: An iterable of indices that define how to traverse the tree to get
to desired item(s). If this iterable is a string, the ``path_sep`` argument
will be used to transform it into a tuple of string indices.
:param getter: A ``(tree, idx)`` function that specifies how to extract item ``idx``
from the ``tree`` object.
:param path_sep: The string separator to use if ``path`` is a string
:return: The ``tree`` item(s) referenced by ``path``
>>> tree = {'a': {'b': [0, {'c': [1, 2, 3]}]}}
>>> path_extractor(tree, path=['a'])
{'b': [0, {'c': [1, 2, 3]}]}
>>> path_extractor(tree, path=['a', 'b'])
[0, {'c': [1, 2, 3]}]
>>> path_extractor(tree, path=['a', 'b', 1])
{'c': [1, 2, 3]}
>>> path_extractor(tree, path=['a', 'b', 1, 'c'])
[1, 2, 3]
>>> path_extractor(tree, path=('a', 'b', 1, 'c', 2))
3
You could do the same by specifying the path as a dot-separated string.
>>> path_extractor(tree, 'a.b.1.c.2')
3
You can use any separation you want.
>>> path_extractor(tree, 'a/b/1/c/2', path_sep='/')
3
You can also use `*` to indicate that you want to keep all the nodes of a given
level.
>>> tree = {'a': [{'b': [1, 10]}, {'b': [2, 20]}, {'b': [3, 30]}]}
>>> path_extractor(tree, 'a.*.b.1')
[10, 20, 30]
A generalization of `*` is to specify a callable, which will be interpreted as
a filter function.
>>> tree = {'a': [{'b': 1}, {'c': 2}, {'b': 3}, {'b': 4}]}
>>> path_extractor(tree, ['a', lambda x: 'b' in x])
[{'b': 1}, {'b': 3}, {'b': 4}]
>>> path_extractor(tree, ['a', lambda x: 'b' in x, 'b'])
[1, 3, 4]
"""
if isinstance(path, str):
path = path.split(path_sep)
if len(path) == 0:
return tree
else:
idx, *path = path # extract path[0] as idx & update path to path[1:]
if isinstance(idx, str) and idx == '*':
idx = lambda x: True # use a filter function (but filter everything in)
if callable(idx) and not isinstance(idx, Literal):
# If idx is a non-literal callable, consider it as a filter to be applied
# to iter(tree)
# TODO: https://github.com/i2mint/i2/issues/27
return [
path_extractor(sub_tree, path, getter) for sub_tree in filter(idx, tree)
]
else:
if isinstance(idx, Literal):
# Use of Literal is meant to get us out of trouble when we want to use a
# callable as an actual index, not as a filter.
idx = idx.get_val()
tree = getter(tree, idx)
return path_extractor(tree, path, getter)
# Note: Specialization of path_extractor for Mappings
def dp_get(d, dot_path):
"""Get stuff from a dict (or any Mapping), using dot_paths (i.e. 'foo.bar' instead of
['foo']['bar'])
>>> d = {'foo': {'bar': 2, 'alice': 'bob'}, 3: {'pi': 3.14}}
>>> assert dp_get(d, 'foo') == {'bar': 2, 'alice': 'bob'}
>>> assert dp_get(d, 'foo.bar') == 2
>>> assert dp_get(d, 'foo.alice') == 'bob'
"""
return path_extractor(d, dot_path, lambda d, k: d[k])
class lazyprop:
"""
A descriptor implementation of lazyprop (cached property), adapted from David Beazley's "Python Cookbook".
>>> class Test:
... def __init__(self, a):
... self.a = a
... @lazyprop
... def len(self):
... print('generating "len"')
... return len(self.a)
>>> t = Test([0, 1, 2, 3, 4])
>>> t.__dict__
{'a': [0, 1, 2, 3, 4]}
>>> t.len
generating "len"
5
>>> t.__dict__
{'a': [0, 1, 2, 3, 4], 'len': 5}
>>> t.len
5
>>> # But be careful when using lazyprop: if ``a`` changes, the cached value is not recomputed until the property is deleted
>>> t.a = [0, 1, 2] # if we change a...
>>> t.len # ... we still get the old cached value of len
5
>>> del t.len # if we delete the len prop
>>> t.len # ... then len is recomputed again
generating "len"
3
"""
def __init__(self, func):
self.func = func
def __get__(self, instance, cls):
if instance is None:
return self
else:
value = self.func(instance)
setattr(instance, self.func.__name__, value)
return value
class FrozenHashError(TypeError):
pass
class FrozenDict(dict):
"""An immutable dict subtype that is hashable and can itself be used
as a :class:`dict` key or :class:`set` entry. What
:class:`frozenset` is to :class:`set`, FrozenDict is to
:class:`dict`.
There was once an attempt to introduce such a type to the standard
library, but it was rejected: `PEP 416 <https://www.python.org/dev/peps/pep-0416/>`_.
Because FrozenDict is a :class:`dict` subtype, it automatically
works everywhere a dict would, including JSON serialization.
"""
__slots__ = ('_hash',)
def updated(self, *a, **kw):
"""Make a copy and add items from a dictionary or iterable (and/or
keyword arguments), overwriting values under an existing
key. See :meth:`dict.update` for more details.
"""
data = dict(self)
data.update(*a, **kw)
return type(self)(data)
@classmethod
def fromkeys(cls, keys, value=None):
# one of the lesser known and used/useful dict methods
return cls(dict.fromkeys(keys, value))
def __repr__(self):
cn = self.__class__.__name__
return '%s(%s)' % (cn, dict.__repr__(self))
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def __hash__(self):
try:
ret = self._hash
except AttributeError:
try:
ret = self._hash = hash(frozenset(self.items()))
except Exception as e:
ret = self._hash = FrozenHashError(e)
if ret.__class__ is FrozenHashError:
raise ret
return ret
def __copy__(self):
return self # immutable types don't copy, see tuple's behavior
# block everything else
def _raise_frozen_typeerror(self, *a, **kw):
'raises a TypeError, because FrozenDicts are immutable'
raise TypeError('%s object is immutable' % self.__class__.__name__)
__setitem__ = __delitem__ = update = _raise_frozen_typeerror
setdefault = pop = popitem = clear = _raise_frozen_typeerror
del _raise_frozen_typeerror
frozendict = FrozenDict # alias to align with frozenset
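# --- Illustrative example, not part of the original module ---
# FrozenDict hashes by its items, so equal contents can be used interchangeably as dict
# keys or set members; mutation raises TypeError and updated() returns a modified copy.
# >>> fd = FrozenDict({'a': 1})
# >>> {fd: 'value'}[FrozenDict({'a': 1})]
# 'value'
# >>> fd.updated(b=2)
# FrozenDict({'a': 1, 'b': 2})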
########################################################################################################################
function_type = type(
lambda x: x
) # using this instead of callable() because classes are callable, for instance
class NoDefault(object):
def __repr__(self):
return 'no_default'
no_default = NoDefault()
class imdict(dict):
def __hash__(self):
return id(self)
def _immutable(self, *args, **kws):
raise TypeError('object is immutable')
__setitem__ = _immutable
__delitem__ = _immutable
clear = _immutable
update = _immutable
setdefault = _immutable
pop = _immutable
popitem = _immutable
def inject_method(self, method_function, method_name=None):
"""
method_function could be:
* a function
* a {method_name: function, ...} dict (for multiple injections)
* a list of functions or (function, method_name) pairs
"""
if isinstance(method_function, function_type):
if method_name is None:
method_name = method_function.__name__
setattr(self, method_name, types.MethodType(method_function, self))
else:
if isinstance(method_function, dict):
method_function = [
(func, func_name) for func_name, func in method_function.items()
]
for method in method_function:
if isinstance(method, tuple) and len(method) == 2:
self = inject_method(self, method[0], method[1])
else:
self = inject_method(self, method)
return self
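# --- Illustrative example, not part of the original module ---
# inject_method binds plain functions onto an existing instance; the Thing class and
# greet function below exist only for this example.
# >>> class Thing:
# ...     pass
# >>> def greet(self):
# ...     return 'hello'
# >>> t = inject_method(Thing(), greet)
# >>> t.greet()
# 'hello'
# >>> t = inject_method(t, {'shout': lambda self: 'HELLO'})
# >>> t.shout()
# 'HELLO'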
########################################################################################################################
def get_function_body(func):
source_lines = inspect.getsourcelines(func)[0]
source_lines = itertools.dropwhile(lambda x: x.startswith('@'), source_lines)
line = next(source_lines).strip()
if not line.startswith('def ') and not line.startswith('class'):
return line.rsplit(':')[-1].strip()
elif not line.endswith(':'):
for line in source_lines:
line = line.strip()
if line.endswith(':'):
break
# Handle functions that are not one-liners
first_line = next(source_lines)
# Find the indentation of the first line
indentation = len(first_line) - len(first_line.lstrip())
return ''.join(
[first_line[indentation:]] + [line[indentation:] for line in source_lines]
)
class ExistingArgument(ValueError):
pass
class MissingArgument(ValueError):
pass
def make_sentinel(name='_MISSING', var_name=None):
"""Creates and returns a new **instance** of a new class, suitable for
usage as a "sentinel", a kind of singleton often used to indicate
a value is missing when ``None`` is a valid input.
Args:
name (str): Name of the Sentinel
var_name (str): Set this to the name of the variable in
its respective module to enable pickleability.
>>> make_sentinel(var_name='_MISSING')
_MISSING
The most common use cases here in boltons are as default values
for optional function arguments, partly because of its
less-confusing appearance in automatically generated
documentation. Sentinels also function well as placeholders in queues
and linked lists.
.. note::
By design, additional calls to ``make_sentinel`` with the same
values will not produce equivalent objects.
>>> make_sentinel('TEST') == make_sentinel('TEST')
False
>>> type(make_sentinel('TEST')) == type(make_sentinel('TEST'))
False
"""
class Sentinel(object):
def __init__(self):
self.name = name
self.var_name = var_name
def __repr__(self):
if self.var_name:
return self.var_name
return '%s(%r)' % (self.__class__.__name__, self.name)
if var_name:
def __reduce__(self):
return self.var_name
def __nonzero__(self):
return False
__bool__ = __nonzero__
return Sentinel()
def _indent(text, margin, newline='\n', key=bool):
'based on boltons.strutils.indent'
indented_lines = [
(margin + line if key(line) else line) for line in text.splitlines()
]
return newline.join(indented_lines)
NO_DEFAULT = make_sentinel(var_name='NO_DEFAULT')
from inspect import formatannotation
def inspect_formatargspec(
args,
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=(),
kwonlydefaults={},
annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation,
):
"""Copy formatargspec from python 3.7 standard library.
Python 3 has deprecated formatargspec and requested that Signature
be used instead; however, this requires a full reimplementation
of formatargspec() in terms of creating Parameter objects and such.
Instead of introducing all the object-creation overhead and having
to reinvent from scratch, just copy their compatibility routine.
"""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
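# --- Illustrative example, not part of the original module ---
# inspect_formatargspec renders an argspec back into signature text, e.g.:
# >>> inspect_formatargspec(['a', 'b'], varargs='args', defaults=(1,),
# ...                       kwonlyargs=['c'], kwonlydefaults={'c': 2})
# '(a, b=1, *args, c=2)'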
class FunctionBuilder(object):
"""The FunctionBuilder type provides an interface for programmatically
creating new functions, either based on existing functions or from
scratch.
Note: Based on https://boltons.readthedocs.io
Values are passed in at construction or set as attributes on the
instance. For creating a new function based of an existing one,
see the :meth:`~FunctionBuilder.from_func` classmethod. At any
point, :meth:`~FunctionBuilder.get_func` can be called to get a
newly compiled function, based on the values configured.
>>> fb = FunctionBuilder('return_five', doc='returns the integer 5',
... body='return 5')
>>> f = fb.get_func()
>>> f()
5
>>> fb.varkw = 'kw'
>>> f_kw = fb.get_func()
>>> f_kw(ignored_arg='ignored_val')
5
Note that function signatures themselves changed quite a bit in
Python 3, so several arguments are only applicable to
FunctionBuilder in Python 3. Except for *name*, all arguments to
the constructor are keyword arguments.
Args:
name (str): Name of the function.
doc (str): `Docstring`_ for the function, defaults to empty.
module (str): Name of the module from which this function was
imported. Defaults to None.
body (str): String version of the code representing the body
of the function. Defaults to ``'pass'``, which will result
in a function which does nothing and returns ``None``.
args (list): List of argument names, defaults to empty list,
denoting no arguments.
varargs (str): Name of the catch-all variable for positional
arguments. E.g., "args" if the resultant function is to have
``*args`` in the signature. Defaults to None.
varkw (str): Name of the catch-all variable for keyword
arguments. E.g., "kwargs" if the resultant function is to have
``**kwargs`` in the signature. Defaults to None.
defaults (tuple): A tuple containing default argument values for
those arguments that have defaults.
kwonlyargs (list): Argument names which are only valid as
keyword arguments. **Python 3 only.**
kwonlydefaults (dict): A mapping, same as normal *defaults*,
but only for the *kwonlyargs*. **Python 3 only.**
annotations (dict): Mapping of type hints and so
forth. **Python 3 only.**
filename (str): The filename that will appear in
tracebacks. Defaults to "boltons.funcutils.FunctionBuilder".
indent (int): Number of spaces with which to indent the
function *body*. Values less than 1 will result in an error.
dict (dict): Any other attributes which should be added to the
functions compiled with this FunctionBuilder.
All of these arguments are also made available as attributes which
can be mutated as necessary.
.. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python
"""
_argspec_defaults = {
'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict,
}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return dict((attr, getattr(argspec, attr)) for attr in cls._argspec_defaults)
_defaults = {
'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
'annotations': dict,
'filename': lambda: 'boltons.funcutils.FunctionBuilder',
}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
if with_annotations:
annotations = self.annotations
else:
annotations = {}
return inspect_formatargspec(
self.args, self.varargs, self.varkw, [], self.kwonlyargs, {}, annotations
)
_KWONLY_MARKER = re.compile(
r'''
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
''',
re.VERBOSE,
)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = dict((arg, arg) for arg in self.kwonlyargs)
formatters['formatvalue'] = lambda value: '=' + value
sig = inspect_formatargspec(
self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters,
)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
"""Create a new FunctionBuilder instance based on an existing
function. The original function will not be stored or
modified.
"""
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError('expected callable object, not %r' % (func,))
if isinstance(func, functools.partial):
kwargs = {
'name': func.__name__,
'doc': func.__doc__,
'module': getattr(func, '__module__', None), # e.g., method_descriptor
'annotations': getattr(func, '__annotations__', {}),
'dict': getattr(func, '__dict__', {}),
}
kwargs.update(cls._argspec_to_dict(func))
if inspect.iscoroutinefunction(func):
kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
"""Compile and return a new function based on the current values of
the FunctionBuilder.
Args:
execdict (dict): The dictionary representing the scope in
which the compilation should take place. Defaults to an empty
dict.
add_source (bool): Whether to add the source used to a
special ``__source__`` attribute on the resulting
function. Defaults to True.
with_dict (bool): Add any custom attributes, if
applicable. Defaults to True.
To see an example of usage, see the implementation of
:func:`~boltons.funcutils.wraps`.
"""
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(
name=name,
sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc,
body=body,
)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults
func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
"""Get a dictionary of function arguments with defaults and the
respective values.
"""
ret = dict(
reversed(list(zip(reversed(self.args), reversed(self.defaults or []))))
)
kwonlydefaults = getattr(self, 'kwonlydefaults', None)
if kwonlydefaults:
ret.update(kwonlydefaults)
return ret
def get_arg_names(self, only_required=False):
arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ()))
if only_required:
defaults_dict = self.get_defaults_dict()
arg_names = tuple([an for an in arg_names if an not in defaults_dict])
return arg_names
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument(
'arg %r already in func %s arg list' % (arg_name, self.name)
)
if arg_name in self.kwonlyargs:
raise ExistingArgument(
'arg %r already in func %s kwonly arg list' % (arg_name, self.name)
)
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
return
def remove_arg(self, arg_name):
"""Remove an argument from this FunctionBuilder's argument list. The
resulting function will have one less argument per call to
this function.
Args:
arg_name (str): The name of the argument to remove.
Raises a :exc:`ValueError` if the argument is not present.
"""
args = self.args
d_dict = self.get_defaults_dict()
try:
args.remove(arg_name)
except ValueError:
try:
self.kwonlyargs.remove(arg_name)
except (AttributeError, ValueError):
# py2, or py3 and missing from both
exc = MissingArgument(
'arg %r not found in %s argument list:'
' %r' % (arg_name, self.name, args)
)
exc.arg_name = arg_name
raise exc
else:
self.kwonlydefaults.pop(arg_name, None)
else:
d_dict.pop(arg_name, None)
self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
return
def _compile(self, src, execdict):
filename = '<%s-%d>' % (self.filename, next(self._compile_count),)
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
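# --- Illustrative example, not part of the original module ---
# Building a function from scratch and then extending its signature with add_arg();
# the names and bodies are made up for this example.
# >>> fb = FunctionBuilder('add', args=['a', 'b'], body='return a + b')
# >>> fb.get_func()(2, 3)
# 5
# >>> fb.add_arg('c', default=10)
# >>> fb.body = 'return a + b + c'
# >>> fb.get_func()(2, 3)
# 15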
|
the-stack_106_13657
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
'''The Azure Command-line tool.
This tool provides a command-line interface to Azure's management and storage
APIs.
'''
import pkg_resources
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <[email protected]>"
__version__ = "2.0.61"
|
the-stack_106_13660
|
# -*- coding: utf-8 -*-
"""This module defines functions for executing DSSP program and parsing
its output."""
import os.path
import numpy as np
from caviar.prody_parser.atomic import ATOMIC_FIELDS
from caviar.prody_parser.atomic import AtomGroup
from caviar.prody_parser.utilities import gunzip, which, PLATFORM
from .pdbfile import parsePDB
from .localpdb import fetchPDB
__all__ = ['execDSSP', 'parseDSSP', 'performDSSP']
def execDSSP(pdb, outputname=None, outputdir=None, stderr=True):
"""Execute DSSP for given *pdb*. *pdb* can be a PDB identifier or a PDB
file path. If *pdb* is a compressed file, it will be decompressed using
Python :mod:`gzip` library. When no *outputname* is given, output name
will be :file:`pdb.dssp`. :file:`.dssp` extension will be appended
automatically to *outputname*. If :file:`outputdir` is given, DSSP
output and uncompressed PDB file will be written into this folder.
Upon successful execution of :command:`dssp pdb > out` command, output
filename is returned. On Linux platforms, when *stderr* is false,
standard error messages are suppressed, i.e.
``dssp pdb > outputname 2> /dev/null``.
For more information on DSSP see http://swift.cmbi.ru.nl/gv/dssp/.
If you benefited from DSSP, please consider citing [WK83]_.
.. [WK83] Kabsch W, Sander C. Dictionary of protein secondary structure:
pattern recognition of hydrogen-bonded and geometrical features.
*Biopolymers* **1983** 22:2577-2637."""
dssp = which('mkdssp')
if dssp is None:
dssp = which('dssp')
if dssp is None:
raise EnvironmentError('command not found: dssp executable is not '
'found in one of system paths')
assert outputname is None or isinstance(outputname, str),\
'outputname must be a string'
assert outputdir is None or isinstance(outputdir, str),\
'outputdir must be a string'
if not os.path.isfile(pdb):
pdb = fetchPDB(pdb, compressed=False)
if pdb is None:
raise ValueError('pdb is not a valid PDB identifier or filename')
if os.path.splitext(pdb)[1] == '.gz':
if outputdir is None:
pdb = gunzip(pdb, os.path.splitext(pdb)[0])
else:
pdb = gunzip(pdb, os.path.join(outputdir,
os.path.split(os.path.splitext(pdb)[0])[1]))
if outputdir is None:
outputdir = '.'
if outputname is None:
out = os.path.join(outputdir,
os.path.splitext(os.path.split(pdb)[1])[0] +
'.dssp')
else:
out = os.path.join(outputdir, outputname + '.dssp')
if not stderr and PLATFORM != 'Windows':
status = os.system('{0} {1} > {2} 2> /dev/null'.format(
dssp, pdb, out))
else:
status = os.system('{0} {1} > {2}'.format(dssp, pdb, out))
if status == 0:
return out
def parseDSSP(dssp, ag, parseall=False):
"""Parse DSSP data from file *dssp* into :class:`~.AtomGroup` instance
*ag*. DSSP output file must be in the new format used from July 1995
and onwards. When *dssp* file is parsed, following attributes are added
to *ag*:
* *dssp_resnum*: DSSP's sequential residue number, starting at the first
residue actually in the data set and including chain breaks; this number
is used to refer to residues throughout.
* *dssp_acc*: number of water molecules in contact with this residue \*10.
or residue water exposed surface in Angstrom^2.
* *dssp_kappa*: virtual bond angle (bend angle) defined by the three Cα
atoms of residues I-2,I,I+2. Used to define bend (structure code 'S').
* *dssp_alpha*: virtual torsion angle (dihedral angle) defined by the four
Cα atoms of residues I-1,I,I+1,I+2.Used to define chirality (structure
code '+' or '-').
* *dssp_phi* and *dssp_psi*: IUPAC peptide backbone torsion angles
The following attributes are parsed when ``parseall=True`` is passed:
* *dssp_bp1*, *dssp_bp2*, and *dssp_sheet_label*: residue number of first
and second bridge partner followed by one letter sheet label
* *dssp_tco*: cosine of angle between C=O of residue I and C=O of residue
I-1. For α-helices, TCO is near +1, for β-sheets TCO is near -1. Not
used for structure definition.
* *dssp_NH_O_1_index*, *dssp_NH_O_1_energy*, etc.: hydrogen bonds; e.g.
-3,-1.4 means: if this residue is residue i then N-H of I is h-bonded to
C=O of I-3 with an electrostatic H-bond energy of -1.4 kcal/mol. There
are two columns for each type of H-bond, to allow for bifurcated H-bonds.
See http://swift.cmbi.ru.nl/gv/dssp/DSSP_3.html for details."""
if not os.path.isfile(dssp):
raise IOError('{0} is not a valid file path'.format(dssp))
if not isinstance(ag, AtomGroup):
raise TypeError('ag argument must be an AtomGroup instance')
dssp = open(dssp)
n_atoms = ag.numAtoms()
NUMBER = np.zeros(n_atoms, int)
SHEETLABEL = np.zeros(n_atoms, np.array(['a']).dtype.char + '1')
ACC = np.zeros(n_atoms, float)
KAPPA = np.zeros(n_atoms, float)
ALPHA = np.zeros(n_atoms, float)
PHI = np.zeros(n_atoms, float)
PSI = np.zeros(n_atoms, float)
if parseall:
BP1 = np.zeros(n_atoms, int)
BP2 = np.zeros(n_atoms, int)
NH_O_1 = np.zeros(n_atoms, int)
NH_O_1_nrg = np.zeros(n_atoms, float)
O_HN_1 = np.zeros(n_atoms, int)
O_HN_1_nrg = np.zeros(n_atoms, float)
NH_O_2 = np.zeros(n_atoms, int)
NH_O_2_nrg = np.zeros(n_atoms, float)
O_HN_2 = np.zeros(n_atoms, int)
O_HN_2_nrg = np.zeros(n_atoms, float)
TCO = np.zeros(n_atoms, float)
ag.setSecstrs(np.zeros(n_atoms, dtype=ATOMIC_FIELDS['secondary'].dtype))
for line in dssp:
if line.startswith(' # RESIDUE'):
break
for line in dssp:
if line[13] == '!':
continue
res = ag[(line[11], int(line[5:10]), line[10].strip())]
if res is None:
continue
indices = res.getIndices()
res.setSecstrs(line[16].strip())
NUMBER[indices] = int(line[:5])
SHEETLABEL[indices] = line[33].strip()
ACC[indices] = int(line[35:38])
KAPPA[indices] = float(line[91:97])
ALPHA[indices] = float(line[97:103])
PHI[indices] = float(line[103:109])
PSI[indices] = float(line[109:115])
if parseall:
BP1[indices] = int(line[25:29])
BP2[indices] = int(line[29:33])
NH_O_1[indices] = int(line[38:45])
NH_O_1_nrg[indices] = float(line[46:50])
O_HN_1[indices] = int(line[50:56])
O_HN_1_nrg[indices] = float(line[57:61])
NH_O_2[indices] = int(line[61:67])
NH_O_2_nrg[indices] = float(line[68:72])
O_HN_2[indices] = int(line[72:78])
O_HN_2_nrg[indices] = float(line[79:83])
TCO[indices] = float(line[85:91])
ag.setData('dssp_resnum', NUMBER)
ag.setData('dssp_sheet_label', SHEETLABEL)
ag.setData('dssp_acc', ACC)
ag.setData('dssp_kappa', KAPPA)
ag.setData('dssp_alpha', ALPHA)
ag.setData('dssp_phi', PHI)
ag.setData('dssp_psi', PSI)
if parseall:
ag.setData('dssp_bp1', BP1)
ag.setData('dssp_bp2', BP2)
ag.setData('dssp_NH_O_1_index', NH_O_1)
ag.setData('dssp_NH_O_1_energy', NH_O_1_nrg)
ag.setData('dssp_O_NH_1_index', O_HN_1)
ag.setData('dssp_O_NH_1_energy', O_HN_1_nrg)
ag.setData('dssp_NH_O_2_index', NH_O_2)
ag.setData('dssp_NH_O_2_energy', NH_O_2_nrg)
ag.setData('dssp_O_NH_2_index', O_HN_2)
ag.setData('dssp_O_NH_2_energy', O_HN_2_nrg)
ag.setData('dssp_tco', TCO)
return ag
def performDSSP(pdb, parseall=False, stderr=True):
"""Perform DSSP calculations and parse results. DSSP data is returned
in an :class:`~.AtomGroup` instance. See also :func:`execDSSP`
and :func:`parseDSSP`."""
pdb = fetchPDB(pdb, compressed=False)
return parseDSSP(execDSSP(pdb, stderr=stderr), parsePDB(pdb), parseall)
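# --- Illustrative sketch, not part of the original module ---
# Typical usage based on the docstrings above, assuming a local DSSP executable and
# that fetchPDB can retrieve the entry; the PDB identifier is a placeholder.
# ag = performDSSP('1ubi')
# ag.getData('dssp_acc')                            # solvent accessibility per atom
# ag.getData('dssp_phi'), ag.getData('dssp_psi')    # backbone torsion angles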
|
the-stack_106_13661
|
import re
import math
import numpy as np
from ._masker import Masker
from .._serializable import Serializer, Deserializer
from ..utils import safe_isinstance
from ..utils.transformers import parse_prefix_suffix_for_tokenizer, SENTENCEPIECE_TOKENIZERS
class Text(Masker):
""" This masks out tokens according to the given tokenizer.
The masked variables are
output_type : "string" (default) or "token_ids"
"""
def __init__(self, tokenizer=None, mask_token=None, collapse_mask_token="auto", output_type="string"):
""" Build a new Text masker given an optional passed tokenizer.
Parameters
----------
tokenizer : callable or None
The tokenizer used to break apart strings during masking. The passed tokenizer must support a minimal
subset of the HuggingFace Transformers PreTrainedTokenizerBase API. This minimal subset means the
tokenizer must return a dictionary with 'input_ids' and then either include
an 'offset_mapping' entry in the same dictionary or provide a .convert_ids_to_tokens or .decode method.
mask_token : string, int, or None
The sub-string or integer token id used to mask out portions of a string. If None it will use the
tokenizer's .mask_token attribute, if defined, or "..." if the tokenizer does not have a .mask_token
attribute.
collapse_mask_token : True, False, or "auto"
If True, when several consecutive tokens are masked only one mask token is used to replace the entire
series of original tokens.
"""
if tokenizer is None:
self.tokenizer = SimpleTokenizer()
elif callable(tokenizer):
self.tokenizer = tokenizer
else:
try:
self.tokenizer = SimpleTokenizer(tokenizer)
except:
raise Exception( # pylint: disable=raise-missing-from
"The passed tokenizer cannot be wrapped as a masker because it does not have a __call__ " + \
"method, not can it be interpreted as a splitting regexp!"
)
self.output_type = output_type
self.collapse_mask_token = collapse_mask_token
self.input_mask_token = mask_token
self.mask_token = mask_token # could be recomputed later in this function
self.mask_token_id = mask_token if isinstance(mask_token, int) else None
parsed_tokenizer_dict = parse_prefix_suffix_for_tokenizer(self.tokenizer)
self.keep_prefix = parsed_tokenizer_dict['keep_prefix']
self.keep_suffix = parsed_tokenizer_dict['keep_suffix']
# self.prefix_strlen = parsed_tokenizer_dict['prefix_strlen']
# self.suffix_strlen = parsed_tokenizer_dict['suffix_strlen']
#null_tokens = parsed_tokenizer_dict['null_tokens']
self.text_data = True
if mask_token is None:
if getattr(self.tokenizer, "mask_token", None) is not None:
self.mask_token = self.tokenizer.mask_token
self.mask_token_id = getattr(self.tokenizer, "mask_token_id", None)
if self.collapse_mask_token == "auto":
self.collapse_mask_token = False
else:
self.mask_token = "..."
else:
self.mask_token = mask_token
if self.mask_token_id is None:
self.mask_token_id = self.tokenizer(self.mask_token)["input_ids"][self.keep_prefix]
if self.collapse_mask_token == "auto":
self.collapse_mask_token = True
# assign mask token segment
# if self.keep_suffix > 0:
# self.mask_token_segment = self.token_segments(self.mask_token)[self.keep_prefix:-self.keep_suffix]
# else:
# self.mask_token_segment = self.token_segments(self.mask_token)[self.keep_prefix:]
# note if this masker can use a different background for different samples
self.fixed_background = self.mask_token_id is None
self.default_batch_size = 5
# cache variables
self._s = None
self._tokenized_s_full = None
self._tokenized_s = None
self._segments_s = None
def __call__(self, mask, s):
mask = self._standardize_mask(mask, s)
self._update_s_cache(s)
# if we have a fixed prefix or suffix then we need to grow the mask to account for that
if self.keep_prefix > 0 or self.keep_suffix > 0:
mask = mask.copy()
mask[:self.keep_prefix] = True
mask[-self.keep_suffix:] = True
if self.output_type == "string":
# if self.mask_token == "":
# out = self._segments_s[mask]
# else:
# #out = np.array([self._segments_s[i] if mask[i] else self.mask_token for i in range(len(mask))])
out_parts = []
is_previous_appended_token_mask_token = False
sep_token = getattr(self.tokenizer, "sep_token", None)
for i, v in enumerate(mask):
# mask ignores separator tokens and keeps them unmasked
if v or sep_token == self._segments_s[i]:
out_parts.append(self._segments_s[i])
is_previous_appended_token_mask_token = False
else:
if not self.collapse_mask_token or (self.collapse_mask_token and not is_previous_appended_token_mask_token):
out_parts.append(" " + self.mask_token)
is_previous_appended_token_mask_token = True
out = "".join(out_parts)
# tokenizers which treat spaces as parts of the tokens and don't replace the special token while decoding need further postprocessing
# by replacing whitespace encoded as '_' for sentencepiece tokenizer or 'Ġ' for sentencepiece like encoding (GPT2TokenizerFast)
# with ' '
if safe_isinstance(self.tokenizer, SENTENCEPIECE_TOKENIZERS):
out = out.replace('▁', ' ')
# replace sequence of spaces with a single space and strip beginning and end spaces
out = re.sub(r"[\s]+", " ", out).strip() # TODOmaybe: should do strip?? (originally because of fast vs. slow tokenizer differences)
else:
if self.mask_token_id is None:
out = self._tokenized_s[mask]
else:
out = np.array([self._tokenized_s[i] if mask[i] else self.mask_token_id for i in range(len(mask))])
# print("mask len", len(out))
# # crop the output if needed
# if self.max_length is not None and len(out) > self.max_length:
# new_out = np.zeros(self.max_length)
# new_out[:] = out[:self.max_length]
# new_out[-self.keep_suffix:] = out[-self.keep_suffix:]
# out = new_out
# for some sentences with strange configurations around the separator tokens, tokenizer encoding/decoding may contain
# extra unnecessary tokens, for example ''. you may want to strip out spaces adjacent to separator tokens. Refer to PR
# for more details.
return (np.array([out]),)
def data_transform(self, s):
""" Called by explainers to allow us to convert data to better match masking (here this means tokenizing).
"""
return (self.token_segments(s)[0],)
def token_segments(self, s):
""" Returns the substrings associated with each token in the given string.
"""
try:
token_data = self.tokenizer(s, return_offsets_mapping=True)
offsets = token_data["offset_mapping"]
offsets = [(0, 0) if o is None else o for o in offsets]
parts = [s[offsets[i][0]:max(offsets[i][1], offsets[i+1][0])] for i in range(len(offsets)-1)]
parts.append(s[offsets[len(offsets)-1][0]:offsets[len(offsets)-1][1]])
return parts, token_data["input_ids"]
except (NotImplementedError, TypeError): # catch lack of support for return_offsets_mapping
token_ids = self.tokenizer(s)['input_ids']
if hasattr(self.tokenizer, "convert_ids_to_tokens"):
tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
else:
tokens = [self.tokenizer.decode([id]) for id in token_ids]
if hasattr(self.tokenizer, "get_special_tokens_mask"):
special_tokens_mask = self.tokenizer.get_special_tokens_mask(token_ids, already_has_special_tokens=True)
# avoid masking separator tokens, but still mask beginning of sentence and end of sentence tokens
special_keep = [getattr(self.tokenizer, 'sep_token', None), getattr(self.tokenizer, 'mask_token', None)]
for i, v in enumerate(special_tokens_mask):
if v == 1 and (tokens[i] not in special_keep or i + 1 == len(special_tokens_mask)):
tokens[i] = ""
# add spaces to separate the tokens (since we want segments not tokens)
if safe_isinstance(self.tokenizer, SENTENCEPIECE_TOKENIZERS):
for i, v in enumerate(tokens):
if v.startswith("_"):
tokens[i] = " " + tokens[i][1:]
else:
for i, v in enumerate(tokens):
if v.startswith("##"):
tokens[i] = tokens[i][2:]
elif v != "" and i != 0:
tokens[i] = " " + tokens[i]
return tokens, token_ids
def clustering(self, s):
""" Compute the clustering of tokens for the given string.
"""
self._update_s_cache(s)
special_tokens = []
sep_token = getattr(self.tokenizer, "sep_token", None)
if sep_token is None:
special_tokens = []
else:
special_tokens = [sep_token]
# convert the text segments to tokens that the partition tree function expects
tokens = []
space_end = re.compile(r"^.*\W$")
letter_start = re.compile(r"^[A-z]")
for i, v in enumerate(self._segments_s):
if i > 0 and space_end.match(self._segments_s[i-1]) is None and letter_start.match(v) is not None and tokens[i-1] != "":
tokens.append("##" + v.strip())
else:
tokens.append(v.strip())
pt = partition_tree(tokens, special_tokens)
# use the rescaled size of the clusters as their height since the merge scores are just a
# heuristic and not scaled well
pt[:, 2] = pt[:, 3]
pt[:, 2] /= pt[:, 2].max()
return pt
# unused because restricts meaningful perturbations
# def _mark_uninvertable(self, clustering):
# """ This marks which clusters have non-invertable mappings through the tokenizer when masked.
# It seems like a bug that you can decode and then encode a set of token ids and not get what
# you started with...but this is possible with word endings in the transformers implementation
# of BERT for example. So here we mark such uninvertable clusters with negative values.
# """
# M = len(self._tokenized_s)
# assert len(clustering)+1 == M
# def recursive_mark(ind):
# if ind < M:
# return list(self._tokenized_s[ind:ind+1])
# lind = int(clustering[ind-M, 0])
# rind = int(clustering[ind-M, 1])
# ltokens = recursive_mark(lind)
# rtokens = recursive_mark(rind)
# tmp = ltokens + [self.mask_token_id]
# s2 = self.tokenizer.decode(tmp)
# e2 = self.tokenizer.encode(s2)
# if not np.all(e2[1:-1] == tmp):
# clustering[ind-M, 2] = -1 # set the distance of this cluster negative so it can't be split
# tmp = [self.mask_token_id] + rtokens
# s2 = self.tokenizer.decode(tmp)
# e2 = self.tokenizer.encode(s2)
# if not np.all(e2[1:-1] == tmp):
# clustering[ind-M, 2] = -1 # set the distance of this cluster negative so it can't be split
# return ltokens + rtokens
# recursive_mark(M+len(clustering)-1)
def _update_s_cache(self, s):
if self._s != s:
self._s = s
tokens, token_ids = self.token_segments(s)
self._tokenized_s = np.array(token_ids)
self._segments_s = np.array(tokens)
def shape(self, s):
""" The shape of what we return as a masker.
Note we only return a single sample, so there is no expectation averaging.
"""
self._update_s_cache(s)
return (1, len(self._tokenized_s))
def mask_shapes(self, s):
""" The shape of the masks we expect.
"""
self._update_s_cache(s)
return [(len(self._tokenized_s),)]
def invariants(self, s):
""" The names of the features for each mask position for the given input string.
"""
self._update_s_cache(s)
        invariants = np.zeros(len(self._tokenized_s), dtype=bool)
if self.keep_prefix > 0:
invariants[:self.keep_prefix] = True
if self.keep_suffix > 0:
invariants[-self.keep_suffix:] = True
# mark separator tokens as invariant
for i, v in enumerate(self._tokenized_s):
if v == getattr(self.tokenizer, "sep_token_id", None):
invariants[i] = True
return invariants.reshape(1, -1)
def feature_names(self, s):
""" The names of the features for each mask position for the given input string.
"""
self._update_s_cache(s)
return [[v.strip() for v in self._segments_s]]
def save(self, out_file):
""" Save a Text masker to a file stream.
"""
super().save(out_file)
with Serializer(out_file, "shap.maskers.Text", version=0) as s:
s.save("tokenizer", self.tokenizer)
s.save("mask_token", self.input_mask_token)
s.save("collapse_mask_token", self.collapse_mask_token)
s.save("output_type", self.output_type)
@classmethod
def load(cls, in_file, instantiate=True):
""" Load a Text masker from a file stream.
"""
if instantiate:
return cls._instantiated_load(in_file)
kwargs = super().load(in_file, instantiate=False)
with Deserializer(in_file, "shap.maskers.Text", min_version=0, max_version=0) as s:
kwargs["tokenizer"] = s.load("tokenizer")
kwargs["mask_token"] = s.load("mask_token")
kwargs["collapse_mask_token"] = s.load("collapse_mask_token")
kwargs["output_type"] = s.load("output_type")
return kwargs
class SimpleTokenizer(): # pylint: disable=too-few-public-methods
""" A basic model agnostic tokenizer.
"""
def __init__(self, split_pattern=r"\W+"):
""" Create a tokenizer based on a simple splitting pattern.
"""
self.split_pattern = re.compile(split_pattern)
def __call__(self, s, return_offsets_mapping=True):
""" Tokenize the passed string, optionally returning the offsets of each token in the original string.
"""
pos = 0
offset_ranges = []
input_ids = []
for m in re.finditer(self.split_pattern, s):
start, end = m.span(0)
offset_ranges.append((pos, start))
input_ids.append(s[pos:start])
pos = end
if pos != len(s):
offset_ranges.append((pos, len(s)))
input_ids.append(s[pos:])
out = {}
out["input_ids"] = input_ids
if return_offsets_mapping:
out["offset_mapping"] = offset_ranges
return out
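# Illustrative sketch (not part of the original module): a quick check of what
# SimpleTokenizer returns. The helper name and the sample string are ours.
def _demo_simple_tokenizer():
    """ Shows the substrings and character offsets produced for a short string.
    """
    tok = SimpleTokenizer()
    out = tok("Hello, world!")
    # The non-delimiter substrings and their (start, end) character spans.
    assert out["input_ids"] == ["Hello", "world"]
    assert out["offset_mapping"] == [(0, 5), (7, 12)]
    return out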
def post_process_sentencepiece_tokenizer_output(s):
""" replaces whitespace encoded as '_' with ' ' for sentencepiece tokenizers.
"""
s = s.replace('▁', ' ')
return s
openers = {
"(": ")"
}
closers = {
")": "("
}
enders = [".", ","]
connectors = ["but", "and", "or"]
class Token():
""" A token representation used for token clustering.
"""
def __init__(self, value):
self.s = value
if value in openers or value in closers:
self.balanced = False
else:
self.balanced = True
def __str__(self):
return self.s
def __repr__(self):
if not self.balanced:
return self.s + "!"
return self.s
class TokenGroup():
""" A token group (substring) representation used for token clustering.
"""
def __init__(self, group, index=None):
self.g = group
self.index = index
def __repr__(self):
return self.g.__repr__()
def __getitem__(self, index):
return self.g[index]
def __add__(self, o):
return TokenGroup(self.g + o.g)
def __len__(self):
return len(self.g)
def merge_score(group1, group2, special_tokens):
""" Compute the score of merging two token groups.
special_tokens: tokens (such as separator tokens) that should be grouped last
"""
score = 0
# ensures special tokens are combined last, so 1st subtree is 1st sentence and 2nd subtree is 2nd sentence
if len(special_tokens) > 0:
if group1[-1].s in special_tokens and group2[0].s in special_tokens:
score -= math.inf # subtracting infinity to create lowest score and ensure combining these groups last
# merge broken-up parts of words first
if group2[0].s.startswith("##"):
score += 20
# merge apostrophe endings next
if group2[0].s == "'" and (len(group2) == 1 or (len(group2) == 2 and group2[1].s in ["t", "s"])):
score += 15
if group1[-1].s == "'" and group2[0].s in ["t", "s"]:
score += 15
start_ctrl = group1[0].s.startswith("[") and group1[0].s.endswith("]")
end_ctrl = group2[-1].s.startswith("[") and group2[-1].s.endswith("]")
if (start_ctrl and not end_ctrl) or (end_ctrl and not start_ctrl):
score -= 1000
if group2[0].s in openers and not group2[0].balanced:
score -= 100
if group1[-1].s in closers and not group1[-1].balanced:
score -= 100
    # attach surrounding openers and closers a bit later
    if group1[0].s in openers and group2[-1].s not in closers:
score -= 2
# reach across connectors later
if group1[-1].s in connectors or group2[0].s in connectors:
score -= 2
# reach across commas later
if group1[-1].s == ",":
score -= 10
if group2[0].s == ",":
if len(group2) > 1: # reach across
score -= 10
else:
score -= 1
# reach across sentence endings later
if group1[-1].s in [".", "?", "!"]:
score -= 20
if group2[0].s in [".", "?", "!"]:
if len(group2) > 1: # reach across
score -= 20
else:
score -= 1
score -= len(group1) + len(group2)
#print(group1, group2, score)
return score
def merge_closest_groups(groups, special_tokens):
""" Finds the two token groups with the best merge score and merges them.
"""
scores = [merge_score(groups[i], groups[i+1], special_tokens) for i in range(len(groups)-1)]
#print(scores)
ind = np.argmax(scores)
groups[ind] = groups[ind] + groups[ind+1]
#print(groups[ind][0].s in openers, groups[ind][0])
if groups[ind][0].s in openers and groups[ind+1][-1].s == openers[groups[ind][0].s]:
groups[ind][0].balanced = True
groups[ind+1][-1].balanced = True
groups.pop(ind+1)
def partition_tree(decoded_tokens, special_tokens):
""" Build a heriarchial clustering of tokens that align with sentence structure.
Note that this is fast and heuristic right now.
TODO: Build this using a real constituency parser.
"""
token_groups = [TokenGroup([Token(t)], i) for i, t in enumerate(decoded_tokens)]
# print(token_groups)
M = len(decoded_tokens)
new_index = M
clustm = np.zeros((M-1, 4))
for i in range(len(token_groups)-1):
scores = [merge_score(token_groups[i], token_groups[i+1], special_tokens) for i in range(len(token_groups)-1)]
# print(scores)
ind = np.argmax(scores)
lind = token_groups[ind].index
rind = token_groups[ind+1].index
clustm[new_index-M, 0] = token_groups[ind].index
clustm[new_index-M, 1] = token_groups[ind+1].index
clustm[new_index-M, 2] = -scores[ind]
clustm[new_index-M, 3] = (clustm[lind-M, 3] if lind >= M else 1) + (clustm[rind-M, 3] if rind >= M else 1)
token_groups[ind] = token_groups[ind] + token_groups[ind+1]
token_groups[ind].index = new_index
# track balancing of openers/closers
if token_groups[ind][0].s in openers and token_groups[ind+1][-1].s == openers[token_groups[ind][0].s]:
token_groups[ind][0].balanced = True
token_groups[ind+1][-1].balanced = True
token_groups.pop(ind+1)
new_index += 1
# negative means we should never split a group, so we add 10 to ensure these are very tight groups
# (such as parts of the same word)
clustm[:, 2] = clustm[:, 2] + 10
return clustm
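# Illustrative sketch (not part of the original module): builds a partition
# tree over a handful of tokens and checks the structural properties of the
# returned matrix. The helper name and the sample tokens are ours.
def _demo_partition_tree():
    """ Each row of the result describes one merge: [left index, right index, distance, size],
    where indices below len(tokens) refer to leaf tokens and larger indices refer to earlier merges.
    """
    tokens = ["a", "big", "dog", "barks", "."]
    pt = partition_tree(tokens, special_tokens=[])
    assert pt.shape == (len(tokens) - 1, 4)
    # The final merge always covers every token.
    assert pt[-1, 3] == len(tokens)
    return pt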
|
the-stack_106_13664
|
import pygame
import pygame.font
import math
from ball import *
from block import *
from cannon import *
from allenermy import *
from other import *
class Level4:
pygame.init()
bg = pygame.image.load('LightningBg0.png')
hole = pygame.image.load('GateHole.png')
CannonMoveSound = pygame.mixer.Sound('CannonMove.ogg')
font = pygame.font.SysFont('algerian', 45)
clock = pygame.time.Clock()
def __init__(self, win, love, score, mute):
self.win = win
self.shoot = False
self.kill = False
self.walk = True
self.space = False
self.hold = False
self.backhold = False
self.activate = False
self.run = True
self.WAVE = False
self.right = [False, False, False, False, False, False]
self.mute = mute
self.walkCount = 0
self.angle = 0
self.time = 0
self.power = 0
self.die = 0
self.hitrange = 50
self.wave = 0
self.love = love
self.distance = 80
self.secondtime = 0
self.clocktime = 0
self.level = 4
self.timeleft = 0
self.score = score
self.bomber = ball(173, 335, 5, (255, 255, 255))
self.forcebar = rectangle(560, 720, 10, 20)
self.needle = needle(80, 700, 160, 700)
self.cannon = cannon(126, 233, 170, 200, 1)
self.enermy1 = enermy01(1400, 450, 60, 110, 1)
self.enermy2 = enermy02(1470, 430, 115, 90, 2)
self.enermy3 = enermy01(1540, 450, 60, 110, 1)
self.enermy4 = enermy02(1610, 430, 115, 90, 2)
self.enermy5 = enermy01(1680, 450, 60, 110, 1)
self.enermy6 = enermy02(1750, 430, 115, 90, 2)
self.block1 = block(975, 452, 57, 106, 1)
self.block2 = block(830, 452, 57, 106, 1)
self.block3 = block(655, 452, 57, 106, 1)
self.block4 = block(450, 452, 57, 106, 1)
self.block5 = gate(250, 432, 80, 147, 1)
self.base1 = base1(-9, 81, 146, 320, 1)
self.base2 = base2(-9, 81, 146, 320, 1)
self.base3 = base3(-9, 81, 146, 320, 1)
self.base4 = base4(-9, 81, 146, 320, 1)
self.base5 = base5(-9, 81, 146, 320, 1)
self.sword = swordbar(25, 655, 1155, 154)
self.bomb1 = bombCount(340, 688, 75, 89, 1)
self.bomb2 = bombCount(278, 688, 75, 89, 1)
self.bomb3 = bombCount(216, 688, 75, 89, 1)
self.bomb4 = bombCount(153, 688, 75, 89, 1)
self.bomb5 = bombCount(91, 688, 75, 89, 1)
self.warn1 = level1warn(0, 0, 1400, 800, 0)
self.warn2 = level3warn(0, 0, 1400, 800, 0)
self.warn3 = level5warn(0, 0, 1400, 800, 0)
self.pauseBtn = pauseBtn(1357, 14, 38, 39)
self.bombs = [self.bomb1, self.bomb2, self.bomb3, self.bomb4, self.bomb5]
self.bases = [self.base5, self.base4, self.base3, self.base2, self.base1]
self.enermys = [self.block1, self.enermy1, self.enermy2, self.enermy3, self.enermy4, self.enermy5, self.enermy6]
self.blocks = [self.block1, self.block2, self.block3, self.block4]
self.next = 0
if self.mute:
self.bomber.Mute()
self.cannon.Mute()
self.enermy1.Mute()
self.enermy2.Mute()
self.enermy3.Mute()
self.enermy4.Mute()
self.enermy5.Mute()
self.enermy6.Mute()
def recharge(self):
self.needle.x2 = 160
self.needle.y2 = 700
self.forcebar.width = 10
self.forcebar.height = 20
self.time = 0
self.secondtime = 0
self.angle = 0
self.kill = True
self.shoot = False
if not self.WAVE:
self.wave += 1
self.WAVE = True
for i in range(1, len(self.enermys)):
if self.enermys[i].hitbox[0] + self.enermys[i].width < self.bomber.hitbox[0] \
and self.enermys[i].hitbox[0] + self.enermys[i].width > self.bomber.hitbox[0] - self.hitrange \
or self.enermys[i].hitbox[0] > self.bomber.hitbox[0] \
and self.enermys[i].hitbox[0] < self.bomber.hitbox[0] + self.hitrange + self.bomber.radius:
self.enermys[i].Health()
self.score += 100
def redrawWindow(self):
self.win.blit(Level4.bg, (0, 0))
self.forcebar.draw(self.win)
self.bomber.draw(self.win)
for i in range(len(self.bases)):
self.bases[i].draw(self.win)
self.cannon.draw(self.win)
self.block5.draw(self.win)
for i in range(len(self.enermys)):
self.enermys[i].draw(self.win)
self.sword.draw(self.win)
for i in range(len(self.bombs)):
self.bombs[i].draw(self.win)
text = Level4.font.render('LEVEL ' + str(self.level) + ' / ' + 'WAVE ' + str(self.wave + 1), 1, (255, 255, 255))
self.win.blit(text, (500, 10))
text = Level4.font.render(str(self.score), 1, (255, 255, 255))
self.win.blit(text, (1230, 710))
text = Level4.font.render(str(int(self.timeleft)), 1, (255, 255, 255))
self.win.blit(text, (50, 15))
self.block1.draw(self.win)
self.block2.draw(self.win)
self.block3.draw(self.win)
self.block4.draw(self.win)
self.win.blit(Level4.hole, (0, 442))
self.pauseBtn.draw(self.win)
self.warn1.draw(self.win)
self.warn2.draw(self.win)
self.warn3.draw(self.win)
pygame.display.update()
def findAngle(self, pos):
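        # Returns the angle (radians, in [0, 2*pi)) from the aiming pivot
        # (needle.x1, needle.y1) to the given mouse/target position; the
        # branches below resolve atan's output into the correct quadrant.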
sX = self.needle.x1
sY = self.needle.y1
try:
self.angle = math.atan((sY - pos[1]) / (sX - pos[0]))
        except ZeroDivisionError:
self.angle = math.pi / 2
if pos[1] < sY and pos[0] > sX:
self.angle = abs(self.angle)
elif pos[1] < sY and pos[0] < sX:
self.angle = math.pi - self.angle
elif pos[1] > sY and pos[0] < sX:
self.angle = math.pi + abs(self.angle)
elif pos[1] > sY and pos[0] > sX:
self.angle = (math.pi * 2) - self.angle
return self.angle
def pause2(self):
pause = pygame.image.load('Pause.png')
resumeOn = pygame.image.load('ResumeOn.png')
resumeOff = pygame.image.load('ResumeOff.png')
menuOn = pygame.image.load('MenuOn.png')
menuOff = pygame.image.load('MenuOff.png')
muteOn = pygame.image.load('MuteOn.png')
muteOff = pygame.image.load('MuteOff.png')
unmuteOn = pygame.image.load('UnmuteOn.png')
unmuteOff = pygame.image.load('UnmuteOff.png')
paused = True
while paused:
mouse = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
paused = False
if event.type == pygame.MOUSEBUTTONDOWN:
if 561 + 259 > mouse[0] > 561 and 424 + 74 > mouse[1] > 424:
                        for obj in (self.bomber, self.cannon, self.enermy1,
                                    self.enermy2, self.enermy3, self.enermy4,
                                    self.enermy5, self.enermy6):
                            obj.Mute()
                        if not self.mute:
                            pygame.mixer.music.pause()
                            self.mute = True
                        else:
                            pygame.mixer.music.unpause()
                            self.mute = False
if 561 + 259 > mouse[0] > 561 and 290 + 74 > mouse[1] > 290:
paused = False
self.win.blit(pause, (0, 0))
click = pygame.mouse.get_pressed()
if 561 + 259 > mouse[0] > 561 and 290 + 74 > mouse[1] > 290:
self.win.blit(resumeOn, (561, 290))
else:
self.win.blit(resumeOff, (561, 290))
if 561 + 259 > mouse[0] > 561 and 424 + 74 > mouse[1] > 424:
if not self.mute:
self.win.blit(muteOn, (561, 424))
else:
self.win.blit(unmuteOn, (561, 424))
else:
if not self.mute:
self.win.blit(muteOff, (561, 424))
else:
self.win.blit(unmuteOff, (561, 424))
if 561 + 259 > mouse[0] > 561 and 556 + 74 > mouse[1] > 556:
self.win.blit(menuOn, (561, 556))
if click[0] == 1:
paused = False
self.next = 0
self.run = False
else:
self.win.blit(menuOff, (561, 556))
pygame.display.update()
def pausewin(self):
congrats = pygame.image.load('Congrats.png')
quitOff = pygame.image.load('QuitOff.png')
quitOn = pygame.image.load('QuitOn.png')
continueOff = pygame.image.load('ContinueOff.png')
continueOn = pygame.image.load('ContinueOn.png')
live = pygame.image.load('Life.png')
font = pygame.font.SysFont('algerian', 50)
paused = True
while paused:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
paused = False
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
self.win.blit(congrats, (0, 0))
text = font.render(str(self.score), 1, (219, 149, 248))
self.win.blit(text, (764, 368))
            if 1 <= self.love <= 5:
                for i in range(self.love):
                    self.win.blit(live, (405 + 123 * i, 434))
if 377+259 > mouse[0] > 377 and 558+74 > mouse[1] > 558:
self.win.blit(quitOn, (377, 558))
if click[0] == 1:
paused = False
self.next = 0
self.run = False
else:
self.win.blit(quitOff, (377, 558))
            if 755+259 > mouse[0] > 755 and 558+74 > mouse[1] > 558:
                self.win.blit(continueOn, (755, 558))
                if click[0] == 1:
                    paused = False
                    self.next = 1
                    self.run = False
else:
self.win.blit(continueOff, (755, 558))
pygame.display.update()
def pausedefeat(self):
defeat = pygame.image.load('Defeat.png')
quitOff = pygame.image.load('QuitOff.png')
quitOn = pygame.image.load('QuitOn.png')
TryAgainOff = pygame.image.load('TryAgainOff.png')
TryAgainOn = pygame.image.load('TryAgainOn.png')
font = pygame.font.SysFont('algerian', 50)
paused = True
while paused:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
paused = False
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
self.win.blit(defeat, (0, 0))
text = font.render(str(self.score), 1, (219, 149, 248))
self.win.blit(text, (764, 368))
if 377+259 > mouse[0] > 377 and 558+74 > mouse[1] > 558:
self.win.blit(quitOn, (377, 558))
if click[0] == 1:
paused = False
self.next = 0
self.run = False
else:
self.win.blit(quitOff, (377, 558))
if 755+259 > mouse[0] > 755 and 558+74 > mouse[1] > 558:
self.win.blit(TryAgainOn, (755, 558))
if click[0] == 1:
paused = False
self.next = -1
self.run = False
else:
self.win.blit(TryAgainOff, (755, 558))
pygame.display.update()
    def Mute(self):
        self.mute = not self.mute
def WaveProcess(self):
if self.bomber.x == 173 and self.bomber.y == 335:
self.shoot = False
self.kill = False
self.WAVE = False
if self.secondtime == 10:
self.activate = True
self.secondtime = 0
def RUN(self):
while self.run:
self.clocktime += Level4.clock.tick_busy_loop(45)
if self.clocktime >= 1000:
self.secondtime += 1
self.clocktime = 0
            if 1 <= self.secondtime <= 10:
                self.timeleft = 11 - self.secondtime
if self.activate == True:
if not self.space:
self.power = 51.67
self.hold = False
self.backhold = False
self.cannon.move()
self.cannon.Angle(int(self.angle))
if not self.shoot:
pos = (self.needle.x2, self.needle.y2)
self.shoot = True
self.angle = self.findAngle(pos)
if self.backhold:
self.power -= 0.13
self.forcebar.width -= 3.5
if self.forcebar.width <= 10:
self.hold = True
self.backhold = False
if self.hold:
self.forcebar.width += 3.5
self.power += 0.13
if self.forcebar.width >= 500:
self.hold = False
self.backhold = True
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
if 1357 + 38 > mouse[0] > 1357 and 14 + 39 > mouse[1] > 14:
if click[0] == 1:
self.pause2()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.pause2()
if event.key == pygame.K_UP and self.angle > -80 and self.bomber.x == 173\
and self.bomber.y == 335:
if not self.mute:
self.CannonMoveSound.play()
self.angle -= 10
self.cannon.notmove()
self.cannon.Angle(int(self.angle))
self.needle.x2 = self.needle.x1 + math.cos(math.radians(self.angle)) * 80
self.needle.y2 = self.needle.y1 + math.sin(math.radians(self.angle)) * 80
if event.key == pygame.K_DOWN and self.angle < 0 and self.bomber.x == 173\
and self.bomber.y == 335:
if not self.mute:
self.CannonMoveSound.play()
self.angle += 10
self.cannon.notmove()
self.cannon.Angle(int(self.angle))
self.needle.x2 = self.needle.x1 + math.cos(math.radians(self.angle)) * 80
self.needle.y2 = self.needle.y1 + math.sin(math.radians(self.angle)) * 80
if event.key == pygame.K_SPACE and self.bomber.x == 173 and self.bomber.y == 335:
self.power = 51.67
self.hold = True
self.space = True
if event.type == pygame.KEYUP:
if event.key == pygame.K_SPACE and self.bomber.x == 173 and self.bomber.y == 335\
or self.activate == True:
self.hold = False
self.backhold = False
self.cannon.move()
self.cannon.Angle(int(self.angle))
if not self.shoot:
pos = (self.needle.x2, self.needle.y2)
self.shoot = True
self.angle = self.findAngle(pos)
if event.type == pygame.QUIT:
self.run = False
if not self.kill:
for i in range(0, len(self.blocks)):
if self.bomber.hitbox[0] >= self.blocks[i].hitbox[0] \
and self.bomber.hitbox[0] <= self.blocks[i].hitbox[0] + self.blocks[i].width \
and self.bomber.hitbox[1] >= self.blocks[i].hitbox[1] \
and self.bomber.hitbox[1] <= self.blocks[i].hitbox[1] + self.blocks[i].height:
self.bomber.explo()
self.recharge()
for i in range(1, len(self.enermys)):
if self.bomber.hitbox[0] >= self.enermys[i].hitbox[0]\
and self.bomber.hitbox[0] <= self.enermys[i].hitbox[0]+self.enermys[i].width\
and self.bomber.hitbox[1] >= self.enermys[i].hitbox[1]\
and self.bomber.hitbox[1] <= self.enermys[i].hitbox[1]+self.enermys[i].height:
self.bomber.explo()
self.enermys[i].Health()
self.score += 100
self.recharge()
if self.wave < 5:
self.activate = False
for i in range(1, len(self.enermys)):
if self.enermys[i].health <= 0 and not self.enermys[i].spirit():
self.enermys[i].x = self.enermys[i-1].x
if self.wave == 0:
for i in range(1, 4):
if i == 2:
width = 240
else:
width = self.enermys[i].width
if self.enermys[i].x <= self.block1.x + self.distance:
self.right[i - 1] = True
elif self.enermys[i].x >= 1400 - width:
self.right[i - 1] = False
if self.enermys[i].x > self.block1.x + self.distance and not self.right[i - 1]:
self.enermys[i].left()
self.enermys[i].walk(self.win)
elif self.right[i - 1]:
self.enermys[i].right()
self.enermys[i].uturn(self.win)
else:
for i in range(1, len(self.enermys)):
if i == 2 or i == 4 or i == 6:
width = 240
else:
width = self.enermys[i].width
if self.enermys[i].x <= self.block1.x + self.distance:
self.right[i - 1] = True
elif self.enermys[i].x >= 1400 - width:
self.right[i - 1] = False
if self.enermys[i].x > self.block1.x + self.distance and not self.right[i - 1]:
self.enermys[i].left()
self.enermys[i].walk(self.win)
elif self.right[i - 1]:
self.enermys[i].right()
self.enermys[i].uturn(self.win)
if self.shoot:
if self.bomber.y < 540 - self.bomber.radius:
self.time += 0.15
po = self.bomber.ballPath(173, 335, self.power, self.angle, self.time)
self.bomber.x = po[0]
self.bomber.y = po[1]
if self.bomber.y > 530 - self.bomber.radius \
and self.bomber.y < 540 - self.bomber.radius:
self.bomber.land()
self.bomber.explo()
else:
self.recharge()
            # self.bases is ordered base5..base1, so this zeroes base1..base(5-love).
            if 0 <= self.love <= 4:
                for base in self.bases[self.love:]:
                    base.health = 0
if self.wave == 0:
self.WaveProcess()
if self.wave == 1:
self.WaveProcess()
self.block1.health = 0
self.bomb1.health = 0
if self.block1.health <= 0 and not self.block1.spirit():
self.block1.x = self.block2.x
if self.wave == 2:
self.WaveProcess()
self.block2.health = 0
self.bomb2.health = 0
if self.block2.health <= 0 and not self.block2.spirit():
self.block1.x = self.block3.x
self.block2.x = self.block3.x
if self.wave == 3:
self.WaveProcess()
self.block3.health = 0
self.bomb3.health = 0
if self.block3.health <= 0 and not self.block3.spirit():
self.block1.x = self.block4.x
self.block2.x = self.block4.x
self.block3.x = self.block4.x
if self.wave == 4:
self.WaveProcess()
self.block4.health = 0
self.bomb4.health = 0
if self.block4.health <= 0 and not self.block4.spirit():
self.block1.x = self.block5.x
self.block2.x = self.block5.x
self.block3.x = self.block5.x
self.block4.x = self.block5.x
if self.wave == 5:
self.activate = False
if self.block4.health <= 0 and not self.block4.spirit():
self.block1.x = self.block5.x - 100
self.block2.x = self.block5.x - 100
self.block3.x = self.block5.x - 100
self.block4.x = self.block5.x - 100
self.block5.health = 0
self.bomb5.health = 0
for i in range(1, len(self.enermys)):
if self.enermys[i].x >= self.block1.x:
self.enermys[i].left()
self.enermys[i].walk(self.win)
if self.enermys[i].health > 0 and self.enermys[i].x <= self.block1.x:
self.enermys[i].health = -1
self.love -= 1
if self.enermy1.health <= 0 and self.enermy2.health <= 0 and self.enermy3.health <= 0 \
and self.enermy4.health <= 0 and self.enermy5.health <= 0 and self.enermy6.health <= 0\
and self.bomber.x == 173 and self.bomber.y == 335:
if self.love > 0:
self.pausewin()
if self.love <= 0:
self.pausedefeat()
self.redrawWindow()
return self.love, self.score, self.mute, self.next
|
the-stack_106_13666
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import types
import csv
import random
import numpy as np
from . import tokenization
from .batching import prepare_batch_data
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def __init__(self,
data_dir,
vocab_path,
max_seq_len,
do_lower_case,
in_tokens,
random_seed=None):
self.data_dir = data_dir
self.max_seq_len = max_seq_len
self.tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_path, do_lower_case=do_lower_case)
self.vocab = self.tokenizer.vocab
self.in_tokens = in_tokens
np.random.seed(random_seed)
self.current_train_example = -1
self.num_examples = {'train': -1, 'dev': -1, 'test': -1}
self.current_train_epoch = -1
def get_train_aug_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def convert_example(self, index, example, labels, max_seq_len, tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
feature = convert_single_example(index, example, labels, max_seq_len,
tokenizer)
return feature
def generate_instance(self, feature):
"""
generate instance with given feature
Args:
feature: InputFeatures(object). A single set of features of data.
"""
input_pos = list(range(len(feature.input_ids)))
return [
feature.input_ids, feature.segment_ids, input_pos, feature.label_id
]
def generate_batch_data(self,
batch_data,
total_token_num,
voc_size=-1,
mask_id=-1,
return_input_mask=True,
return_max_len=False,
return_num_token=False):
        return prepare_batch_data(
            batch_data,
            total_token_num,
            voc_size=voc_size,
            pad_id=self.vocab["[PAD]"],
            cls_id=self.vocab["[CLS]"],
            sep_id=self.vocab["[SEP]"],
            mask_id=mask_id,
            return_input_mask=return_input_mask,
            return_max_len=return_max_len,
            return_num_token=return_num_token)
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with io.open(input_file, "r", encoding="utf8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
def get_num_examples(self, phase):
"""Get number of examples for train, dev or test."""
        if phase not in ['train', 'train_aug', 'dev', 'test']:
            raise ValueError(
                "Unknown phase, which should be in "
                "['train', 'train_aug', 'dev', 'test'].")
return self.num_examples[phase]
def get_train_progress(self):
"""Gets progress for training phase."""
return self.current_train_example, self.current_train_epoch
def data_generator(self,
batch_size,
phase='train',
epoch=1,
dev_count=1,
shuffle=True,
shuffle_seed=None):
"""
Generate data for train, dev or test.
Args:
batch_size: int. The batch size of generated data.
phase: string. The phase for which to generate data.
epoch: int. Total epoches to generate data.
shuffle: bool. Whether to shuffle examples.
"""
        if phase in ('search_train', 'search_valid'):
            search_examples = self.get_train_examples(self.data_dir)
            random.shuffle(search_examples)
if phase == 'train':
examples = self.get_train_examples(self.data_dir)
self.num_examples['train'] = len(examples)
elif phase == 'train_aug':
examples = self.get_train_aug_examples(self.data_dir)
self.num_examples['train'] = len(examples)
elif phase == 'dev':
examples = self.get_dev_examples(self.data_dir)
self.num_examples['dev'] = len(examples)
elif phase == 'test':
examples = self.get_test_examples(self.data_dir)
self.num_examples['test'] = len(examples)
        elif phase == 'search_train':
            self.num_examples['search_train'] = len(search_examples) // 2
            examples = search_examples[:self.num_examples['search_train']]
        elif phase == 'search_valid':
            self.num_examples['search_valid'] = len(search_examples) // 2
            examples = search_examples[self.num_examples['search_valid']:]
else:
raise ValueError(
"Unknown phase, which should be in ['train', 'dev', 'test'].")
def instance_reader():
for epoch_index in range(epoch):
if shuffle:
if shuffle_seed is not None:
np.random.seed(shuffle_seed)
np.random.shuffle(examples)
if phase == 'train' or phase == 'search_train':
self.current_train_epoch = epoch_index
for (index, example) in enumerate(examples):
if phase == 'train' or phase == "search_train":
self.current_train_example = index + 1
feature = self.convert_example(
index, example,
self.get_labels(), self.max_seq_len, self.tokenizer)
instance = self.generate_instance(feature)
yield instance
def batch_reader(reader, batch_size, in_tokens):
batch, total_token_num, max_len = [], 0, 0
for instance in reader():
token_ids, sent_ids, pos_ids, label = instance[:4]
max_len = max(max_len, len(token_ids))
if in_tokens:
to_append = (len(batch) + 1) * max_len <= batch_size
else:
to_append = len(batch) < batch_size
if to_append:
batch.append(instance)
total_token_num += len(token_ids)
else:
yield batch, total_token_num
batch, total_token_num, max_len = [instance], len(
token_ids), len(token_ids)
if len(batch) > 0:
yield batch, total_token_num
def wrapper():
all_dev_batches = []
for batch_data, total_token_num in batch_reader(
instance_reader, batch_size, self.in_tokens):
batch_data = self.generate_batch_data(
batch_data,
total_token_num,
voc_size=-1,
mask_id=-1,
return_input_mask=True,
return_max_len=False,
return_num_token=False)
if len(all_dev_batches) < dev_count:
all_dev_batches.append(batch_data)
if len(all_dev_batches) == dev_count:
for batch in all_dev_batches:
batch = self.split_seq_pair(batch)
yield batch
all_dev_batches = []
return wrapper
def split_seq_pair(self, data_ids):
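        # Splits a padded batch of sentence-pair token ids back into two
        # per-sentence id matrices using the segment ids: ids0 collects the
        # non-pad tokens whose segment id is 0, ids1 those whose segment id
        # is 1; both are padded to a common length and appended to data_ids.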
src_ids = data_ids[0]
sentence_ids = data_ids[2]
ids = np.squeeze(src_ids)
sids = np.squeeze(sentence_ids)
batchsize = ids.shape[0]
ids_0 = ids[((sids == 0) & (ids != 0))]
seqlen_0 = ((sids == 0) & (ids != 0)).astype(np.int64).sum(1)
y_0 = np.concatenate([np.arange(s) for s in seqlen_0])
x_0 = np.concatenate(
[np.ones(
[s], dtype=np.int64) * i for i, s in enumerate(seqlen_0)])
ids0 = np.zeros([batchsize, seqlen_0.max()], dtype=np.int64)
ids0[(x_0, y_0)] = ids_0
ids_1 = ids[(sids == 1) & (ids != 0)]
seqlen_1 = ((sids == 1) & (ids != 0)).astype(np.int64).sum(1)
y_1 = np.concatenate([np.arange(s) for s in seqlen_1])
x_1 = np.concatenate(
[np.ones(
[s], dtype=np.int64) * i for i, s in enumerate(seqlen_1)])
ids1 = np.zeros([batchsize, seqlen_1.max()], dtype=np.int64)
ids1[(x_1, y_1)] = ids_1
msl = max(seqlen_0.max(), seqlen_1.max())
ids0 = np.pad(ids0, [[0, 0], [0, msl - seqlen_0.max()]],
mode='constant')
ids1 = np.pad(ids1, [[0, 0], [0, msl - seqlen_1.max()]],
mode='constant')
return data_ids + [ids0, ids1]
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
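# Illustrative sketch (not part of the original file): shows the truncation
# heuristic above trimming the longer sequence one token at a time. The helper
# name and the toy token lists are ours.
def _demo_truncate_seq_pair():
    tokens_a = ["a", "b", "c", "d", "e"]
    tokens_b = ["x", "y"]
    _truncate_seq_pair(tokens_a, tokens_b, max_length=5)
    # Two tokens are popped from tokens_a (the longer sequence); tokens_b is untouched.
    assert tokens_a == ["a", "b", "c"] and tokens_b == ["x", "y"]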
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def get_train_examples(self, data_dir):
"""See base class."""
self.language = "zh"
lines = self._read_tsv(
os.path.join(data_dir, "multinli", "multinli.train.%s.tsv" %
self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
self.language = "zh"
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
self.language = "zh"
lines = self._read_tsv(os.path.join(data_dir, "xnli.test.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "test-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_aug_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "train")
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type,
tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(
guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example_to_unicode(guid, single_example):
text_a = tokenization.convert_to_unicode(single_example[0])
text_b = tokenization.convert_to_unicode(single_example[1])
label = tokenization.convert_to_unicode(single_example[2])
return InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
label_id = label_map[example.label]
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
print("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
if __name__ == '__main__':
pass
|
the-stack_106_13674
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sampling sketches with privacy guarantees.
This package was written for the experimental part of a research project on
adding differential privacy guarantees to sampling sketches. It contains an
implementation of threshold sampling, using either PPSWOR (probability
proportional to size and without replacement) or priority sampling as the
underlying sampling method.
The dataset consists of elements that are (key, weight) pairs. The
implementation assumes that the dataset is aggregated (each key appears once).
As a result, the weight of a data element is equal to the total frequency of
that key, so we can use the terms weight and frequency interchangeably.
For more information on PPSWOR and using bottom-k samples for estimating
statistics, see, for example, Section 2 in
E. Cohen and O. Geri, Sampling Sketches for Concave Sublinear Functions of
Frequencies, NeurIPS 2019
https://arxiv.org/abs/1907.02218
In comparison, here we assume that the sampling threshold is a parameter, and
in the linked paper the sampling threshold is the k-th lowest score.
For more information on priority sampling, see
N. Duffield, C. Lund, and M. Thorup, Priority Sampling for Estimation of
Arbitrary Subset Sums, J. ACM 2007
https://nickduffield.net/download/papers/priority.pdf
"""
import abc
import collections
import math
import random
import numpy as np
class SamplingMethod(abc.ABC):
"""Functions to compute score and inclusion probability for a sampling method.
Threshold sampling works by computing a random score for each key, and keeping
the keys with score below a given threshold (a parameter). This class includes
(static) functions to compute the score of a key and the probability that a
key is sampled. The functions satisfy the invariant
Pr[sampling_score(weight) < threshold] == inclusion_prob(weight, threshold).
"""
@staticmethod
@abc.abstractmethod
def sampling_score(weight):
"""Computes the score for a key with a given weight.
Args:
weight: The key weight
Returns:
The random score for the key
"""
raise NotImplementedError(
"sampling_score is abstract and needs to be implemented in a derived "
"class")
@staticmethod
@abc.abstractmethod
def inclusion_prob(weight, threshold):
"""Computes the probability that a key will be included in sample.
Args:
weight: The key weight
threshold: The threshold used by the sample
Returns:
The inclusion probability
"""
raise NotImplementedError(
"inclusion_prob is abstract and needs to be implemented in a derived "
"class")
class PpsworSamplingMethod(SamplingMethod):
"""Functions to compute score and inclusion probability for PPSWOR.
For PPSWOR, the random score is drawn from Exp(weight).
"""
@staticmethod
def sampling_score(weight):
if weight < 0.0:
raise ValueError("The key weight %f should be nonnegative." % weight)
if weight == 0.0:
return math.inf
return random.expovariate(1.0) / weight
@staticmethod
def inclusion_prob(weight, threshold):
if weight < 0.0 or threshold < 0.0:
raise ValueError(
"The key weight %f and threshold %f should be nonnegative." %
(weight, threshold))
return 1.0 - math.exp(-1.0 * weight * threshold)
class PrioritySamplingMethod(SamplingMethod):
"""Functions to compute score and inclusion probability for priority sampling.
For priority sampling, the score is computing by drawing a random number from
U(0,1) and dividing by the weight of the key.
"""
@staticmethod
def sampling_score(weight):
if weight < 0.0:
raise ValueError("The key weight %f should be nonnegative." % weight)
if weight == 0.0:
return math.inf
return random.uniform(0.0, 1.0) / weight
@staticmethod
def inclusion_prob(weight, threshold):
if weight < 0.0 or threshold < 0.0:
raise ValueError(
"The key weight %f and threshold %f should be nonnegative." %
(weight, threshold))
if weight * threshold > 1.0:
return 1.0
return weight * threshold
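# Illustrative sketch (not part of the original module): a Monte Carlo check of
# the invariant stated in SamplingMethod, namely that
# Pr[sampling_score(weight) < threshold] equals inclusion_prob(weight, threshold).
# The helper name and the default weight/threshold are ours.
def _check_inclusion_prob_invariant(method, weight=2.0, threshold=0.4,
                                    num_trials=100000):
  hits = sum(1 for _ in range(num_trials)
             if method.sampling_score(weight) < threshold)
  empirical = hits / num_trials
  expected = method.inclusion_prob(weight, threshold)
  # The two values should agree up to sampling noise, e.g. for
  # PpsworSamplingMethod and PrioritySamplingMethod.
  return empirical, expected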
class AlwaysIncludeSamplingMethod(SamplingMethod):
"""No sampling (sampling probability = 1.0).
Used to compare different private methods when no sampling is done.
"""
@staticmethod
def sampling_score(weight):
return -1.0 * math.inf
@staticmethod
def inclusion_prob(weight, threshold):
return 1.0
class ThresholdSample(object):
"""Implementation of a threshold sampling sketch (without privacy guarantees).
Threshold sampling works by computing a random score for each key, and keeping
the keys with score below a given threshold (a parameter). The keys that are
kept are the sample.
The score is determined by the underlying sampling method, e.g., PPSWOR or
priority sampling.
This sketch only supports aggregated data: each key can only appear once in
the dataset.
"""
def __init__(self,
threshold,
sampling_method=PpsworSamplingMethod,
func_of_freq=lambda x: x):
"""Initializes an empty sample.
Args:
threshold: The sampling threshold
sampling_method: A class that provides functions to compute the score and
inclusion probability according to the underlying sampling method (e.g.,
PPSWOR, which is the default value)
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
"""
self.threshold = threshold
self.sampling_method = sampling_method
self.func_of_freq = func_of_freq
# The following stores the sampled elements. It is a dict where the keys are
# the keys in the samples, and the value for each key is its weight.
self.elements = {}
def process(self, key, freq):
"""Processes a data element into the sketch.
Args:
key: The key of the element. We assume the data is aggregated, so the key
has to be unique to this element.
freq: The frequency of this element/key
"""
if key in self.elements:
raise ValueError("Only works for aggregated data: repeated key %s" % key)
score = self.sampling_method.sampling_score(self.func_of_freq(freq))
if score < self.threshold:
self.elements[key] = freq
def estimate_statistics(self,
key_coeff=lambda x: 1,
func_of_freq_to_estimate=None):
"""Estimates statistics using the frequencies of the keys.
There are two functions applied to the frequencies: One is used to compute
the sampling (this function is given to the constructor). The other is the
one we are trying to estimate in this function (passed as a parameter to
this function).
The estimate is computed by summing the inverse probability estimator for
each one of the keys in the sample. To support more general statistics than
the sum of all weights, we allow the estimator of key to be multiplied by
a constant (given as a parameter to this function). That is, this function
computes an estimate for:
sum_{key x} {key_coeff(x) * func(frequency) of x}.
Args:
key_coeff: A function that maps each key to its coefficient.
func_of_freq_to_estimate: The function applied to the frequencies. If
None, the function used for sampling will be used.
Returns:
The estimate for sum_{key x} {key_coeff(x) * func(frequency) of x}.
"""
if func_of_freq_to_estimate is None:
func_of_freq_to_estimate = self.func_of_freq
sum_estimate = 0.0
for key, freq in self.elements.items():
sampling_weight = self.func_of_freq(freq)
sum_estimate += key_coeff(key) * (
func_of_freq_to_estimate(freq) /
self.sampling_method.inclusion_prob(sampling_weight, self.threshold))
return sum_estimate
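# Illustrative sketch (not part of the original module): builds a small
# ThresholdSample over an aggregated dataset and estimates the total frequency
# with the inverse-probability estimator described above. The toy data and the
# helper name are ours.
def _demo_threshold_sample():
  data = {"a": 5, "b": 1, "c": 12, "d": 3}
  sample = ThresholdSample(threshold=0.5)
  for key, freq in data.items():
    sample.process(key, freq)
  # Unbiased for sum(data.values()) == 21; the variance shrinks as the
  # threshold grows and more keys are kept.
  return sample.estimate_statistics()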
# A default value for a parameter that trades off time and space.
# When computing values iteratively, we store some of the computed values to
# avoid recomputation. The store_every parameter controls how many values we
# store.
# For our experiments so far, it seems better to save time over space.
STORE_EVERY_DEFAULT = 1
class PrivateThresholdSampleKeysOnly(object):
"""Threshold sampling with differential privacy (returns sampled keys only).
This class implements threshold sampling, and then performs subsampling to
satisfy the differential privacy constraints. The private sample only includes
keys (and no information about their frequencies).
The sketch only supports aggregated data: each key can only appear once in the
dataset.
"""
def __init__(self,
threshold,
eps,
delta,
sampling_method=PpsworSamplingMethod,
store_every=STORE_EVERY_DEFAULT,
func_of_freq=lambda x: x):
"""Initializes an empty sample.
Args:
threshold: The sampling threshold
eps: The differential privacy parameter epsilon
delta: The differential privacy parameter delta
sampling_method: A class that provides functions to compute the score and
inclusion probability according to the underlying non-private sampling
method (e.g., PPSWOR, which is the default value).
store_every: A parameter that trades off the use of space and time. When
an element is processed into the sketch, we need to compute its
inclusion probability iteratively, and this parameter controls how many
such values we store (in order to not recompute). The number of values
stored is (max weight seen so far / store_every) + 1.
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
"""
self.threshold = threshold
self.sampling_method = sampling_method
self.eps = eps
self.delta = delta
self._store_every = store_every
self.func_of_freq = func_of_freq
# Stores the computed inclusion probabilities: self._inclusion_prob[i] is
# the inclusion probability of a key with frequency i * store_every.
self._inclusion_prob = [0.0]
# The set of keys that were sampled
self.elements = set()
@classmethod
def from_non_private(cls,
sample,
eps,
delta,
store_every=STORE_EVERY_DEFAULT,
func_of_freq=lambda x: x):
"""Creates a private sample from a given non-private threshold sample.
The input sample is subsampled to satisfy the differential privacy
constraints.
Args:
sample: The input non-private sample. Must be of type ThresholdSample.
eps: The differential privacy parameter epsilon
delta: The differential privacy parameter delta
store_every: A parameter that trades off the use of space and time. When
an element is processed into the sketch, we need to compute its
inclusion probability iteratively, and this parameter controls how many
such values we store (in order to not recompute). The number of values
stored is (max weight seen so far / store_every) + 1.
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
Returns:
A private sample derived from the input non-private sample.
"""
if not isinstance(sample, ThresholdSample):
raise TypeError(
"Tried to create a private sample from a non-sample object")
s = cls(sample.threshold, eps, delta, sample.sampling_method, store_every,
func_of_freq)
for key, freq in sample.elements.items():
non_private_inclusion_prob = s.sampling_method.inclusion_prob(
s.func_of_freq(freq), s.threshold)
private_inclusion_prob = s.compute_inclusion_prob(freq)
# In the private sample, the inclusion probability of key should be
# private_inclusion_prob.
# The key was included in the non-private sample with probability
# non_private_inclusion_prob, so we add it to the private sample with
# probability private_inclusion_prob/non_private_inclusion_prob.
if random.random() < (private_inclusion_prob /
non_private_inclusion_prob):
s.elements.add(key)
return s
def compute_inclusion_prob(self, freq):
"""Computes the inclusion probability of a key in the private sample.
The inclusion probability of a key in the private sample is determined by
its frequency and the differential privacy parameters.
The current implementation computes the maximum allowed inclusion
probability by iterating from 1 to the frequency. To avoid recomputation, we
store some of the computed inclusion probabilities for future calls to this
function.
Args:
freq: The frequency of the key
Returns:
The inclusion probability of a key with the given frequency in the private
sample.
"""
if not isinstance(freq, int):
raise TypeError("The frequency %f should be of type int" % freq)
if freq < 0:
raise ValueError("The frequency %d should be nonnegative" % freq)
# Find the closest precomputed value to start iterating from
start_arr_index = min(freq // self._store_every,
len(self._inclusion_prob) - 1)
cur_prob = self._inclusion_prob[start_arr_index]
cur_freq = start_arr_index * self._store_every
# Invariant: before/after each iteration of the loop, cur_prob is the
# inclusion probability for a key with frequency cur_freq.
while cur_freq < freq:
cur_freq += 1
cur_prob = min(
self.sampling_method.inclusion_prob(
self.func_of_freq(cur_freq), self.threshold),
math.exp(self.eps) * cur_prob + self.delta,
1.0 + math.exp(-1.0 * self.eps) * (cur_prob + self.delta - 1))
if cur_freq == len(self._inclusion_prob) * self._store_every:
self._inclusion_prob.append(cur_prob)
return cur_prob
def process(self, key, weight):
"""Processes a data element into the sketch.
Args:
key: The key of the element. We assume the data is aggregated, so the key
has to be unique to this element.
weight: The weight of this element/key
"""
if key in self.elements:
raise ValueError("Only works for aggregated data: repeated key %s" % key)
inclusion_prob = self.compute_inclusion_prob(weight)
if random.random() < inclusion_prob:
self.elements.add(key)
class PrivateThresholdSampleWithFrequencies(object):
"""Threshold sampling with differential privacy (with frequencies).
This class implements threshold sampling, and then performs subsampling to
satisfy the differential privacy constraints. Together with each sampled key,
the sketch reports a random value that is between 1 and the frequency of the
key, taken with a distribution that:
1. Ensures that the differential privacy constraints are satisfied.
2. Has as much probability mass as possible on higher values (which are closer
to the true frequency).
The reported frequency values can be used to estimate statistics on the data.
The sketch only supports aggregated data: each key can only appear once in the
dataset.
"""
def __init__(self,
threshold,
eps,
delta,
sampling_method=PpsworSamplingMethod,
store_every=STORE_EVERY_DEFAULT,
func_of_freq=lambda x: x):
"""Initializes an empty sample.
Args:
threshold: The sampling threshold
eps: The differential privacy parameter epsilon
delta: The differential privacy parameter delta
sampling_method: A class that provides functions to compute the score and
inclusion probability according to the underlying sampling method (e.g.,
PPSWOR, which is the default value)
store_every: A parameter that trades off the use of space and time. When
an element is processed into the sketch, we need to compute the
distribution of reported frequency iteratively, and this parameter
controls how many such distributions we store (to avoid recomputation).
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
"""
self.threshold = threshold
self.sampling_method = sampling_method
self.eps = eps
self.delta = delta
self._store_every = store_every
self.func_of_freq = func_of_freq
# The stored distributions of reported frequencies
# self._reported_weight_dist[i] is the probability distribution used for
# keys with frequency i * store_every.
# The key is not sampled with probability self._reported_weight_dist[i][0].
# The key is sampled and reported with frequency j with probability
# self._reported_weight_dist[i][j].
# We use a defaultdict to save space since self._reported_weight_dist[i][j]
# is 0 for many values of j.
self._reported_weight_dist = [collections.defaultdict(float)]
self._reported_weight_dist[0][0] = 1.0
# Stores the estimators used to estimate statistics to avoid recomputation.
self._biased_down_estimators = {0: 0.0}
self._mle_estimators = {0: 0.0}
# The elements in the sample: for each key x, self.elements[x] is the
# reported frequency (randomly chosen to satisfy the privacy constraints).
self.elements = {}
@classmethod
def from_non_private(cls,
sample,
eps,
delta,
store_every=STORE_EVERY_DEFAULT,
func_of_freq=lambda x: x):
"""Creates a private sample from a given non-private threshold sample.
The input sample is subsampled to satisfy the differential privacy
constraints.
Args:
sample: The input non-private sample
eps: The differential privacy parameter epsilon
delta: The differential privacy parameter delta
store_every: A parameter that trades off the use of space and time. When
an element is processed into the sketch, we need to compute the
distribution of reported frequency iteratively, and this parameter
controls how many such distributions we store (to avoid recomputation).
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
Returns:
A private sample derived from the input non-private sample.
"""
if not isinstance(sample, ThresholdSample):
raise TypeError(
"Tried to create a private sample from a non-sample object")
s = cls(sample.threshold, eps, delta, sample.sampling_method, store_every,
func_of_freq)
for key, freq in sample.elements.items():
# Determines whether the key should be included in the private sample.
non_private_inclusion_prob = s.sampling_method.inclusion_prob(
s.func_of_freq(freq), s.threshold)
weight_dist = s.compute_reported_frequency_dist(freq)
private_inclusion_prob = 1.0 - weight_dist[0]
if random.random() > (private_inclusion_prob /
non_private_inclusion_prob):
continue
# Randomly chooses the reported frequency for the key from the
# distribution conditioned on the fact that the key is in the sample.
x = random.random()
for reported_weight, prob in weight_dist.items():
# We condition on the fact that the key is included, so we should
# ignore the probability it is not included.
if reported_weight == 0:
continue
x -= prob / private_inclusion_prob
if x < 0:
s.elements[key] = reported_weight
break
return s
def _compute_next_reported_frequency_dist(self, cur_freq, cur_dist):
"""Computes the distribution of reported frequency for i+1 from that of i.
Args:
cur_freq: The frequency
cur_dist: The distribution of reported frequency for a key with frequency
cur_freq
Returns:
The distribution of reported frequency for a key with frequency
cur_freq + 1.
"""
cur_prob = 1.0 - cur_dist[0]
# The pseudocode/details behind the computation below are in the writeup.
cur_freq += 1
# Updates the inclusion probability.
cur_prob = min(
self.sampling_method.inclusion_prob(
self.func_of_freq(cur_freq), self.threshold),
math.exp(self.eps) * cur_prob + self.delta,
1.0 + math.exp(-1.0 * self.eps) * (cur_prob + self.delta - 1))
prev_dist = cur_dist
# Computes the new distribution of reported frequency.
cur_dist = collections.defaultdict(float)
cur_dist[0] = 1.0 - cur_prob
prev_cumulative = 0.0
cur_cumulative = 0.0
diff_in_inclusion_prob = max(
0,
math.exp(-1.0 * self.eps) * prev_dist[0] - cur_dist[0])
# Possible optimization: for j in sorted(prev_dist.keys())
for j in range(1, cur_freq):
prev_cumulative += prev_dist[j]
new_val_j = math.exp(-1.0 * self.eps) * (
prev_cumulative -
self.delta) - cur_cumulative + diff_in_inclusion_prob
if new_val_j > 0.0:
cur_dist[j] = new_val_j
cur_cumulative += cur_dist[j]
remainder = cur_prob - cur_cumulative
prev_cumulative = 0.0
cur_cumulative = 0.0
for j in range(cur_freq, 0, -1):
if remainder <= 0.0:
break
max_prob_j = math.exp(
self.eps) * prev_cumulative + self.delta - cur_cumulative
amount_added = min(remainder, max_prob_j - cur_dist[j])
cur_dist[j] += amount_added
remainder -= amount_added
prev_cumulative += prev_dist[j - 1]
cur_cumulative += cur_dist[j]
return cur_dist
def compute_reported_frequency_dist(self, freq):
"""Computes the distribution of the reported frequency of a key.
The distribution of reported frequency of a key in the private sample is
determined by its frequency and the differential privacy parameters.
The current implementation computes the distribution by iterating from 1 to
the frequency. To avoid recomputation, we store some of the computed values
for future calls to this function.
Args:
freq: The frequency of the key
Returns:
A defaultdict, where the value at j is the probability that the key is
included with reported frequency j. The value at 0 is the probability
that the key is not included in the private sample.
"""
if not isinstance(freq, int):
raise TypeError("The frequency %f should be of type int" % freq)
if freq < 0:
raise ValueError("The frequency %d should be nonnegative" % freq)
# Find the closest precomputed value to start iterating from
start_arr_index = min(freq // self._store_every,
len(self._reported_weight_dist) - 1)
cur_dist = self._reported_weight_dist[start_arr_index].copy()
cur_freq = start_arr_index * self._store_every
while cur_freq < freq:
cur_dist = self._compute_next_reported_frequency_dist(cur_freq, cur_dist)
cur_freq += 1
if cur_freq == len(self._reported_weight_dist) * self._store_every:
self._reported_weight_dist.append(cur_dist.copy())
return cur_dist
def biased_down_estimator(self, reported_freq, func_of_freq_to_estimate=None):
"""The biased down estimator.
Args:
reported_freq: The reported frequency of the key whose function of
frequency we are trying to estimate.
func_of_freq_to_estimate: The function of frequency we are trying to
estimate. If None, the function used for sampling will be used.
Returns:
The estimator for the function of the frequency of the key.
"""
if func_of_freq_to_estimate is None:
func_of_freq_to_estimate = self.func_of_freq
if reported_freq in self._biased_down_estimators:
return self._biased_down_estimators[reported_freq]
if reported_freq == 0:
return 0.0
i = reported_freq
dist_i = self.compute_reported_frequency_dist(i)
est = float("inf")
while dist_i[reported_freq] > 0.0:
prob_lower = 0.0
sum_lower = 0.0
for j, prob in dist_i.items():
if j < reported_freq:
sum_lower += self.biased_down_estimator(
j, func_of_freq_to_estimate) * prob
prob_lower += prob
est = min(est,
(func_of_freq_to_estimate(i) - sum_lower) / (1.0 - prob_lower))
      if (i + 1) % self._store_every == 0 and i + 1 < len(
          self._reported_weight_dist) * self._store_every:
dist_i = self.compute_reported_frequency_dist(i + 1)
else:
dist_i = self._compute_next_reported_frequency_dist(i, dist_i)
i += 1
if i == len(self._reported_weight_dist) * self._store_every:
self._reported_weight_dist.append(dist_i.copy())
self._biased_down_estimators[reported_freq] = est
return est
def mle_estimator(self, reported_freq, func_of_freq_to_estimate=None):
"""The MLE estimator.
Args:
reported_freq: The reported frequency of the key whose function of
frequency we are trying to estimate.
func_of_freq_to_estimate: The function of frequency we are trying to
estimate. If None, the function used for sampling will be used.
Returns:
The estimator for the function of the frequency of the key.
"""
if func_of_freq_to_estimate is None:
func_of_freq_to_estimate = self.func_of_freq
if reported_freq in self._mle_estimators:
return self._mle_estimators[reported_freq]
i = reported_freq
dist_i = self.compute_reported_frequency_dist(i)
est = 0.0
max_prob_to_report_freq = 0.0
while dist_i[reported_freq] > 0.0:
if dist_i[reported_freq] > max_prob_to_report_freq:
max_prob_to_report_freq = dist_i[reported_freq]
est = func_of_freq_to_estimate(i) / (1.0 - dist_i[0])
      if (i + 1) % self._store_every == 0 and i + 1 < len(
          self._reported_weight_dist) * self._store_every:
dist_i = self.compute_reported_frequency_dist(i + 1)
else:
dist_i = self._compute_next_reported_frequency_dist(i, dist_i)
i += 1
if i == len(self._reported_weight_dist) * self._store_every:
self._reported_weight_dist.append(dist_i.copy())
self._mle_estimators[reported_freq] = est
return est
def estimator(self, reported_freq, func_of_freq_to_estimate=None):
"""A default estimator to be used when estimating statistics.
Currently returns the MLE estimator.
Args:
reported_freq: The reported frequency of the key whose function of
frequency we are trying to estimate.
func_of_freq_to_estimate: The function of frequency we are trying to
estimate. If None, the function used for sampling will be used.
Returns:
The estimator for the function of the frequency of the key.
"""
return self.mle_estimator(reported_freq, func_of_freq_to_estimate)
def bias_and_mean_square_error(self,
freq,
estimator_func,
func_of_freq_to_estimate=None):
"""Computes the bias and mean square error of the estimator.
Args:
freq: The true frequency of the key
estimator_func: The function that is used to compute the estimator
func_of_freq_to_estimate: The function of frequency we are trying to
estimate. If None, the function used for sampling will be used.
Returns:
A tuple (bias, MSE)
"""
# TODO(ofirg): check that estimator_func is a member of self?
if func_of_freq_to_estimate is None:
func_of_freq_to_estimate = self.func_of_freq
reported_freq_dist = self.compute_reported_frequency_dist(freq)
bias = -1.0 * func_of_freq_to_estimate(freq)
mse = 0.0
for reported_freq, prob in reported_freq_dist.items():
bias += prob * estimator_func(reported_freq, func_of_freq_to_estimate)
mse += prob * ((estimator_func(reported_freq, func_of_freq_to_estimate) -
func_of_freq_to_estimate(freq))**2)
return bias, mse
def process(self, key, freq):
"""Processes a data element into the sketch.
Args:
key: The key of the element. We assume the data is aggregated, so the key
has to be unique to this element.
freq: The frequency of this element/key
"""
if key in self.elements:
raise ValueError("Only works for aggregated data: repeated key %s" % key)
# compute_reported_frequency_dist applies the function of frequency used
# for sampling.
weight_dist = self.compute_reported_frequency_dist(freq)
x = random.random()
for reported_freq, prob in weight_dist.items():
x -= prob
if x < 0.0:
if reported_freq != 0:
self.elements[key] = reported_freq
return
def estimate_statistics(self,
key_coeff=lambda x: 1,
func_of_freq_to_estimate=None):
"""Estimates statistics using the frequencies of the keys.
There are two functions applied to the frequencies: One is used to compute
the sampling (this function is given to the constructor). The other is the
one we are trying to estimate in this function (passed as a parameter to
this function).
The estimate is computed by summing the estimator for each one of the keys
in the sample. To support more general statistics than the sum of all
weights, we allow the estimator of key to be multiplied by a constant
(given as a parameter to this function). That is, this function computes an
estimate for:
sum_{key x} {key_coeff(x) * func(frequency of x)}.
Args:
key_coeff: A function that maps each key to its coefficient.
func_of_freq_to_estimate: The function applied to the frequencies. If
None, the function used for sampling will be used.
Returns:
The estimate for sum_{key x} {key_coeff(x) * func(frequency of x)}.
"""
if func_of_freq_to_estimate is None:
func_of_freq_to_estimate = self.func_of_freq
sum_estimate = 0.0
for key, reported_weight in self.elements.items():
sum_estimate += key_coeff(key) * self.estimator(reported_weight,
func_of_freq_to_estimate)
return sum_estimate
class PrivateHistogramAndSample(ThresholdSample):
  """Computes a private histogram and then applies threshold sampling."""
def __init__(self,
threshold,
eps,
delta,
sampling_method=PpsworSamplingMethod,
func_of_freq=lambda x: x):
"""Initializes an empty sample.
Args:
threshold: The sampling threshold
eps: The differential privacy parameter epsilon
delta: The differential privacy parameter delta
sampling_method: A class that provides functions to compute the score and
inclusion probability according to the underlying sampling method (e.g.,
PPSWOR, which is the default value).
func_of_freq: The function applied to the frequency of a key to determine
its sampling weight
"""
super().__init__(threshold, sampling_method, func_of_freq)
self.eps = eps
self.delta = delta
def process(self, key, weight):
# Computes the private weight (i.e., the weight in the histogram), then
# calls the base class (to sample).
private_weight = weight + np.random.default_rng().laplace(scale=1.0 /
self.eps)
if private_weight >= (1.0 / self.eps) * math.log(1.0 / self.delta,
math.e) + 1:
super().process(key, private_weight)
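# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library): the threshold, eps and
# delta values below are arbitrary placeholders, chosen only to exercise the
# PrivateThresholdSampleWithFrequencies API defined above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  demo_sample = PrivateThresholdSampleWithFrequencies(
      threshold=0.5, eps=1.0, delta=1e-6)
  for demo_key, demo_freq in [("a", 3), ("b", 20), ("c", 7)]:
    demo_sample.process(demo_key, demo_freq)
  # Keys that survived the private subsampling, with their reported frequencies.
  print(demo_sample.elements)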
|
the-stack_106_13676
|
# A part of pdfrw (pdfrw.googlecode.com)
# Copyright (C) 2006-2009 Patrick Maupin, Austin, Texas
# MIT license -- See LICENSE.txt for details
'''
A tokenizer for PDF streams.
In general, documentation used was "PDF reference",
sixth edition, for PDF version 1.7, dated November 2006.
'''
from __future__ import generators
try:
set
except NameError:
from sets import Set as set
import re
from pdfobjects import PdfString, PdfObject
class _PrimitiveTokens(object):
# Table 3.1, page 50 of reference, defines whitespace
whitespaceset = set('\x00\t\n\f\r ')
# Text on page 50 defines delimiter characters
delimiterset = set('()<>{}[]/%')
# Coalesce contiguous whitespace into a single token
whitespace_pattern = '[%s]+' % ''.join(whitespaceset)
# In addition to the delimiters, we also use '\', which
# is special in some contexts in PDF.
delimiter_pattern = '\\\\|\\' + '|\\'.join(delimiterset)
# Dictionary delimiters are '<<' and '>>'. Look for
# these before the single variety.
dictdelim_pattern = r'\<\<|\>\>'
pattern = '(%s|%s|%s)' % (whitespace_pattern,
dictdelim_pattern, delimiter_pattern)
re_func = re.compile(pattern).finditer
del whitespace_pattern, dictdelim_pattern
del delimiter_pattern, pattern
def __init__(self, fdata):
class MyIterator(object):
def next():
if not tokens:
startloc = self.startloc
for match in next_match[0]:
start = match.start()
end = match.end()
tappend(fdata[start:end])
if start > startloc:
tappend(fdata[startloc:start])
self.startloc = end
break
else:
s = fdata[startloc:]
self.startloc = len(fdata)
if s:
tappend(s)
if not tokens:
raise StopIteration
return tpop()
next = staticmethod(next)
self.fdata = fdata
self.tokens = tokens = []
self.iterator = iterator = MyIterator()
self.next = iterator.next
self.next_match = next_match = [None]
tappend = tokens.append
tpop = tokens.pop
def setstart(self, startloc):
self.startloc = startloc
self.next_match[0] = self.re_func(self.fdata, startloc)
def __iter__(self):
return self.iterator
def coalesce(self, result):
''' This function coalesces tokens together up until
the next delimiter or whitespace.
All of the coalesced tokens will either be non-matches,
or will be a matched backslash. We distinguish the
non-matches by the fact that next() will have left
a following match inside self.tokens for the actual match.
'''
tokens = self.tokens
whitespace = self.whitespaceset
# Optimized path for usual case -- regular data (not a name string),
# with no escape character, and followed by whitespace.
if tokens:
token = tokens.pop()
if token != '\\':
if token[0] not in whitespace:
tokens.append(token)
return
result.append(token)
# Non-optimized path. Either start of a name string received,
# or we just had one escape.
for token in self:
if tokens:
result.append(token)
token = tokens.pop()
if token != '\\':
if token[0] not in whitespace:
tokens.append(token)
return
result.append(token)
def floc(self):
return self.startloc - sum([len(x) for x in self.tokens])
class PdfTokens(object):
def __init__(self, fdata, startloc=0, strip_comments=True):
def comment(token):
tokens = [token]
for token in primitive:
tokens.append(token)
if token[0] in whitespaceset and ('\n' in token or '\r' in token):
break
return not strip_comments and ''.join(tokens)
def single(token):
return token
def regular_string(token):
def escaped():
escaped = False
i = -2
while tokens[i] == '\\':
escaped = not escaped
i -= 1
return escaped
tokens = [token]
nestlevel = 1
for token in primitive:
tokens.append(token)
if token in '()' and not escaped():
nestlevel += token == '(' or -1
if not nestlevel:
break
else:
assert 0, "Unexpected end of token stream"
return PdfString(''.join(tokens))
def hex_string(token):
tokens = [token]
for token in primitive:
tokens.append(token)
if token == '>':
break
while tokens[-2] == '>>':
tokens.append(tokens.pop(-2))
return PdfString(''.join(tokens))
def normal_data(token):
# Obscure optimization -- we can get here with
# whitespace or regular character data. If we get
# here with whitespace, then there won't be an additional
# token queued up in the primitive object, otherwise there
# will...
if primitive_tokens: #if token[0] not in whitespaceset:
tokens = [token]
primitive.coalesce(tokens)
return PdfObject(''.join(tokens))
def name_string(token):
tokens = [token]
primitive.coalesce(tokens)
token = ''.join(tokens)
if '#' in token:
substrs = token.split('#')
substrs.reverse()
tokens = [substrs.pop()]
while substrs:
s = substrs.pop()
tokens.append(chr(int(s[:2], 16)))
tokens.append(s[2:])
token = ''.join(tokens)
return PdfObject(token)
def broken(token):
assert 0, token
dispatch = {
'(': regular_string,
')': broken,
'<': hex_string,
'>': broken,
'[': single,
']': single,
'{': single,
'}': single,
'/': name_string,
'%' : comment,
'<<': single,
'>>': single,
}.get
class MyIterator(object):
def next():
while not tokens:
token = primitive_next()
token = dispatch(token, normal_data)(token)
if token:
return token
return tokens.pop()
next = staticmethod(next)
self.primitive = primitive = _PrimitiveTokens(fdata)
self.setstart = primitive.setstart
primitive.setstart(startloc)
self.fdata = fdata
self.strip_comments = strip_comments
self.tokens = tokens = []
self.iterator = iterator = MyIterator()
self.next = iterator.next
primitive_next = primitive.next
primitive_tokens = primitive.tokens
whitespaceset = _PrimitiveTokens.whitespaceset
def floc(self):
return self.primitive.floc() - sum([len(x) for x in self.tokens])
floc = property(floc)
def __iter__(self):
return self.iterator
def multiple(self, count):
next = self.next
return [next() for i in range(count)]
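# Illustrative usage sketch (not part of pdfrw): tokenize a small in-memory
# PDF dictionary fragment with the PdfTokens class defined above. The sample
# string below is made up for demonstration purposes.
if __name__ == '__main__':
    example_data = '<< /Type /Page /MediaBox [0 0 612 792] >>'
    for token in PdfTokens(example_data):
        print(token)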
|
the-stack_106_13677
|
import adsk.core
import adsk.fusion
import adsk.cam
import configparser
import os
from enum import Enum
from .apper import apper
from . import config
class Fusion360Template:
SEPARATOR = ":"
# Base parameter name
BASE = "Fusion360" + SEPARATOR
# Bounding box
BOUNDING_BOX_BASE = BASE + "BoundingBox" + SEPARATOR
def __init__(self, name, unit=None):
self.name = name
self.unit = unit
@apper.lib_import(config.lib_path)
def create_template(self):
from inventree.base import ParameterTemplate
ParameterTemplate.create(inv_api(), {
"name": self.name,
"units": self.unit or ""
})
@apper.lib_import(config.lib_path)
def create_parameter(self, part, data):
from inventree.base import Parameter
Parameter.create(inv_api(), {'part': part.pk, 'template': self.pk, 'data': data})
@apper.lib_import(config.lib_path)
def update_parameter(self, part, data):
from inventree.base import Parameter
param = Parameter.list(inv_api(), {
"part": part.pk,
"template": self.pk
})[0]
param.save({
"data": data
})
__PART_TEMPLATE_CACHE = {}
def cache_part_templates(templates):
for template in templates:
Fusion360Template.__PART_TEMPLATE_CACHE[template.name] = template
@property
def pk(self):
return Fusion360Template.__PART_TEMPLATE_CACHE[self.name].pk
class Fusion360Parameters(Enum):
ID = Fusion360Template(Fusion360Template.BASE + "Id", "UUID")
# Physical properties name
AREA = Fusion360Template(Fusion360Template.BASE + "Area", "cm2")
VOLUME = Fusion360Template(Fusion360Template.BASE + "Volume", "cm3")
MASS = Fusion360Template(Fusion360Template.BASE + "Mass", "kg")
DENSITY = Fusion360Template(Fusion360Template.BASE + "Density", "kg/cm3")
MATERIAL = Fusion360Template(Fusion360Template.BASE + "Material")
# Bounding box
BOUNDING_BOX_WIDTH = Fusion360Template(Fusion360Template.BOUNDING_BOX_BASE + "Width", "cm")
BOUNDING_BOX_HEIGHT = Fusion360Template(Fusion360Template.BOUNDING_BOX_BASE + "Height", "cm")
BOUNDING_BOX_DEPTH = Fusion360Template(Fusion360Template.BOUNDING_BOX_BASE + "Depth", "cm")
@apper.lib_import(config.lib_path)
def init_Fusion360():
from inventree.base import ParameterTemplate
existing = [parameter.name for parameter in ParameterTemplate.list(inv_api())]
for variant in Fusion360Parameters:
template = variant.value
if template.name in existing:
continue
template.create_template()
print("Created non-existing parameter template " + template.name)
Fusion360Template.cache_part_templates(ParameterTemplate.list(inv_api()))
# region tracking
@apper.lib_import(config.lib_path)
def init_sentry():
import sentry_sdk
sentry_sdk.init(
"https://[email protected]/6024677",
traces_sample_rate=1.0,
release="0.0.2",
)
config.app_tracking = sentry_sdk
# endregion
# region config
def load_config(ui: adsk.core.UserInterface):
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf.ini')
if os.path.exists(config_path) is False:
TITLE = "InventreeLink - Initialisation"
        POSTFACE = (
            "It seems InvenTreeLink has not been configured yet.\n"
            "Please provide the required information in the\n"
            "next few input boxes.\n"
        )
def ask_user(line, default=""):
(value, cancelled) = ui.inputBox(
line,
TITLE,
default
)
            return value if not cancelled and value != "" else None
ui.messageBox(POSTFACE)
address = ask_user(
"Please enter the server address of the InvenTree instance.",
)
if address is None:
ui.messageBox("Invalid address", TITLE)
return False
token = ask_user(
"Please enter the user token:"
)
if token is None:
ui.messageBox("Invalid token", TITLE)
return False
        part_category = ask_user(
            "Please enter the name of the part category where you would like the parts to show up.",
            "plugin-test"
        )
if part_category is None:
ui.messageBox("Invalid part category", TITLE)
return False
config_text = '\n'.join((
"[SERVER]",
"current = default",
"",
"[default]",
f"{config.CFG_ADDRESS} = {address}",
f"{config.CFG_TOKEN} = {token}",
f"{config.CFG_PART_CATEGORY} = {part_category}",
))
with open(config_path, "w") as f:
f.write(config_text)
try:
config_dict = configparser.ConfigParser()
config_dict.read(config_path)
config.CONFIG = config_dict
except:
return False
return True
def config_get(ref):
""" returns current config """
# SET where config is saved here
crt_srv = config.CONFIG['SERVER']['current'] # ref enables multiple server confs
if ref == 'srv_address':
return config.CONFIG[crt_srv][config.CFG_ADDRESS]
if ref == 'srv_token':
return config.CONFIG[crt_srv][config.CFG_TOKEN]
if ref == config.CFG_PART_CATEGORY:
return config.CONFIG[crt_srv][config.CFG_PART_CATEGORY]
if ref == config.CFG_TEMPLATE_PARAMETER:
return config.CONFIG[crt_srv][config.CFG_TEMPLATE_PARAMETER]
raise NotImplementedError('unknown ref')
@apper.lib_import(config.lib_path)
def config_ref(ref):
    """ returns a (cached) API object based on ref """
from inventree.base import ParameterTemplate
from inventree.part import PartCategory
def get(ref, cat):
""" handles caching of ref-objects """
if config.REF_CACHE.get(ref):
return config.REF_CACHE.get(ref)
ref_vals = [category for category in cat.list(inv_api()) if category.name == config_get(ref)]
if ref_vals:
config.REF_CACHE[ref] = ref_vals[0]
return config.REF_CACHE[ref]
return None
# set the API-objects
if ref == config.CFG_PART_CATEGORY:
return get(ref, PartCategory)
if ref == config.CFG_TEMPLATE_PARAMETER:
return get(ref, ParameterTemplate)
raise NotImplementedError('unknown ref')
# endregion
# region API
@apper.lib_import(config.lib_path)
def inv_api():
""" connect to API """
from inventree.api import InvenTreeAPI
if not config.INV_API:
config.INV_API = InvenTreeAPI(config_get('srv_address'), token=config_get('srv_token'))
return config.INV_API
return config.INV_API
@apper.lib_import(config.lib_path)
def inventree_get_part(part_id):
""" returns a part from InvenTree """
from inventree.part import Part
from inventree.base import Parameter
def search(parameters, part_id):
try:
part = [a.part for a in parameters if a._data['data'] == part_id]
if len(part) == 1:
return Part(inv_api(), part[0])
return False
except Exception as _e:
config.app_tracking.capture_exception(_e)
raise Exception from _e
parameters = Parameter.list(inv_api())
if not parameters:
parameters = []
if type(part_id) in (list, tuple):
result = {}
for cur_id in part_id:
result[cur_id] = search(parameters, cur_id)
return result
return search(parameters, part_id)
# endregion
# region bom functions
def extract_bom():
""" returns bom """
try:
ao = apper.AppObjects()
design = ao.product
if not design:
ao.ui.messageBox('No active design', 'Extract BOM')
return []
# Get all occurrences in the root component of the active design
occs = design.rootComponent.allOccurrences
# Gather information about each unique component
bom = []
for occ in occs:
comp = occ.component
already_exists = False
# Go through the BOM for items previously added
for item in bom:
if item['component'] == comp:
# Increment the instance count of the existing row.
item['instances'] += 1
already_exists = True
break
if already_exists is False:
# Gather any BOM worthy values from the component
volume = 0
bodies = comp.bRepBodies
for bodyK in bodies:
if bodyK.isSolid:
volume += bodyK.volume
# Add this component to the BOM
node = component_info(comp, comp_set=True)
node['volume'] = volume
node['linked'] = occ.isReferencedComponent
bom.append(node)
bom_parts = inventree_get_part([item['id'] for item in bom])
for item in bom:
part = bom_parts[item['id']]
if part is not False:
item['synced'] = True # "<span style='color: green;'> Synced </span>"
else:
item['synced'] = False # "<span style='color: red;'> Not synced </span>"
# Display the BOM
return bom
except Exception as _e:
config.app_tracking.capture_exception(_e)
raise _e
def component_info(comp, parent='#', comp_set=False):
""" returns a node element """
node = {
'name': comp.name,
'IPN': comp.partNumber,
'id': comp.id,
'revision-id': comp.revisionId,
'instances': 1,
'parent': parent,
}
if comp_set:
node['component'] = comp
else:
node['state'] = {'opened': True, 'checkbox_disabled': False}
node["type"] = "4-root_component"
node["text"] = comp.name
return node
def make_component_tree():
""" generates the full tree """
ao = apper.AppObjects()
root = ao.root_comp
node_list = []
root_node = component_info(root)
node_list.append(root_node)
if root.occurrences.count > 0:
make_assembly_nodes(root.occurrences, node_list, root.id)
return node_list
def make_assembly_nodes(occurrences: adsk.fusion.OccurrenceList, node_list, parent):
""" adds one node and checks for others """
for occurrence in occurrences:
node = component_info(occurrence.component, parent)
if occurrence.childOccurrences.count > 0:
node["type"] = "4-component_group"
node_list.append(node)
make_assembly_nodes(occurrence.childOccurrences, node_list, occurrence.component.id)
else:
node["type"] = "4-component"
node_list.append(node)
# endregion
|
the-stack_106_13678
|
from ocdskingfisherarchive.cache import Cache
from ocdskingfisherarchive.crawl import Crawl
def test_cache(tmpdir):
query = Crawl('scotland', '20200902_052458')
crawl = Crawl('scotland', '20200902_052458', tmpdir, None)
crawl.reject_reason
# Initialize.
Cache(str(tmpdir.join('cache.sqlite3')))
# Initialize existing.
cache = Cache(str(tmpdir.join('cache.sqlite3')))
# Get.
assert cache.get(query) == query
# Set and get.
crawl.archived = True
cache.set(crawl)
crawl = cache.get(query)
assert crawl.asdict() == {
'id': 'scotland/20200902_052458',
'source_id': 'scotland',
'data_version': '20200902_052458',
'bytes': None,
'checksum': None,
'errors_count': None,
'files_count': None,
'reject_reason': 'no_data_directory',
'archived': True,
}
# Set and get existing.
crawl.archived = False
cache.set(crawl)
crawl = cache.get(query)
assert crawl.asdict() == {
'id': 'scotland/20200902_052458',
'source_id': 'scotland',
'data_version': '20200902_052458',
'bytes': None,
'checksum': None,
'errors_count': None,
'files_count': None,
'reject_reason': 'no_data_directory',
'archived': False,
}
# Delete.
cache.delete(crawl)
assert cache.get(query) == query
|
the-stack_106_13680
|
"""
This module contains all expressions and classes needed for lazy computation/ query execution.
"""
import os
import shutil
import subprocess
import tempfile
import typing as tp
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union
import polars as pl
try:
from polars.polars import PyExpr, PyLazyFrame, PyLazyGroupBy
_DOCUMENTING = False
except ImportError:
_DOCUMENTING = True
from ..datatypes import DataType, pytype_to_polars_type
from ..utils import _process_null_values
from .expr import Expr, _selection_to_pyexpr_list, col, expr_to_lit_or_expr, lit
__all__ = [
"LazyFrame",
]
def wrap_ldf(ldf: "PyLazyFrame") -> "LazyFrame":
return LazyFrame._from_pyldf(ldf)
class LazyFrame:
"""
Representation of a Lazy computation graph/ query.
"""
def __init__(self) -> None:
self._ldf: PyLazyFrame
@staticmethod
def _from_pyldf(ldf: "PyLazyFrame") -> "LazyFrame":
self = LazyFrame.__new__(LazyFrame)
self._ldf = ldf
return self
@staticmethod
def scan_csv(
file: str,
has_headers: bool = True,
ignore_errors: bool = False,
sep: str = ",",
skip_rows: int = 0,
stop_after_n_rows: Optional[int] = None,
cache: bool = True,
dtype: Optional[Dict[str, Type[DataType]]] = None,
low_memory: bool = False,
comment_char: Optional[str] = None,
null_values: Optional[Union[str, tp.List[str], Dict[str, str]]] = None,
) -> "LazyFrame":
"""
See Also: `pl.scan_csv`
"""
dtype_list: Optional[tp.List[Tuple[str, Type[DataType]]]] = None
if dtype is not None:
dtype_list = []
for k, v in dtype.items():
dtype_list.append((k, pytype_to_polars_type(v)))
processed_null_values = _process_null_values(null_values)
self = LazyFrame.__new__(LazyFrame)
self._ldf = PyLazyFrame.new_from_csv(
file,
sep,
has_headers,
ignore_errors,
skip_rows,
stop_after_n_rows,
cache,
dtype_list,
low_memory,
comment_char,
processed_null_values,
)
return self
@staticmethod
def scan_parquet(
file: str, stop_after_n_rows: Optional[int] = None, cache: bool = True
) -> "LazyFrame":
"""
See Also: `pl.scan_parquet`
"""
self = LazyFrame.__new__(LazyFrame)
self._ldf = PyLazyFrame.new_from_parquet(file, stop_after_n_rows, cache)
return self
def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""
Apply a function on Self.
Parameters
----------
func
Callable.
args
Arguments.
kwargs
Keyword arguments.
"""
return func(self, *args, **kwargs)
def describe_plan(self) -> str:
"""
A string representation of the unoptimized query plan.
"""
return self._ldf.describe_plan()
def describe_optimized_plan(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
) -> str:
"""
A string representation of the optimized query plan.
"""
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache=False,
)
return ldf.describe_optimized_plan()
def show_graph(
self,
optimized: bool = True,
show: bool = True,
output_path: Optional[str] = None,
raw_output: bool = False,
figsize: Tuple[float, float] = (16.0, 12.0),
) -> Optional[str]:
"""
Show a plot of the query plan. Note that you should have graphviz installed.
Parameters
----------
optimized
Optimize the query plan.
show
Show the figure.
output_path
Write the figure to disk.
raw_output
Return dot syntax.
figsize
            Passed to matplotlib if `show` == True.
"""
try:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"Graphviz dot binary should be on your PATH and matplotlib should be installed to show graph."
)
dot = self._ldf.to_dot(optimized)
if raw_output:
return dot
with tempfile.TemporaryDirectory() as tmpdir_name:
dot_path = os.path.join(tmpdir_name, "dot")
with open(dot_path, "w") as f:
f.write(dot)
subprocess.run(["dot", "-Nshape=box", "-Tpng", "-O", dot_path])
out_path = os.path.join(tmpdir_name, "dot.png")
if output_path is not None:
shutil.copy(out_path, output_path)
if show:
plt.figure(figsize=figsize)
img = mpimg.imread(out_path)
plt.imshow(img)
plt.show()
return None
def inspect(self, fmt: str = "{}") -> "pl.LazyFrame": # type: ignore
"""
Prints the value that this node in the computation graph evaluates to and passes on the value.
>>> (df.select(col("foo").cumsum().alias("bar"))
>>> .inspect() # print the node before the filter
>>> .filter(col("bar") == col("foo")))
"""
def inspect(s: "pl.DataFrame") -> "pl.DataFrame":
print(fmt.format(s)) # type: ignore
return s
return self.map(inspect, predicate_pushdown=True, projection_pushdown=True)
def sort(
self,
by: Union[str, "Expr", tp.List["Expr"]],
reverse: Union[bool, tp.List[bool]] = False,
) -> "LazyFrame":
"""
Sort the DataFrame by:
- A single column name
- An expression
- Multiple expressions
Parameters
----------
by
Column (expressions) to sort by.
reverse
Whether or not to sort in reverse order.
"""
if type(by) is str:
return wrap_ldf(self._ldf.sort(by, reverse))
if type(reverse) is bool:
reverse = [reverse]
by = expr_to_lit_or_expr(by, str_to_lit=False)
by = _selection_to_pyexpr_list(by)
return wrap_ldf(self._ldf.sort_by_exprs(by, reverse))
def collect(
self,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = False,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Collect into a DataFrame.
Parameters
----------
type_coercion
Do type coercion optimization.
predicate_pushdown
Do predicate pushdown optimization.
projection_pushdown
Do projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
Caution!
If you already have set a global string cache, set this to `False` as this will reset the
global cache when the query is finished.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.collect())
def fetch(
self,
n_rows: int = 500,
type_coercion: bool = True,
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
simplify_expression: bool = True,
string_cache: bool = True,
no_optimization: bool = False,
) -> "pl.DataFrame":
"""
Fetch is like a collect operation, but it overwrites the number of rows read by every scan
operation. This is a utility that helps debug a query on a smaller number of rows.
Note that the fetch does not guarantee the final number of rows in the DataFrame.
Filter, join operations and a lower number of rows available in the scanned file influence
the final number of rows.
Parameters
----------
n_rows
Collect n_rows from the data sources.
type_coercion
Run type coercion optimization.
predicate_pushdown
Run predicate pushdown optimization.
projection_pushdown
Run projection pushdown optimization.
simplify_expression
Run simplify expressions optimization.
string_cache
Use a global string cache in this query.
This is needed if you want to join on categorical columns.
no_optimization
Turn off optimizations.
Returns
-------
DataFrame
"""
if no_optimization:
predicate_pushdown = False
projection_pushdown = False
ldf = self._ldf.optimization_toggle(
type_coercion,
predicate_pushdown,
projection_pushdown,
simplify_expression,
string_cache,
)
return pl.eager.frame.wrap_df(ldf.fetch(n_rows))
@property
def columns(self) -> tp.List[str]:
"""
        Get the column names.
Examples
--------
>>> df = (pl.DataFrame({
>>> "foo": [1, 2, 3],
>>> "bar": [6, 7, 8],
>>> "ham": ['a', 'b', 'c']
>>> }).lazy()
>>> .select(["foo", "bar"]))
>>> df.columns
["foo", "bar"]
"""
return self._ldf.columns()
def cache(
self,
) -> "LazyFrame":
"""
Cache the result once the execution of the physical plan hits this node.
"""
return wrap_ldf(self._ldf.cache())
def filter(self, predicate: "Expr") -> "LazyFrame":
"""
Filter the rows in the DataFrame based on a predicate expression.
Parameters
----------
predicate
Expression that evaluates to a boolean Series.
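        Examples
        --------
        A minimal illustrative sketch (the column name is arbitrary):
        >>> lf = pl.DataFrame({"a": [1, 2, 3]}).lazy()
        >>> lf.filter(col("a") > 1).collect()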
"""
if isinstance(predicate, str):
predicate = col(predicate)
return wrap_ldf(self._ldf.filter(predicate._pyexpr))
def select(
self, exprs: Union[str, "Expr", Sequence[str], Sequence["Expr"]]
) -> "LazyFrame":
"""
Select columns from this DataFrame.
Parameters
----------
exprs
Column or columns to select.
"""
exprs = _selection_to_pyexpr_list(exprs)
return wrap_ldf(self._ldf.select(exprs))
def groupby(
self,
by: Union[str, tp.List[str], "Expr", tp.List["Expr"]],
maintain_order: bool = False,
) -> "LazyGroupBy":
"""
Start a groupby operation.
Parameters
----------
by
Column(s) to group by.
maintain_order
            Make sure that the order of the groups remains consistent. This is more expensive than a default groupby.
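        Examples
        --------
        A minimal illustrative sketch (column names are arbitrary):
        >>> (pl.DataFrame({"g": ["a", "a", "b"], "v": [1, 2, 3]}).lazy()
        >>>  .groupby("g")
        >>>  .agg([col("v").sum()])
        >>>  .collect())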
"""
new_by: tp.List[PyExpr]
if isinstance(by, list):
new_by = []
for e in by:
if isinstance(e, str):
e = col(e)
new_by.append(e._pyexpr)
elif isinstance(by, str):
new_by = [col(by)._pyexpr]
elif isinstance(by, Expr):
new_by = [by._pyexpr]
lgb = self._ldf.groupby(new_by, maintain_order)
return LazyGroupBy(lgb)
def join(
self,
ldf: "LazyFrame",
left_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
right_on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
on: Optional[Union[str, "Expr", tp.List[str], tp.List["Expr"]]] = None,
how: str = "inner",
allow_parallel: bool = True,
force_parallel: bool = False,
) -> "LazyFrame":
"""
Add a join operation to the Logical Plan.
Parameters
----------
ldf
Lazy DataFrame to join with.
left_on
Join column of the left DataFrame.
right_on
Join column of the right DataFrame.
on
Join column of both DataFrames. If set, `left_on` and `right_on` should be None.
how
one of:
"inner"
"left"
"outer"
"asof",
"cross"
allow_parallel
Allow the physical plan to optionally evaluate the computation of both DataFrames up to the join in parallel.
force_parallel
Force the physical plan to evaluate the computation of both DataFrames up to the join in parallel.
# Asof joins
This is similar to a left-join except that we match on nearest key rather than equal keys.
The keys must be sorted to perform an asof join
"""
if how == "cross":
return wrap_ldf(
self._ldf.join(ldf._ldf, [], [], allow_parallel, force_parallel, how)
)
left_on_: Union[tp.List[str], tp.List[Expr], None]
if isinstance(left_on, (str, Expr)):
left_on_ = [left_on] # type: ignore[assignment]
else:
left_on_ = left_on
right_on_: Union[tp.List[str], tp.List[Expr], None]
if isinstance(right_on, (str, Expr)):
right_on_ = [right_on] # type: ignore[assignment]
else:
right_on_ = right_on
if isinstance(on, str):
left_on_ = [on]
right_on_ = [on]
elif isinstance(on, list):
left_on_ = on
right_on_ = on
if left_on_ is None or right_on_ is None:
raise ValueError("You should pass the column to join on as an argument.")
new_left_on = []
for column in left_on_:
if isinstance(column, str):
column = col(column)
new_left_on.append(column._pyexpr)
new_right_on = []
for column in right_on_:
if isinstance(column, str):
column = col(column)
new_right_on.append(column._pyexpr)
return wrap_ldf(
self._ldf.join(
ldf._ldf, new_left_on, new_right_on, allow_parallel, force_parallel, how
)
)
def with_columns(self, exprs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
"""
Add or overwrite multiple columns in a DataFrame.
Parameters
----------
exprs
List of Expressions that evaluate to columns.
"""
if isinstance(exprs, Expr):
return self.with_column(exprs)
pyexprs = []
for e in exprs:
if isinstance(e, Expr):
pyexprs.append(e._pyexpr)
elif isinstance(e, pl.Series):
pyexprs.append(lit(e)._pyexpr)
return wrap_ldf(self._ldf.with_columns(pyexprs))
def with_column(self, expr: "Expr") -> "LazyFrame":
"""
Add or overwrite column in a DataFrame.
Parameters
----------
expr
Expression that evaluates to column.
"""
return self.with_columns([expr])
def drop_columns(self, columns: tp.List[str]) -> "LazyFrame":
"""
Remove multiple columns from a DataFrame.
Parameters
----------
columns
List of column names.
"""
return wrap_ldf(self._ldf.drop_columns(columns))
def drop_column(self, column: str) -> "LazyFrame":
"""
Remove a column from the DataFrame.
Parameters
----------
column
Name of the column that should be removed.
"""
return self.drop_columns([column])
def with_column_renamed(self, existing_name: str, new_name: str) -> "LazyFrame":
"""
Rename a column in the DataFrame
"""
return wrap_ldf(self._ldf.with_column_renamed(existing_name, new_name))
def reverse(self) -> "LazyFrame":
"""
Reverse the DataFrame.
"""
return wrap_ldf(self._ldf.reverse())
def shift(self, periods: int) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with `Nones`.
Parameters
----------
periods
Number of places to shift (may be negative).
"""
return wrap_ldf(self._ldf.shift(periods))
def shift_and_fill(
self, periods: int, fill_value: Union["Expr", int, str, float]
) -> "LazyFrame":
"""
Shift the values by a given period and fill the parts that will be empty due to this operation
with the result of the `fill_value` expression.
Parameters
----------
periods
Number of places to shift (may be negative).
fill_value
fill None values with the result of this expression.
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.shift_and_fill(periods, fill_value._pyexpr))
def slice(self, offset: int, length: int) -> "LazyFrame":
"""
Slice the DataFrame.
Parameters
----------
offset
Start index.
length
Length of the slice.
"""
return wrap_ldf(self._ldf.slice(offset, length))
def limit(self, n: int) -> "LazyFrame":
"""
Limit the DataFrame to the first `n` rows. Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.slice(0, n)
def head(self, n: int) -> "LazyFrame":
"""
Get the first `n` rows of the DataFrame
Note if you don't want the rows to be scanned,
use the `fetch` operation.
Parameters
----------
n
Number of rows.
"""
return self.limit(n)
def tail(self, n: int) -> "LazyFrame":
"""
Get the last `n` rows of the DataFrame.
Parameters
----------
n
Number of rows.
"""
return wrap_ldf(self._ldf.tail(n))
def last(self) -> "LazyFrame":
"""
Get the last row of the DataFrame.
"""
return self.tail(1)
def first(self) -> "LazyFrame":
"""
Get the first row of the DataFrame.
"""
return self.slice(0, 1)
def fill_null(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
"""
Fill missing values
Parameters
----------
fill_value
Value to fill the missing values with
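        Examples
        --------
        A minimal illustrative sketch (column name and fill value are arbitrary):
        >>> lf = pl.DataFrame({"a": [1, None, 3]}).lazy()
        >>> lf.fill_null(0).collect()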
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.fill_null(fill_value._pyexpr))
def fill_nan(self, fill_value: Union[int, str, "Expr"]) -> "LazyFrame":
"""
Fill floating point NaN values.
        .. warning::
            Note that floating point NaN (Not a Number) values are not missing values!
            To replace missing values, use `fill_null`.
Parameters
----------
fill_value
Value to fill the NaN values with
"""
if not isinstance(fill_value, Expr):
fill_value = lit(fill_value)
return wrap_ldf(self._ldf.fill_nan(fill_value._pyexpr))
def std(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their standard deviation value.
"""
return wrap_ldf(self._ldf.std())
def var(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their variance value.
"""
return wrap_ldf(self._ldf.var())
def max(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their maximum value.
"""
return wrap_ldf(self._ldf.max())
def min(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their minimum value.
"""
return wrap_ldf(self._ldf.min())
def sum(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their sum value.
"""
return wrap_ldf(self._ldf.sum())
def mean(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their mean value.
"""
return wrap_ldf(self._ldf.mean())
def median(self) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their median value.
"""
return wrap_ldf(self._ldf.median())
def quantile(self, quantile: float) -> "LazyFrame":
"""
Aggregate the columns in the DataFrame to their quantile value.
"""
return wrap_ldf(self._ldf.quantile(quantile))
def explode(
self, columns: Union[str, tp.List[str], "Expr", tp.List["Expr"]]
) -> "LazyFrame":
"""
Explode lists to long format.
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [[1, 2], [1, 3], [4, 3], [5, 5, 5], [6], [2, 1, 2]]
>>> })
>>> df
shape: (6, 2)
╭─────────┬────────────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ list [i64] │
╞═════════╪════════════╡
│ "c" ┆ [1, 2] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [1, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [4, 3] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "c" ┆ [5, 5, 5] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "a" ┆ [6] │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤
│ "b" ┆ [2, 1, 2] │
╰─────────┴────────────╯
>>> df.explode("nrs")
shape: (13, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ ... ┆ ... │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 2 │
╰─────────┴─────╯
"""
columns = _selection_to_pyexpr_list(columns)
return wrap_ldf(self._ldf.explode(columns))
def drop_duplicates(
self,
maintain_order: bool = False,
subset: Optional[Union[tp.List[str], str]] = None,
) -> "LazyFrame":
"""
Drop duplicate rows from this DataFrame.
Note that this fails if there is a column of type `List` in the DataFrame.
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_ldf(self._ldf.drop_duplicates(maintain_order, subset))
def drop_nulls(
self, subset: Optional[Union[tp.List[str], str]] = None
) -> "LazyFrame":
"""
Drop rows with null values from this DataFrame.
"""
if subset is not None and not isinstance(subset, list):
subset = [subset]
return wrap_ldf(self._ldf.drop_nulls(subset))
def melt(
self, id_vars: Union[str, tp.List[str]], value_vars: Union[str, tp.List[str]]
) -> "LazyFrame":
"""
Unpivot DataFrame to long format.
Parameters
----------
id_vars
Columns to use as identifier variables.
value_vars
            Columns to use as value variables; these are the columns that will be unpivoted.
"""
if isinstance(value_vars, str):
value_vars = [value_vars]
if isinstance(id_vars, str):
id_vars = [id_vars]
return wrap_ldf(self._ldf.melt(id_vars, value_vars))
def map(
self,
f: Callable[["pl.DataFrame"], "pl.DataFrame"],
predicate_pushdown: bool = True,
projection_pushdown: bool = True,
no_optimizations: bool = False,
) -> "LazyFrame":
"""
Apply a custom function. It is important that the function returns a Polars DataFrame.
Parameters
----------
f
Lambda/ function to apply.
predicate_pushdown
Allow predicate pushdown optimization to pass this node.
projection_pushdown
Allow projection pushdown optimization to pass this node.
no_optimizations
Turn off all optimizations past this point.
"""
        if no_optimizations:
predicate_pushdown = False
projection_pushdown = False
return wrap_ldf(self._ldf.map(f, predicate_pushdown, projection_pushdown))
def interpolate(self) -> "LazyFrame":
"""
Interpolate intermediate values. The interpolation method is linear.
"""
return self.select(pl.col("*").interpolate()) # type: ignore
class LazyGroupBy:
"""
    Created by `df.lazy().groupby("foo")`
"""
def __init__(self, lgb: "PyLazyGroupBy"):
self.lgb = lgb
def agg(self, aggs: Union[tp.List["Expr"], "Expr"]) -> "LazyFrame":
"""
        Describe the aggregation that needs to be done on a group.
Parameters
----------
aggs
Single/ Multiple aggregation expression(s).
Examples
--------
>>> (pl.scan_csv("data.csv")
.groupby("groups")
.agg([
pl.col("name").n_unique().alias("unique_names"),
pl.max("values")
])
)
"""
aggs = _selection_to_pyexpr_list(aggs)
return wrap_ldf(self.lgb.agg(aggs))
def head(self, n: int = 5) -> "LazyFrame":
"""
Return first n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .head(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
╰─────────┴─────╯
"""
return wrap_ldf(self.lgb.head(n))
def tail(self, n: int = 5) -> "LazyFrame":
"""
Return last n rows of each group.
Parameters
----------
n
Number of values of the group to select
Examples
--------
>>> df = pl.DataFrame({
>>> "letters": ["c", "c", "a", "c", "a", "b"],
>>> "nrs": [1, 2, 3, 4, 5, 6]
>>> })
>>> df
shape: (6, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "c" ┆ 1 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
╰─────────┴─────╯
>>> (df.groupby("letters")
>>> .tail(2)
>>> .sort("letters")
>>> )
shape: (5, 2)
╭─────────┬─────╮
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ "a" ┆ 3 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "a" ┆ 5 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "b" ┆ 6 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 2 │
├╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌┤
│ "c" ┆ 4 │
╰─────────┴─────╯
"""
return wrap_ldf(self.lgb.tail(n))
def apply(self, f: Callable[["pl.DataFrame"], "pl.DataFrame"]) -> "LazyFrame":
"""
        Apply a function over the groups as a new `DataFrame`. It is not recommended that you use
        this, as materializing the `DataFrame` is quite expensive.
Parameters
----------
f
Function to apply over the `DataFrame`.
"""
return wrap_ldf(self.lgb.apply(f))
|
the-stack_106_13681
|
#!/usr/local/bin/python
# Script to convert original .MOV video files to both .MP3 and .WAV audio files
import os
import subprocess
import moviepy.editor as mp
def main():
# Create wav files from videos (run once)
mov_to_wav()
# Create mp3 files from videos (run once)
# mov_to_mp3()
def mov_to_wav():
"""
Uses subprocess to run ffmpeg commands in the shell to convert original
.MOV files to .WAV audio files
"""
source_dir = "./SourceFiles/"
source_path = os.path.abspath(source_dir) + "/"
dest_path = os.path.abspath("./WAVFiles") + "/"
for filename in os.listdir(source_dir):
print(filename)
if filename == ".DS_Store":
continue
src_file = source_path + filename
dest_file = dest_path + filename[:-3] + "wav"
command = "ffmpeg -i {} -ab 160k -ac 2 -ar 44100 -vn {}".format(
src_file, dest_file)
subprocess.call(command, shell=True)
def mov_to_mp3():
"""
Uses moviepy library to convert original .MOV files to .MP3 audio files
"""
source_dir = "./SourceFiles/"
source_path = os.path.abspath(source_dir) + "/"
dest_path = os.path.abspath("./MP3Files") + "/"
for filename in os.listdir(source_dir):
print(filename)
if filename == ".DS_Store":
continue
clip = mp.VideoFileClip(source_path + filename)
clip.audio.write_audiofile(dest_path + filename[:-3] + "mp3")
if __name__ == '__main__':
main()
|
the-stack_106_13682
|
'''Multiple Testing and P-Value Correction
Author: Josef Perktold
License: BSD-3
'''
import numpy as np
from statsmodels.stats._knockoff import RegressionFDR
__all__ = ['fdrcorrection', 'fdrcorrection_twostage', 'local_fdr',
'multipletests', 'NullDistribution', 'RegressionFDR']
# ==============================================
#
# Part 1: Multiple Tests and P-Value Correction
#
# ==============================================
def _ecdf(x):
'''no frills empirical cdf used in fdrcorrection
'''
nobs = len(x)
return np.arange(1,nobs+1)/float(nobs)
multitest_methods_names = {'b': 'Bonferroni',
's': 'Sidak',
'h': 'Holm',
'hs': 'Holm-Sidak',
'sh': 'Simes-Hochberg',
'ho': 'Hommel',
'fdr_bh': 'FDR Benjamini-Hochberg',
'fdr_by': 'FDR Benjamini-Yekutieli',
'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg',
'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli',
'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'
}
_alias_list = [['b', 'bonf', 'bonferroni'],
['s', 'sidak'],
['h', 'holm'],
['hs', 'holm-sidak'],
['sh', 'simes-hochberg'],
['ho', 'hommel'],
['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp'],
['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr'],
['fdr_tsbh', 'fdr_2sbh'],
['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage'],
['fdr_gbs']
]
multitest_alias = {}
for m in _alias_list:
multitest_alias[m[0]] = m[0]
for a in m[1:]:
multitest_alias[a] = m[0]
def multipletests(pvals, alpha=0.05, method='hs', is_sorted=False,
returnsorted=False):
"""
Test results and p-value correction for multiple tests
Parameters
----------
pvals : array_like, 1-d
uncorrected p-values. Must be 1-dimensional.
alpha : float
FWER, family-wise error rate, e.g. 0.1
method : str
Method used for testing and adjustment of pvalues. Can be either the
full name or initial letters. Available methods are:
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` : step down method using Sidak adjustments
- `holm` : step-down method using Bonferroni adjustments
- `simes-hochberg` : step-up method (independent)
- `hommel` : closed method based on Simes tests (non-negative)
- `fdr_bh` : Benjamini/Hochberg (non-negative)
- `fdr_by` : Benjamini/Yekutieli (negative)
- `fdr_tsbh` : two stage fdr correction (non-negative)
- `fdr_tsbky` : two stage fdr correction (non-negative)
is_sorted : bool
If False (default), the p_values will be sorted, but the corrected
pvalues are in the original order. If True, then it assumed that the
pvalues are already sorted in ascending order.
returnsorted : bool
not tested, return sorted p-values instead of original sequence
Returns
-------
reject : ndarray, boolean
true for hypothesis that can be rejected for given alpha
pvals_corrected : ndarray
p-values corrected for multiple tests
alphacSidak : float
corrected alpha for Sidak method
alphacBonf : float
corrected alpha for Bonferroni method
Notes
-----
There may be API changes for this function in the future.
Except for 'fdr_twostage', the p-value correction is independent of the
alpha specified as argument. In these cases the corrected p-values
can also be compared with a different alpha. In the case of 'fdr_twostage',
the corrected p-values are specific to the given alpha, see
``fdrcorrection_twostage``.
The 'fdr_gbs' procedure is not verified against another package, p-values
are derived from scratch and are not derived in the reference. In Monte
Carlo experiments the method worked correctly and maintained the false
discovery rate.
All procedures that are included control FWER or FDR in the independent
case, and most are robust in the positively correlated case.
`fdr_gbs`: high power, fdr control for independent case and only small
violation in positively correlated case
**Timing**:
Most of the time with large arrays is spent in `argsort`. When
we want to calculate the p-value for several methods, then it is more
efficient to presort the pvalues, and put the results back into the
original order outside of the function.
Method='hommel' is very slow for large arrays, since it requires the
evaluation of n partitions, where n is the number of p-values.
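Examples
--------
A minimal illustrative sketch (the p-values below are arbitrary):
>>> import numpy as np
>>> from statsmodels.stats.multitest import multipletests
>>> pvals = np.array([0.001, 0.02, 0.04, 0.30])
>>> reject, pvals_corrected, _, _ = multipletests(pvals, alpha=0.05, method='fdr_bh')
>>> int(reject.sum())
2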
"""
import gc
pvals = np.asarray(pvals)
alphaf = alpha # Notation ?
if not is_sorted:
sortind = np.argsort(pvals)
pvals = np.take(pvals, sortind)
ntests = len(pvals)
alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
alphacBonf = alphaf / float(ntests)
if method.lower() in ['b', 'bonf', 'bonferroni']:
reject = pvals <= alphacBonf
pvals_corrected = pvals * float(ntests)
elif method.lower() in ['s', 'sidak']:
reject = pvals <= alphacSidak
pvals_corrected = 1 - np.power((1. - pvals), ntests)
elif method.lower() in ['hs', 'holm-sidak']:
alphacSidak_all = 1 - np.power((1. - alphaf),
1./np.arange(ntests, 0, -1))
notreject = pvals > alphacSidak_all
del alphacSidak_all
nr_index = np.nonzero(notreject)[0]
if nr_index.size == 0:
# nonreject is empty, all rejected
notrejectmin = len(pvals)
else:
notrejectmin = np.min(nr_index)
notreject[notrejectmin:] = True
reject = ~notreject
del notreject
# It's equivalent to 1 - np.power((1. - pvals),
# np.arange(ntests, 0, -1))
# but prevents the issue of the floating point precision
pvals_corrected_raw = -np.expm1(np.arange(ntests, 0, -1)*np.log1p(-pvals))
pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
del pvals_corrected_raw
elif method.lower() in ['h', 'holm']:
notreject = pvals > alphaf / np.arange(ntests, 0, -1)
nr_index = np.nonzero(notreject)[0]
if nr_index.size == 0:
# nonreject is empty, all rejected
notrejectmin = len(pvals)
else:
notrejectmin = np.min(nr_index)
notreject[notrejectmin:] = True
reject = ~notreject
pvals_corrected_raw = pvals * np.arange(ntests, 0, -1)
pvals_corrected = np.maximum.accumulate(pvals_corrected_raw)
del pvals_corrected_raw
gc.collect()
elif method.lower() in ['sh', 'simes-hochberg']:
alphash = alphaf / np.arange(ntests, 0, -1)
reject = pvals <= alphash
rejind = np.nonzero(reject)
if rejind[0].size > 0:
rejectmax = np.max(np.nonzero(reject))
reject[:rejectmax] = True
pvals_corrected_raw = np.arange(ntests, 0, -1) * pvals
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
elif method.lower() in ['ho', 'hommel']:
# we need a copy because we overwrite it in a loop
a = pvals.copy()
for m in range(ntests, 1, -1):
cim = np.min(m * pvals[-m:] / np.arange(1,m+1.))
a[-m:] = np.maximum(a[-m:], cim)
a[:-m] = np.maximum(a[:-m], np.minimum(m * pvals[:-m], cim))
pvals_corrected = a
reject = a <= alphaf
elif method.lower() in ['fdr_bh', 'fdr_i', 'fdr_p', 'fdri', 'fdrp']:
# delegate, call with sorted pvals
reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
method='indep',
is_sorted=True)
elif method.lower() in ['fdr_by', 'fdr_n', 'fdr_c', 'fdrn', 'fdrcorr']:
# delegate, call with sorted pvals
reject, pvals_corrected = fdrcorrection(pvals, alpha=alpha,
method='n',
is_sorted=True)
elif method.lower() in ['fdr_tsbky', 'fdr_2sbky', 'fdr_twostage']:
# delegate, call with sorted pvals
reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
method='bky',
is_sorted=True)[:2]
elif method.lower() in ['fdr_tsbh', 'fdr_2sbh']:
# delegate, call with sorted pvals
reject, pvals_corrected = fdrcorrection_twostage(pvals, alpha=alpha,
method='bh',
is_sorted=True)[:2]
elif method.lower() in ['fdr_gbs']:
#adaptive stepdown in Gavrilov, Benjamini, Sarkar, Annals of Statistics 2009
## notreject = pvals > alphaf / np.arange(ntests, 0, -1) #alphacSidak
## notrejectmin = np.min(np.nonzero(notreject))
## notreject[notrejectmin:] = True
## reject = ~notreject
ii = np.arange(1, ntests + 1)
q = (ntests + 1. - ii)/ii * pvals / (1. - pvals)
pvals_corrected_raw = np.maximum.accumulate(q)  # enforce the monotone (non-decreasing) requirement going up
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
reject = pvals_corrected <= alpha
else:
raise ValueError('method not recognized')
if pvals_corrected is not None: #not necessary anymore
pvals_corrected[pvals_corrected>1] = 1
if is_sorted or returnsorted:
return reject, pvals_corrected, alphacSidak, alphacBonf
else:
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[sortind] = reject
return reject_, pvals_corrected_, alphacSidak, alphacBonf
def fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False):
'''
pvalue correction for false discovery rate.
This covers Benjamini/Hochberg for independent or positively correlated and
Benjamini/Yekutieli for general or negatively correlated tests.
Parameters
----------
pvals : array_like, 1d
Set of p-values of the individual tests.
alpha : float, optional
Family-wise error rate. Defaults to ``0.05``.
method : {'i', 'indep', 'p', 'poscorr', 'n', 'negcorr'}, optional
Which method to use for FDR correction.
``{'i', 'indep', 'p', 'poscorr'}`` all refer to ``fdr_bh``
(Benjamini/Hochberg for independent or positively
correlated tests). ``{'n', 'negcorr'}`` both refer to ``fdr_by``
(Benjamini/Yekutieli for general or negatively correlated tests).
Defaults to ``'indep'``.
is_sorted : bool, optional
If False (default), the p_values will be sorted, but the corrected
pvalues are in the original order. If True, then it is assumed that the
pvalues are already sorted in ascending order.
Returns
-------
rejected : ndarray, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : ndarray
pvalues adjusted for multiple hypothesis testing to limit FDR
Notes
-----
If there is prior information on the fraction of true hypotheses, then alpha
should be set to ``alpha * m/m_0`` where m is the number of tests,
given by the p-values, and m_0 is an estimate of the number of true hypotheses.
(see Benjamini, Krieger and Yekutieli)
The two-step method of Benjamini, Krieger and Yekutieli that estimates the number
of false hypotheses will be available (soon).
Both methods exposed via this function (Benjamini/Hochberg, Benjamini/Yekutieli)
are also available in the function ``multipletests``, as ``method="fdr_bh"`` and
``method="fdr_by"``, respectively.
See also
--------
multipletests
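Examples
--------
A small sketch with arbitrary inputs:
>>> import numpy as np
>>> from statsmodels.stats.multitest import fdrcorrection
>>> pvals = np.array([0.001, 0.02, 0.04, 0.30])
>>> rejected, pvals_adjusted = fdrcorrection(pvals, alpha=0.05)
>>> int(rejected.sum())
2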
'''
pvals = np.asarray(pvals)
assert pvals.ndim == 1, "pvals must be 1-dimensional, that is of shape (n,)"
if not is_sorted:
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
else:
pvals_sorted = pvals # alias
if method in ['i', 'indep', 'p', 'poscorr']:
ecdffactor = _ecdf(pvals_sorted)
elif method in ['n', 'negcorr']:
cm = np.sum(1./np.arange(1, len(pvals_sorted)+1)) #corrected this
ecdffactor = _ecdf(pvals_sorted) / cm
## elif method in ['n', 'negcorr']:
## cm = np.sum(np.arange(len(pvals)))
## ecdffactor = ecdf(pvals_sorted)/cm
else:
raise ValueError('only indep and negcorr implemented')
reject = pvals_sorted <= ecdffactor*alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
del pvals_corrected_raw
pvals_corrected[pvals_corrected>1] = 1
if not is_sorted:
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[pvals_sortind] = pvals_corrected
del pvals_corrected
reject_ = np.empty_like(reject)
reject_[pvals_sortind] = reject
return reject_, pvals_corrected_
else:
return reject, pvals_corrected
def fdrcorrection_twostage(pvals, alpha=0.05, method='bky', iter=False,
is_sorted=False):
'''(iterated) two stage linear step-up procedure with estimation of number of true
hypotheses
Benjamini, Krieger and Yekutieli, procedure in Definition 6
Parameters
----------
pvals : array_like
set of p-values of the individual tests.
alpha : float
error rate
method : {'bky', 'bh'}
see Notes for details
* 'bky' - implements the procedure in Definition 6 of Benjamini, Krieger
and Yekutieli 2006
* 'bh' - the two stage method of Benjamini and Hochberg
iter : bool
If True, iterate the procedure until the number of rejections no longer
changes; if False (default), apply only the simple two-stage procedure.
Returns
-------
rejected : ndarray, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : ndarray
pvalues adjusted for multiple hypotheses testing to limit FDR
m0 : int
ntests - rej, estimated number of true hypotheses
alpha_stages : list of floats
A list of alphas that have been used at each stage
Notes
-----
The returned corrected p-values are specific to the given alpha, they
cannot be used for a different alpha.
The returned corrected p-values are from the last stage of the fdr_bh
linear step-up procedure (fdrcorrection0 with method='indep') corrected
for the estimated fraction of true hypotheses.
This means that the rejection decision can be obtained with
``pval_corrected <= alpha``, where ``alpha`` is the original significance
level.
(Note: This has changed from earlier versions (<0.5.0) of statsmodels.)
BKY described several other multi-stage methods, which would be easy to implement.
However, in their simulation the simple two-stage method (with iter=False) was the
most robust to the presence of positive correlation.
TODO: What should be returned?
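Examples
--------
A minimal sketch of the default (non-iterated) 'bky' procedure with arbitrary inputs:
>>> import numpy as np
>>> from statsmodels.stats.multitest import fdrcorrection_twostage
>>> pvals = np.array([0.001, 0.02, 0.04, 0.30])
>>> rej, pvals_adj, m0, stages = fdrcorrection_twostage(pvals, alpha=0.05)
>>> int(rej.sum())
3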
'''
pvals = np.asarray(pvals)
if not is_sorted:
pvals_sortind = np.argsort(pvals)
pvals = np.take(pvals, pvals_sortind)
ntests = len(pvals)
if method == 'bky':
fact = (1.+alpha)
alpha_prime = alpha / fact
elif method == 'bh':
fact = 1.
alpha_prime = alpha
else:
raise ValueError("only 'bky' and 'bh' are available as method")
alpha_stages = [alpha_prime]
rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_prime, method='indep',
is_sorted=True)
r1 = rej.sum()
if (r1 == 0) or (r1 == ntests):
return rej, pvalscorr * fact, ntests - r1, alpha_stages
ri_old = r1
while True:
ntests0 = 1.0 * ntests - ri_old
alpha_star = alpha_prime * ntests / ntests0
alpha_stages.append(alpha_star)
#print ntests0, alpha_star
rej, pvalscorr = fdrcorrection(pvals, alpha=alpha_star, method='indep',
is_sorted=True)
ri = rej.sum()
if (not iter) or ri == ri_old:
break
elif ri < ri_old:
# prevent cycles and endless loops
raise RuntimeError(" oops - should not be here")
ri_old = ri
# make adjustment to pvalscorr to reflect estimated number of Non-Null cases
# decision is then pvalscorr < alpha (or <=)
pvalscorr *= ntests0 * 1.0 / ntests
if method == 'bky':
pvalscorr *= (1. + alpha)
if not is_sorted:
pvalscorr_ = np.empty_like(pvalscorr)
pvalscorr_[pvals_sortind] = pvalscorr
del pvalscorr
reject = np.empty_like(rej)
reject[pvals_sortind] = rej
return reject, pvalscorr_, ntests - ri, alpha_stages
else:
return rej, pvalscorr, ntests - ri, alpha_stages
def local_fdr(zscores, null_proportion=1.0, null_pdf=None, deg=7,
nbins=30, alpha=0):
"""
Calculate local FDR values for a list of Z-scores.
Parameters
----------
zscores : array_like
A vector of Z-scores
null_proportion : float
The assumed proportion of true null hypotheses
null_pdf : function mapping reals to positive reals
The density of null Z-scores; if None, use standard normal
deg : int
The maximum exponent in the polynomial expansion of the
density of non-null Z-scores
nbins : int
The number of bins for estimating the marginal density
of Z-scores.
alpha : float
Use Poisson ridge regression with parameter alpha to estimate
the density of non-null Z-scores.
Returns
-------
fdr : array_like
A vector of FDR values
References
----------
B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups
Model. Statistical Science 23:1, 1-22.
Examples
--------
Basic use (the null Z-scores are taken to be standard normal):
>>> from statsmodels.stats.multitest import local_fdr
>>> import numpy as np
>>> zscores = np.random.randn(30)
>>> fdr = local_fdr(zscores)
Use a Gaussian null distribution estimated from the data:
>>> null = NullDistribution(zscores)
>>> fdr = local_fdr(zscores, null_pdf=null.pdf)
"""
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.generalized_linear_model import families
from statsmodels.regression.linear_model import OLS
# Bins for Poisson modeling of the marginal Z-score density
minz = min(zscores)
maxz = max(zscores)
bins = np.linspace(minz, maxz, nbins)
# Bin counts
zhist = np.histogram(zscores, bins)[0]
# Bin centers
zbins = (bins[:-1] + bins[1:]) / 2
# The design matrix at bin centers
dmat = np.vander(zbins, deg + 1)
# Rescale the design matrix
sd = dmat.std(0)
ii = sd >1e-8
dmat[:, ii] /= sd[ii]
start = OLS(np.log(1 + zhist), dmat).fit().params
# Poisson regression
if alpha > 0:
md = GLM(zhist, dmat, family=families.Poisson()).fit_regularized(L1_wt=0, alpha=alpha, start_params=start)
else:
md = GLM(zhist, dmat, family=families.Poisson()).fit(start_params=start)
# The design matrix for all Z-scores
dmat_full = np.vander(zscores, deg + 1)
dmat_full[:, ii] /= sd[ii]
# The height of the estimated marginal density of Z-scores,
# evaluated at every observed Z-score.
fz = md.predict(dmat_full) / (len(zscores) * (bins[1] - bins[0]))
# The null density.
if null_pdf is None:
f0 = np.exp(-0.5 * zscores**2) / np.sqrt(2 * np.pi)
else:
f0 = null_pdf(zscores)
# The local FDR values
fdr = null_proportion * f0 / fz
fdr = np.clip(fdr, 0, 1)
return fdr
class NullDistribution(object):
"""
Estimate a Gaussian distribution for the null Z-scores.
The observed Z-scores consist of both null and non-null values.
The fitted distribution of null Z-scores is Gaussian, but may have
non-zero mean and/or non-unit scale.
Parameters
----------
zscores : array_like
The observed Z-scores.
null_lb : float
Z-scores between `null_lb` and `null_ub` are all considered to be
true null hypotheses.
null_ub : float
See `null_lb`.
estimate_mean : bool
If True, estimate the mean of the distribution. If False, the
mean is fixed at zero.
estimate_scale : bool
If True, estimate the scale of the distribution. If False, the
scale parameter is fixed at 1.
estimate_null_proportion : bool
If True, estimate the proportion of true null hypotheses (i.e.
the proportion of z-scores with expected value zero). If False,
this parameter is fixed at 1.
Attributes
----------
mean : float
The estimated mean of the empirical null distribution
sd : float
The estimated standard deviation of the empirical null distribution
null_proportion : float
The estimated proportion of true null hypotheses among all hypotheses
References
----------
B Efron (2008). Microarrays, Empirical Bayes, and the Two-Groups
Model. Statistical Science 23:1, 1-22.
Notes
-----
See also:
http://nipy.org/nipy/labs/enn.html#nipy.algorithms.statistics.empirical_pvalue.NormalEmpiricalNull.fdr
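Examples
--------
A rough sketch using simulated Z-scores (values are random and shown only to
illustrate the fitting workflow):
>>> import numpy as np
>>> from statsmodels.stats.multitest import NullDistribution, local_fdr
>>> zscores = np.random.randn(500)
>>> null = NullDistribution(zscores)
>>> fdr = local_fdr(zscores, null_pdf=null.pdf)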
"""
def __init__(self, zscores, null_lb=-1, null_ub=1, estimate_mean=True,
estimate_scale=True, estimate_null_proportion=False):
# Extract the null z-scores
ii = np.flatnonzero((zscores >= null_lb) & (zscores <= null_ub))
if len(ii) == 0:
raise RuntimeError("No Z-scores fall between null_lb and null_ub")
zscores0 = zscores[ii]
# Number of Z-scores, and null Z-scores
n_zs, n_zs0 = len(zscores), len(zscores0)
# Unpack and transform the parameters to the natural scale, hold
# parameters fixed as specified.
def xform(params):
mean = 0.
sd = 1.
prob = 1.
ii = 0
if estimate_mean:
mean = params[ii]
ii += 1
if estimate_scale:
sd = np.exp(params[ii])
ii += 1
if estimate_null_proportion:
prob = 1 / (1 + np.exp(-params[ii]))
return mean, sd, prob
from scipy.stats.distributions import norm
def fun(params):
"""
Negative log-likelihood of z-scores.
The function has three arguments, packed into a vector:
mean : location parameter
logscale : log of the scale parameter
logitprop : logit of the proportion of true nulls
The implementation follows section 4 from Efron 2008.
"""
d, s, p = xform(params)
# Mass within the central region
central_mass = (norm.cdf((null_ub - d) / s) -
norm.cdf((null_lb - d) / s))
# Probability that a Z-score is null and is in the central region
cp = p * central_mass
# Binomial term
rval = n_zs0 * np.log(cp) + (n_zs - n_zs0) * np.log(1 - cp)
# Truncated Gaussian term for null Z-scores
zv = (zscores0 - d) / s
rval += np.sum(-zv**2 / 2) - n_zs0 * np.log(s)
rval -= n_zs0 * np.log(central_mass)
return -rval
# Estimate the parameters
from scipy.optimize import minimize
# starting values are mean = 0, scale = 1, p0 ~ 1
mz = minimize(fun, np.r_[0., 0, 3], method="Nelder-Mead")
mean, sd, prob = xform(mz['x'])
self.mean = mean
self.sd = sd
self.null_proportion = prob
# The fitted null density function
def pdf(self, zscores):
"""
Evaluates the fitted empirical null Z-score density.
Parameters
----------
zscores : scalar or array_like
The point or points at which the density is to be
evaluated.
Returns
-------
The empirical null Z-score density evaluated at the given
points.
"""
zval = (zscores - self.mean) / self.sd
return np.exp(-0.5*zval**2 - np.log(self.sd) - 0.5*np.log(2*np.pi))
|
the-stack_106_13687
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the MRUListEx Windows Registry plugin."""
from __future__ import unicode_literals
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.parsers.winreg_plugins import mrulistex
from tests.parsers.winreg_plugins import test_lib
class TestMRUListExStringWindowsRegistryPlugin(test_lib.RegistryPluginTestCase):
"""Tests for the string MRUListEx plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'MRUlist', key_path=key_path,
last_written_time=filetime.timestamp, offset=1456)
# The MRUListEx value data below encodes the MRU order 2, 0, 1, terminated by 0xffffffff.
value_data = (
b'\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\xff\xff\xff\xff')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'MRUListEx', data=value_data,
data_type=dfwinreg_definitions.REG_BINARY, offset=123)
registry_key.AddValue(registry_value)
value_data = 'Some random text here'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'0', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1892)
registry_key.AddValue(registry_value)
value_data = 'c:\\evil.exe\x00'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'1', data=value_data, data_type=dfwinreg_definitions.REG_BINARY,
offset=612)
registry_key.AddValue(registry_value)
value_data = 'C:\\looks_legit.exe'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'2', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=1001)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
'InterestingApp\\MRUlist')
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'MRUlist', key_path=key_path)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertFalse(result)
registry_value = dfwinreg_fake.FakeWinRegistryValue('MRUListEx')
registry_key.AddValue(registry_value)
registry_value = dfwinreg_fake.FakeWinRegistryValue('0')
registry_key.AddValue(registry_value)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertTrue(result)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\Shell\\BagMRU')
self._AssertNotFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
self._AssertNotFiltersOnKeyPath(plugin, key_path)
def testProcess(self):
"""Tests the Process function."""
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Some Windows\\'
'InterestingApp\\MRUlist')
time_string = '2012-08-28 09:23:49.002031'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = mrulistex.MRUListExStringWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-08-28 09:23:49.002031')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 2]: C:\\looks_legit.exe '
'Index: 2 [MRU Value 0]: Some random text here '
'Index: 3 [MRU Value 1]: c:\\evil.exe').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestMRUListExShellItemListWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the shell item list MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\StreamMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\OpenSavePidlMRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExShellItemListWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 65)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[40]
self.CheckTimestamp(event.timestamp, '2011-08-28 22:48:28.159309')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
expected_message = (
'[{0:s}\\exe] '
'Index: 1 [MRU Value 1]: Shell item path: <My Computer> '
'P:\\Application Tools\\Firefox 6.0\\Firefox Setup 6.0.exe '
'Index: 2 [MRU Value 0]: Shell item path: <Computers and Devices> '
'<UNKNOWN: 0x00>\\\\controller\\WebDavShare\\Firefox Setup 3.6.12.exe'
'').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
# A shell item event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-03-08 22:16:02.000000')
event_data = self._GetEventDataOfEvent(storage_writer, event)
self.assertEqual(event_data.data_type, 'windows:shell_item:file_entry')
expected_message = (
'Name: ALLOYR~1 '
'Long name: Alloy Research '
'NTFS file reference: 44518-33 '
'Shell item path: <Shared Documents Folder (Users Files)> '
'<UNKNOWN: 0x00>\\Alloy Research '
'Origin: {0:s}\\*').format(key_path)
expected_short_message = (
'Name: Alloy Research '
'NTFS file reference: 44518-33 '
'Origin: HKEY_CURRENT_USER\\...')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestMRUListExStringAndShellItemWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\RecentDocs')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\RecentDocs')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExStringAndShellItemWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 6)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2012-04-01 13:52:39.113742')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
self.assertEqual(event_data.pathspec, test_file_entry.path_spec)
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 17]: Path: The SHIELD, '
'Shell item: [The SHIELD.lnk] '
'Index: 2 [MRU Value 18]: '
'Path: captain_america_shield_by_almogrem-d48x9x8.jpg, '
'Shell item: [captain_america_shield_by_almogrem-d48x9x8.lnk] '
'Index: 3 [MRU Value 16]: Path: captain-america-shield-front.jpg, '
'Shell item: [captain-america-shield-front.lnk] '
'Index: 4 [MRU Value 12]: Path: Leadership, '
'Shell item: [Leadership.lnk] '
'Index: 5 [MRU Value 15]: Path: followership.pdf, '
'Shell item: [followership.lnk] '
'Index: 6 [MRU Value 14]: Path: leaderqualities.pdf, '
'Shell item: [leaderqualities.lnk] '
'Index: 7 [MRU Value 13]: Path: htlhtl.pdf, '
'Shell item: [htlhtl.lnk] '
'Index: 8 [MRU Value 8]: Path: StarFury, '
'Shell item: [StarFury (2).lnk] '
'Index: 9 [MRU Value 7]: Path: Earth_SA-26_Thunderbolt.jpg, '
'Shell item: [Earth_SA-26_Thunderbolt.lnk] '
'Index: 10 [MRU Value 11]: Path: 5031RR_BalancedLeadership.pdf, '
'Shell item: [5031RR_BalancedLeadership.lnk] '
'Index: 11 [MRU Value 10]: '
'Path: SA-23E Mitchell-Hyundyne Starfury.docx, '
'Shell item: [SA-23E Mitchell-Hyundyne Starfury.lnk] '
'Index: 12 [MRU Value 9]: Path: StarFury.docx, '
'Shell item: [StarFury (3).lnk] '
'Index: 13 [MRU Value 6]: Path: StarFury.zip, '
'Shell item: [StarFury.lnk] '
'Index: 14 [MRU Value 4]: Path: VIBRANIUM.docx, '
'Shell item: [VIBRANIUM.lnk] '
'Index: 15 [MRU Value 5]: Path: ADAMANTIUM-Background.docx, '
'Shell item: [ADAMANTIUM-Background.lnk] '
'Index: 16 [MRU Value 3]: Path: Pictures, '
'Shell item: [Pictures.lnk] '
'Index: 17 [MRU Value 2]: Path: nick_fury_77831.jpg, '
'Shell item: [nick_fury_77831.lnk] '
'Index: 18 [MRU Value 1]: Path: Downloads, '
'Shell item: [Downloads.lnk] '
'Index: 19 [MRU Value 0]: Path: wallpaper_medium.jpg, '
'Shell item: [wallpaper_medium.lnk]').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
class TestMRUListExStringAndShellItemListWindowsRegistryPlugin(
test_lib.RegistryPluginTestCase):
"""Tests for the string and shell item list MRUListEx plugin."""
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\LastVisitedPidlMRU')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntry(['NTUSER-WIN7.DAT'])
key_path = (
'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\'
'Explorer\\ComDlg32\\LastVisitedPidlMRU')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = mrulistex.MRUListExStringAndShellItemListWindowsRegistryPlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 31)
events = list(storage_writer.GetEvents())
# A MRUListEx event.
event = events[30]
self.CheckTimestamp(event.timestamp, '2012-04-01 13:52:38.966290')
event_data = self._GetEventDataOfEvent(storage_writer, event)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_data.parser, plugin.plugin_name)
self.assertEqual(event_data.data_type, 'windows:registry:mrulistex')
self.assertEqual(event_data.pathspec, test_file_entry.path_spec)
expected_message = (
'[{0:s}] '
'Index: 1 [MRU Value 1]: Path: chrome.exe, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'<UNKNOWN: 0x00> '
'Index: 2 [MRU Value 7]: '
'Path: {{48E1ED6B-CF49-4609-B1C1-C082BFC3D0B4}}, '
'Shell item path: <Shared Documents Folder (Users Files)> '
'<UNKNOWN: 0x00>\\Alloy Research '
'Index: 3 [MRU Value 6]: '
'Path: {{427865A0-03AF-4F25-82EE-10B6CB1DED3E}}, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'Index: 4 [MRU Value 5]: '
'Path: {{24B5C9BB-48B5-47FF-8343-40481DBA1E2B}}, '
'Shell item path: <My Computer> C:\\Users\\nfury\\Documents '
'Index: 5 [MRU Value 4]: '
'Path: {{0B8CFE96-DB69-4D33-8E3C-36EAB4F709E0}}, '
'Shell item path: <My Computer> C:\\Users\\nfury\\Documents\\'
'Alloy Research '
'Index: 6 [MRU Value 3]: '
'Path: {{D4F85F66-003D-4127-BCE9-CAD7A57B2857}}, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00> <UNKNOWN: 0x00> '
'Index: 7 [MRU Value 0]: Path: iexplore.exe, '
'Shell item path: <My Computer> P:\\Application Tools\\Firefox 6.0 '
'Index: 8 [MRU Value 2]: Path: Skype.exe, '
'Shell item path: <Users Libraries> <UNKNOWN: 0x00>').format(key_path)
expected_short_message = '{0:s}...'.format(expected_message[:77])
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13689
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
r"""Use the pulse builder DSL to write pulse programs with an imperative syntax.
.. warning::
The pulse builder interface is still in active development. It may have
breaking API changes without deprecation warnings in future releases until
otherwise indicated.
To begin pulse programming we must first initialize our program builder
context with :func:`build`, after which we can begin adding program
statements. For example, below we write a simple program that :func:`play`\s
a pulse:
.. jupyter-execute::
from qiskit import execute, pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.play(pulse.Constant(100, 1.0), d0)
pulse_prog.draw()
The builder initializes a :class:`pulse.Schedule`, ``pulse_prog``
and then begins to construct the program within the context. The output pulse
schedule will survive after the context is exited and can be executed like a
normal Qiskit schedule using ``qiskit.execute(pulse_prog, backend)``.
Pulse programming has a simple imperative style. This leaves the programmer
to worry about the raw experimental physics of pulse programming and not
constructing cumbersome data structures.
We can optionally pass a :class:`~qiskit.providers.BaseBackend` to
:func:`build` to enable enhanced functionality. Below, we prepare a Bell state
by automatically compiling the required pulses from their gate-level
representations, while simultaneously applying a long decoupling pulse to a
neighboring qubit. We terminate the experiment with a measurement to observe the
state we prepared. This program, which mixes circuits and pulses, will be
automatically lowered to be run as a pulse program:
.. jupyter-execute::
import math
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse3Q
# TODO: This example should use a real mock backend.
backend = FakeOpenPulse3Q()
d2 = pulse.DriveChannel(2)
with pulse.build(backend) as bell_prep:
pulse.u2(0, math.pi, 0)
pulse.cx(0, 1)
with pulse.build(backend) as decoupled_bell_prep_and_measure:
# We call our bell state preparation schedule constructed above.
with pulse.align_right():
pulse.call(bell_prep)
pulse.play(pulse.Constant(bell_prep.duration, 0.02), d2)
pulse.barrier(0, 1, 2)
registers = pulse.measure_all()
decoupled_bell_prep_and_measure.draw()
With the pulse builder we are able to blend programming on qubits and channels.
While the pulse schedule is based on instructions that operate on
channels, the pulse builder automatically handles the mapping from qubits to
channels for you.
In the example below we demonstrate some more features of the pulse builder:
.. jupyter-execute::
import math
from qiskit import pulse, QuantumCircuit
from qiskit.pulse import library
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
# Create a pulse.
gaussian_pulse = library.gaussian(10, 1.0, 2)
# Get the qubit's corresponding drive channel from the backend.
d0 = pulse.drive_channel(0)
d1 = pulse.drive_channel(1)
# Play a pulse at t=0.
pulse.play(gaussian_pulse, d0)
# Play another pulse directly after the previous pulse at t=10.
pulse.play(gaussian_pulse, d0)
# The default scheduling behavior is to schedule pulses in parallel
# across channels. For example, the statement below
# plays the same pulse on a different channel at t=0.
pulse.play(gaussian_pulse, d1)
# We also provide pulse scheduling alignment contexts.
# The default alignment context is align_left.
# The sequential context schedules pulse instructions sequentially in time.
# This context starts at t=10 due to earlier pulses above.
with pulse.align_sequential():
pulse.play(gaussian_pulse, d0)
# Play another pulse after at t=20.
pulse.play(gaussian_pulse, d1)
# We can also nest contexts as each instruction is
# contained in its local scheduling context.
# The output of a child context is a context-schedule
# with the timing of the internal instructions fixed relative to
# one another. This schedule is then called in the parent context.
# Context starts at t=30.
with pulse.align_left():
# Start at t=30.
pulse.play(gaussian_pulse, d0)
# Start at t=30.
pulse.play(gaussian_pulse, d1)
# Context ends at t=40.
# Alignment context where all pulse instructions are
# aligned to the right, i.e., as late as possible.
with pulse.align_right():
# Shift the phase of a pulse channel.
pulse.shift_phase(math.pi, d1)
# Starts at t=40.
pulse.delay(100, d0)
# Ends at t=140.
# Starts at t=130.
pulse.play(gaussian_pulse, d1)
# Ends at t=140.
# Acquire data for a qubit and store in a memory slot.
pulse.acquire(100, 0, pulse.MemorySlot(0))
# We also support a variety of macros for common operations.
# Measure all qubits.
pulse.measure_all()
# Delay on some qubits.
# This requires knowledge of which channels belong to which qubits.
# delay for 100 cycles on qubits 0 and 1.
pulse.delay_qubits(100, 0, 1)
# Call a quantum circuit. The pulse builder lazily constructs a quantum
# circuit which is then transpiled and scheduled before inserting into
# a pulse schedule.
# NOTE: Quantum register indices correspond to physical qubit indices.
qc = QuantumCircuit(2, 2)
qc.cx(0, 1)
pulse.call(qc)
# Calling a small set of standard gates and decomposing to pulses is
# also supported with more natural syntax.
pulse.u3(0, math.pi, 0, 0)
pulse.cx(0, 1)
# It is also possible to call a preexisting schedule
tmp_sched = pulse.Schedule()
tmp_sched += pulse.Play(gaussian_pulse, d0)
pulse.call(tmp_sched)
# We also support:
# frequency instructions
pulse.set_frequency(5.0e9, d0)
# phase instructions
pulse.shift_phase(0.1, d0)
# offset contexts
with pulse.phase_offset(math.pi, d0):
pulse.play(gaussian_pulse, d0)
The above is just a small taste of what is possible with the builder. See the
rest of the module documentation for more information on its
capabilities.
"""
import collections
import contextvars
import functools
import itertools
import warnings
from contextlib import contextmanager
from typing import (
Any,
Callable,
ContextManager,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Tuple,
TypeVar,
Union,
NewType,
)
import numpy as np
from qiskit import circuit
from qiskit.circuit.library import standard_gates as gates
from qiskit.circuit.parameterexpression import ParameterExpression, ParameterValueType
from qiskit.pulse import (
channels as chans,
configuration,
exceptions,
instructions,
macros,
library,
transforms,
utils,
)
from qiskit.pulse.instructions import directives
from qiskit.pulse.schedule import Schedule, ScheduleBlock
from qiskit.pulse.transforms.alignments import AlignmentKind
#: contextvars.ContextVar[BuilderContext]: active builder
BUILDER_CONTEXTVAR = contextvars.ContextVar("backend")
T = TypeVar("T") # pylint: disable=invalid-name
StorageLocation = NewType("StorageLocation", Union[chans.MemorySlot, chans.RegisterSlot])
def _compile_lazy_circuit_before(function: Callable[..., T]) -> Callable[..., T]:
"""Decorator thats schedules and calls the lazily compiled circuit before
executing the decorated builder method."""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
self._compile_lazy_circuit()
return function(self, *args, **kwargs)
return wrapper
def _requires_backend(function: Callable[..., T]) -> Callable[..., T]:
"""Decorator a function to raise if it is called without a builder with a
set backend.
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if self.backend is None:
raise exceptions.BackendNotSet(
"This function requires the builder to " 'have a "backend" set.'
)
return function(self, *args, **kwargs)
return wrapper
class _PulseBuilder:
"""Builder context class."""
__alignment_kinds__ = {
"left": transforms.AlignLeft(),
"right": transforms.AlignRight(),
"sequential": transforms.AlignSequential(),
}
def __init__(
self,
backend=None,
block: Optional[ScheduleBlock] = None,
name: Optional[str] = None,
default_alignment: Union[str, AlignmentKind] = "left",
default_transpiler_settings: Mapping = None,
default_circuit_scheduler_settings: Mapping = None,
):
"""Initialize the builder context.
.. note::
At some point we may consider incorporating the builder into
the :class:`~qiskit.pulse.Schedule` class. However, the risk of
this is tying the user interface to the intermediate
representation. For now we avoid this at the cost of some code
duplication.
Args:
backend (Union[Backend, BaseBackend]): Input backend to use in
builder. If not set certain functionality will be unavailable.
block: Initial ``ScheduleBlock`` to build on.
name: Name of pulse program to be built.
default_alignment: Default scheduling alignment for builder.
One of ``left``, ``right``, ``sequential`` or an instance of
:class:`~qiskit.pulse.transforms.alignments.AlignmentKind` subclass.
default_transpiler_settings: Default settings for the transpiler.
default_circuit_scheduler_settings: Default settings for the
circuit to pulse scheduler.
Raises:
PulseError: When invalid ``default_alignment`` or `block` is specified.
"""
#: BaseBackend: Backend instance for context builder.
self._backend = backend
#: Union[None, ContextVar]: Token for this ``_PulseBuilder``'s ``ContextVar``.
self._backend_ctx_token = None
#: QuantumCircuit: Lazily constructed quantum circuit
self._lazy_circuit = None
#: Dict[str, Any]: Transpiler setting dictionary.
self._transpiler_settings = default_transpiler_settings or dict()
#: Dict[str, Any]: Scheduler setting dictionary.
self._circuit_scheduler_settings = default_circuit_scheduler_settings or dict()
#: List[ScheduleBlock]: Stack of context.
self._context_stack = []
#: str: Name of the output program
self._name = name
# Add root block if provided. Schedule will be built on top of this.
if block is not None:
if isinstance(block, ScheduleBlock):
root_block = block
elif isinstance(block, Schedule):
root_block = ScheduleBlock()
root_block.append(instructions.Call(subroutine=block))
else:
raise exceptions.PulseError(
f"Input `block` type {block.__class__.__name__} is "
"not a valid format. Specify a pulse program."
)
self._context_stack.append(root_block)
# Set default alignment context
alignment = _PulseBuilder.__alignment_kinds__.get(default_alignment, default_alignment)
if not isinstance(alignment, AlignmentKind):
raise exceptions.PulseError(
f"Given `default_alignment` {repr(default_alignment)} is "
"not a valid transformation. Set one of "
f'{", ".join(_PulseBuilder.__alignment_kinds__.keys())}, '
"or set an instance of `AlignmentKind` subclass."
)
self.push_context(alignment)
def __enter__(self) -> ScheduleBlock:
"""Enter this builder context and yield either the supplied schedule
or the schedule created for the user.
Returns:
The schedule that the builder will build on.
"""
self._backend_ctx_token = BUILDER_CONTEXTVAR.set(self)
output = self._context_stack[0]
output._name = self._name or output.name
return output
@_compile_lazy_circuit_before
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit the builder context and compile the built pulse program."""
self.compile()
BUILDER_CONTEXTVAR.reset(self._backend_ctx_token)
@property
def backend(self):
"""Returns the builder backend if set.
Returns:
Optional[Union[Backend, BaseBackend]]: The builder's backend.
"""
return self._backend
@_compile_lazy_circuit_before
def push_context(self, alignment: AlignmentKind):
"""Push new context to the stack."""
self._context_stack.append(ScheduleBlock(alignment_context=alignment))
@_compile_lazy_circuit_before
def pop_context(self) -> ScheduleBlock:
"""Pop the last context from the stack."""
if len(self._context_stack) == 1:
raise exceptions.PulseError("The root context cannot be popped out.")
return self._context_stack.pop()
def get_context(self) -> ScheduleBlock:
"""Get current context.
Notes:
New instructions can be added with the `.append_block` or `.append_instruction` methods.
Use the above methods rather than directly accessing the current context.
"""
return self._context_stack[-1]
@property
@_requires_backend
def num_qubits(self):
"""Get the number of qubits in the backend."""
return self.backend.configuration().n_qubits
@property
def transpiler_settings(self) -> Mapping:
"""The builder's transpiler settings."""
return self._transpiler_settings
@transpiler_settings.setter
@_compile_lazy_circuit_before
def transpiler_settings(self, settings: Mapping):
self._compile_lazy_circuit()
self._transpiler_settings = settings
@property
def circuit_scheduler_settings(self) -> Mapping:
"""The builder's circuit to pulse scheduler settings."""
return self._circuit_scheduler_settings
@circuit_scheduler_settings.setter
@_compile_lazy_circuit_before
def circuit_scheduler_settings(self, settings: Mapping):
self._compile_lazy_circuit()
self._circuit_scheduler_settings = settings
@_compile_lazy_circuit_before
def compile(self) -> ScheduleBlock:
"""Compile and output the built pulse program."""
# Not much happens because we currently compile as we build.
# This should be offloaded to a true compilation module
# once we define a more sophisticated IR.
while len(self._context_stack) > 1:
current = self.pop_context()
self.append_block(current)
return self._context_stack[0]
def _compile_lazy_circuit(self):
"""Call a context QuantumCircuit (lazy circuit) and append the output pulse schedule
to the builder's context schedule.
Note that the lazy circuit is not stored as a call instruction.
"""
if self._lazy_circuit:
lazy_circuit = self._lazy_circuit
# reset lazy circuit
self._lazy_circuit = self._new_circuit()
self.call_subroutine(subroutine=self._compile_circuit(lazy_circuit))
def _compile_circuit(self, circ) -> Schedule:
"""Take a QuantumCircuit and output the pulse schedule associated with the circuit."""
import qiskit.compiler as compiler # pylint: disable=cyclic-import
transpiled_circuit = compiler.transpile(circ, self.backend, **self.transpiler_settings)
sched = compiler.schedule(
transpiled_circuit, self.backend, **self.circuit_scheduler_settings
)
return sched
def _new_circuit(self):
"""Create a new circuit for lazy circuit scheduling."""
return circuit.QuantumCircuit(self.num_qubits)
@_compile_lazy_circuit_before
def append_instruction(self, instruction: instructions.Instruction):
"""Add an instruction to the builder's context schedule.
Args:
instruction: Instruction to append.
"""
self._context_stack[-1].append(instruction)
@_compile_lazy_circuit_before
def append_block(self, context_block: ScheduleBlock):
"""Add a :class:`ScheduleBlock` to the builder's context schedule.
Args:
context_block: ScheduleBlock to append to the current context block.
"""
# ignore empty context
if len(context_block) > 0:
self._context_stack[-1].append(context_block)
def call_subroutine(
self,
subroutine: Union[circuit.QuantumCircuit, Schedule, ScheduleBlock],
name: Optional[str] = None,
value_dict: Optional[Dict[ParameterExpression, ParameterValueType]] = None,
**kw_params: ParameterValueType,
):
"""Call a schedule or circuit defined outside of the current scope.
The ``subroutine`` is appended to the context schedule as a call instruction.
This logic just generates a convenient program representation in the compiler.
Thus this doesn't affect execution of inline subroutines.
See :class:`~pulse.instructions.Call` for more details.
Args:
subroutine: Target schedule or circuit to append to the current context.
name: Name of subroutine if defined.
value_dict: Parameter object and assigned value mapping. This is a more precise way to
identify a parameter since the mapping is managed with the unique object id rather than
the name. This is especially useful when there is a name collision in a parameter table.
kw_params: Parameter values to bind to the target subroutine
with string parameter names. If parameter names overlap, all matching
parameters are updated with the same assigned value.
Raises:
PulseError:
- When specified parameter is not contained in the subroutine
- When input subroutine is not valid data format.
"""
if isinstance(subroutine, circuit.QuantumCircuit):
self._compile_lazy_circuit()
subroutine = self._compile_circuit(subroutine)
empty_subroutine = True
if isinstance(subroutine, Schedule):
if len(subroutine.instructions) > 0:
empty_subroutine = False
elif isinstance(subroutine, ScheduleBlock):
if len(subroutine.blocks) > 0:
empty_subroutine = False
else:
raise exceptions.PulseError(
f"Subroutine type {subroutine.__class__.__name__} is "
"not valid data format. Call QuantumCircuit, "
"Schedule, or ScheduleBlock."
)
if not empty_subroutine:
param_value_map = dict()
for param_name, assigned_value in kw_params.items():
param_objs = subroutine.get_parameters(param_name)
if len(param_objs) > 0:
for param_obj in param_objs:
param_value_map[param_obj] = assigned_value
else:
raise exceptions.PulseError(
f"Parameter {param_name} is not defined in the target subroutine. "
f'{", ".join(map(str, subroutine.parameters))} can be specified.'
)
if value_dict:
param_value_map.update(value_dict)
call_def = instructions.Call(subroutine, param_value_map, name)
self.append_instruction(call_def)
@_requires_backend
def call_gate(self, gate: circuit.Gate, qubits: Tuple[int, ...], lazy: bool = True):
"""Call the circuit ``gate`` in the pulse program.
The qubits are assumed to be defined on physical qubits.
If ``lazy == True`` this circuit will extend a lazily constructed
quantum circuit. When an operation occurs that breaks the underlying
circuit scheduling assumptions, such as adding a pulse instruction or
changing the alignment context, the circuit will be
transpiled and scheduled into pulses with the currently active settings.
Args:
gate: Gate to call.
qubits: Qubits to call gate on.
lazy: If false the circuit will be transpiled and pulse scheduled
immediately. Otherwise, it will extend the active lazy circuit
as defined above.
"""
try:
iter(qubits)
except TypeError:
qubits = (qubits,)
if lazy:
self._call_gate(gate, qubits)
else:
self._compile_lazy_circuit()
self._call_gate(gate, qubits)
self._compile_lazy_circuit()
def _call_gate(self, gate, qargs):
if self._lazy_circuit is None:
self._lazy_circuit = self._new_circuit()
self._lazy_circuit.append(gate, qargs=qargs)
def build(
backend=None,
schedule: Optional[ScheduleBlock] = None,
name: Optional[str] = None,
default_alignment: Optional[Union[str, AlignmentKind]] = "left",
default_transpiler_settings: Optional[Dict[str, Any]] = None,
default_circuit_scheduler_settings: Optional[Dict[str, Any]] = None,
) -> ContextManager[ScheduleBlock]:
"""Create a context manager for launching the imperative pulse builder DSL.
To enter a building context and starting building a pulse program:
.. jupyter-execute::
from qiskit import execute, pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.play(pulse.Constant(100, 0.5), d0)
The output program ``pulse_prog`` cannot be executed here as we are using
a mock backend. If a real backend is being used, executing the program is
done with:
.. code-block:: python
qiskit.execute(pulse_prog, backend)
Args:
backend (Union[Backend, BaseBackend]): A Qiskit backend. If not supplied certain
builder functionality will be unavailable.
schedule: A pulse ``ScheduleBlock`` in which your pulse program will be built.
name: Name of pulse program to be built.
default_alignment: Default scheduling alignment for builder.
One of ``left``, ``right``, ``sequential`` or an alignment context.
default_transpiler_settings: Default settings for the transpiler.
default_circuit_scheduler_settings: Default settings for the
circuit to pulse scheduler.
Returns:
A new builder context which has the active builder initialized.
"""
return _PulseBuilder(
backend=backend,
block=schedule,
name=name,
default_alignment=default_alignment,
default_transpiler_settings=default_transpiler_settings,
default_circuit_scheduler_settings=default_circuit_scheduler_settings,
)
# Builder Utilities
def _active_builder() -> _PulseBuilder:
"""Get the active builder in the active context.
Returns:
The active builder in this context.
Raises:
exceptions.NoActiveBuilder: If a pulse builder function is called
outside of a builder context.
"""
try:
return BUILDER_CONTEXTVAR.get()
except LookupError as ex:
raise exceptions.NoActiveBuilder(
"A Pulse builder function was called outside of "
"a builder context. Try calling within a builder "
'context, e.g., "with pulse.build() as schedule: ...".'
) from ex
def active_backend():
"""Get the backend of the currently active builder context.
Returns:
Union[Backend, BaseBackend]: The active backend in the currently active
builder context.
Raises:
exceptions.BackendNotSet: If the builder does not have a backend set.
"""
builder = _active_builder().backend
if builder is None:
raise exceptions.BackendNotSet(
"This function requires the active builder to " 'have a "backend" set.'
)
return builder
def append_schedule(schedule: Union[Schedule, ScheduleBlock]):
"""Call a schedule by appending to the active builder's context block.
Args:
schedule: Schedule to append.
Raises:
PulseError: When input `schedule` is invalid data format.
"""
if isinstance(schedule, Schedule):
_active_builder().append_instruction(instructions.Call(subroutine=schedule))
elif isinstance(schedule, ScheduleBlock):
_active_builder().append_block(schedule)
else:
raise exceptions.PulseError(
f"Input program {schedule.__class__.__name__} is not "
"acceptable program format. Input `Schedule` or "
"`ScheduleBlock`."
)
def append_instruction(instruction: instructions.Instruction):
"""Append an instruction to the active builder's context schedule.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.builder.append_instruction(pulse.Delay(10, d0))
print(pulse_prog.instructions)
"""
_active_builder().append_instruction(instruction)
def num_qubits() -> int:
"""Return number of qubits in the currently active backend.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
print(pulse.num_qubits())
.. note:: Requires the active builder context to have a backend set.
"""
return active_backend().configuration().n_qubits
def seconds_to_samples(seconds: Union[float, np.ndarray]) -> Union[int, np.ndarray]:
"""Obtain the number of samples that will elapse in ``seconds`` on the
active backend.
Rounds down.
Args:
seconds: Time in seconds to convert to samples.
Returns:
The number of samples for the time to elapse
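Examples:
A minimal sketch; the returned value depends on the backend's ``dt``, and
``seconds_to_samples`` is assumed to be re-exported under ``qiskit.pulse``
like the other builder utilities above:
.. code-block:: python
    from qiskit import pulse
    from qiskit.test.mock import FakeOpenPulse2Q
    backend = FakeOpenPulse2Q()
    with pulse.build(backend):
        n_samples = pulse.seconds_to_samples(1e-6)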
"""
if isinstance(seconds, np.ndarray):
return (seconds / active_backend().configuration().dt).astype(int)
return int(seconds / active_backend().configuration().dt)
def samples_to_seconds(samples: Union[int, np.ndarray]) -> Union[float, np.ndarray]:
"""Obtain the time in seconds that will elapse for the input number of
samples on the active backend.
Args:
samples: Number of samples to convert to time in seconds.
Returns:
The time that elapses in ``samples``.
"""
return samples * active_backend().configuration().dt
def qubit_channels(qubit: int) -> Set[chans.Channel]:
"""Returns the set of channels associated with a qubit.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
print(pulse.qubit_channels(0))
.. note:: Requires the active builder context to have a backend set.
.. note:: A channel may still be associated with another qubit in this list
such as in the case where significant crosstalk exists.
"""
return set(active_backend().configuration().get_qubit_channels(qubit))
def _qubits_to_channels(*channels_or_qubits: Union[int, chans.Channel]) -> Set[chans.Channel]:
"""Returns the unique channels of the input qubits."""
channels = set()
for channel_or_qubit in channels_or_qubits:
if isinstance(channel_or_qubit, int):
channels |= qubit_channels(channel_or_qubit)
elif isinstance(channel_or_qubit, chans.Channel):
channels.add(channel_or_qubit)
else:
raise exceptions.PulseError(
'{} is not a "Channel" or ' "qubit (integer).".format(channel_or_qubit)
)
return channels
def active_transpiler_settings() -> Dict[str, Any]:
"""Return the current active builder context's transpiler settings.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
transpiler_settings = {'optimization_level': 3}
with pulse.build(backend,
default_transpiler_settings=transpiler_settings):
print(pulse.active_transpiler_settings())
"""
return dict(_active_builder().transpiler_settings)
def active_circuit_scheduler_settings() -> Dict[str, Any]: # pylint: disable=invalid-name
"""Return the current active builder context's circuit scheduler settings.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
circuit_scheduler_settings = {'method': 'alap'}
with pulse.build(
backend,
default_circuit_scheduler_settings=circuit_scheduler_settings):
print(pulse.active_circuit_scheduler_settings())
"""
return dict(_active_builder().circuit_scheduler_settings)
# Contexts
@contextmanager
def align_left() -> ContextManager[None]:
"""Left alignment pulse scheduling context.
Pulse instructions within this context are scheduled as early as possible
by shifting them left to the earliest available time.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as pulse_prog:
with pulse.align_left():
# this pulse will start at t=0
pulse.play(pulse.Constant(100, 1.0), d0)
# this pulse will start at t=0
pulse.play(pulse.Constant(20, 1.0), d1)
pulse_prog = pulse.transforms.block_to_schedule(pulse_prog)
assert pulse_prog.ch_start_time(d0) == pulse_prog.ch_start_time(d1)
Yields:
None
"""
builder = _active_builder()
builder.push_context(transforms.AlignLeft())
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
@contextmanager
def align_right() -> AlignmentKind:
"""Right alignment pulse scheduling context.
Pulse instructions within this context are scheduled as late as possible
by shifting them right to the latest available time.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as pulse_prog:
with pulse.align_right():
# this pulse will start at t=0
pulse.play(pulse.Constant(100, 1.0), d0)
# this pulse will start at t=80
pulse.play(pulse.Constant(20, 1.0), d1)
pulse_prog = pulse.transforms.block_to_schedule(pulse_prog)
assert pulse_prog.ch_stop_time(d0) == pulse_prog.ch_stop_time(d1)
Yields:
None
"""
builder = _active_builder()
builder.push_context(transforms.AlignRight())
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
@contextmanager
def align_sequential() -> AlignmentKind:
"""Sequential alignment pulse scheduling context.
Pulse instructions within this context are scheduled sequentially in time
such that no two instructions will be played at the same time.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build() as pulse_prog:
with pulse.align_sequential():
# this pulse will start at t=0
pulse.play(pulse.Constant(100, 1.0), d0)
# this pulse will start at t=100
pulse.play(pulse.Constant(20, 1.0), d1)
pulse_prog = pulse.transforms.block_to_schedule(pulse_prog)
assert pulse_prog.ch_stop_time(d0) == pulse_prog.ch_start_time(d1)
Yields:
None
"""
builder = _active_builder()
builder.push_context(transforms.AlignSequential())
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
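# The three contexts above differ only in where the shorter pulse lands. A
# compact contrast sketch (same two constant pulses under each policy); the
# helper name is illustrative, and the expected start times of the 20-sample
# pulse on d1 are 0, 80 and 100 samples respectively.
def _demo_alignment_contexts():
    from qiskit import pulse
    from qiskit.pulse import transforms

    d0, d1 = pulse.DriveChannel(0), pulse.DriveChannel(1)
    d1_starts = {}
    for label, context in (("left", pulse.align_left),
                           ("right", pulse.align_right),
                           ("sequential", pulse.align_sequential)):
        with pulse.build() as prog:
            with context():
                pulse.play(pulse.Constant(100, 1.0), d0)
                pulse.play(pulse.Constant(20, 1.0), d1)
        d1_starts[label] = transforms.block_to_schedule(prog).ch_start_time(d1)
    return d1_starts  # {"left": 0, "right": 80, "sequential": 100}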
@contextmanager
def align_equispaced(duration: Union[int, ParameterExpression]) -> AlignmentKind:
"""Equispaced alignment pulse scheduling context.
Pulse instructions within this context are scheduled with the same interval spacing such that
the total length of the context block is ``duration``.
If the total free ``duration`` cannot be evenly divided by the number of instructions
within the context, the modulo is split and then prepended and appended to
the returned schedule. Delay instructions are automatically inserted in between pulses.
This context is convenient to write a schedule for periodical dynamic decoupling or
the Hahn echo sequence.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
x90 = pulse.Gaussian(10, 0.1, 3)
x180 = pulse.Gaussian(10, 0.2, 3)
with pulse.build() as hahn_echo:
with pulse.align_equispaced(duration=100):
pulse.play(x90, d0)
pulse.play(x180, d0)
pulse.play(x90, d0)
hahn_echo.draw()
Args:
duration: Duration of this context. This should be larger than the schedule duration.
Yields:
None
Notes:
The scheduling is performed for sub-schedules within the context rather than
channel-wise. If you want to apply the equispaced context for each channel,
you should use the context independently for channels.
"""
builder = _active_builder()
builder.push_context(transforms.AlignEquispaced(duration=duration))
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
@contextmanager
def align_func(
duration: Union[int, ParameterExpression], func: Callable[[int], float]
) -> AlignmentKind:
"""Callback defined alignment pulse scheduling context.
Pulse instructions within this context are scheduled at the location specified by
an arbitrary callback function ``func`` that takes an integer index and returns
the associated fractional location within [0, 1].
Delay instructions are automatically inserted in between pulses.
This context may be convenient to write a schedule of arbitrary dynamical decoupling
sequences such as Uhrig dynamical decoupling.
Examples:
.. jupyter-execute::
import numpy as np
from qiskit import pulse
d0 = pulse.DriveChannel(0)
x90 = pulse.Gaussian(10, 0.1, 3)
x180 = pulse.Gaussian(10, 0.2, 3)
def udd10_pos(j):
return np.sin(np.pi*j/(2*10 + 2))**2
with pulse.build() as udd_sched:
pulse.play(x90, d0)
with pulse.align_func(duration=300, func=udd10_pos):
for _ in range(10):
pulse.play(x180, d0)
pulse.play(x90, d0)
udd_sched.draw()
Args:
duration: Duration of context. This should be larger than the schedule duration.
func: A function that takes an index of sub-schedule and returns the
fractional coordinate of that sub-schedule.
The returned value should be defined within [0, 1].
The pulse index starts from 1.
Yields:
None
Notes:
The scheduling is performed for sub-schedules within the context rather than
channel-wise. If you want to apply the numerical context for each channel,
you need to apply the context independently to channels.
"""
builder = _active_builder()
builder.push_context(transforms.AlignFunc(duration=duration, func=func))
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
@contextmanager
def general_transforms(alignment_context: AlignmentKind) -> ContextManager[None]:
"""Arbitrary alignment transformation defined by a subclass instance of
:class:`~qiskit.pulse.transforms.alignments.AlignmentKind`.
Args:
alignment_context: Alignment context instance that defines schedule transformation.
Yields:
None
Raises:
PulseError: When input ``alignment_context`` is not ``AlignmentKind`` subclasses.
"""
if not isinstance(alignment_context, AlignmentKind):
raise exceptions.PulseError("Input alignment context is not `AlignmentKind` subclass.")
builder = _active_builder()
builder.push_context(alignment_context)
try:
yield
finally:
current = builder.pop_context()
builder.append_block(current)
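# ``general_transforms`` is the escape hatch for alignment classes that have no
# dedicated context helper. A minimal sketch reusing a stock AlignmentKind
# instance from qiskit.pulse.transforms (the helper name is illustrative; any
# AlignmentKind subclass instance would do):
def _demo_general_transforms():
    from qiskit import pulse
    from qiskit.pulse import builder, transforms

    d0 = pulse.DriveChannel(0)
    with pulse.build() as prog:
        with builder.general_transforms(transforms.AlignEquispaced(duration=100)):
            pulse.play(pulse.Constant(10, 1.0), d0)
            pulse.play(pulse.Constant(10, 1.0), d0)
    return prog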
@utils.deprecated_functionality
@contextmanager
def inline() -> ContextManager[None]:
"""Deprecated. Inline all instructions within this context into the parent context,
inheriting the scheduling policy of the parent context.
.. warning:: This will cause all scheduling directives within this context
to be ignored.
"""
def _flatten(block):
for inst in block.blocks:
if isinstance(inst, ScheduleBlock):
yield from _flatten(inst)
else:
yield inst
builder = _active_builder()
# set a placeholder
builder.push_context(transforms.AlignLeft())
try:
yield
finally:
placeholder = builder.pop_context()
for inst in _flatten(placeholder):
builder.append_instruction(inst)
@contextmanager
def pad(*chs: chans.Channel) -> ContextManager[None]: # pylint: disable=unused-argument
"""Deprecated. Pad all available timeslots with delays upon exiting context.
Args:
chs: Channels to pad with delays. Defaults to all channels in context
if none are supplied.
Yields:
None
"""
warnings.warn(
"Context-wise padding is being deprecated. Requested padding is being ignored. "
"Now the pulse builder generate a program in `ScheduleBlock` representation. "
"The padding with delay as a blocker is no longer necessary for this program. "
"However, if you still want delays, you can convert the output program "
"into `Schedule` representation by calling "
"`qiskit.pulse.transforms.target_qobj_transform`. Then, you can apply "
"`qiskit.pulse.transforms.pad` to the converted schedule. ",
DeprecationWarning,
)
try:
yield
finally:
pass
@contextmanager
def transpiler_settings(**settings) -> ContextManager[None]:
"""Set the currently active transpiler settings for this context.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
print(pulse.active_transpiler_settings())
with pulse.transpiler_settings(optimization_level=3):
print(pulse.active_transpiler_settings())
"""
builder = _active_builder()
curr_transpiler_settings = builder.transpiler_settings
builder.transpiler_settings = collections.ChainMap(settings, curr_transpiler_settings)
try:
yield
finally:
builder.transpiler_settings = curr_transpiler_settings
@contextmanager
def circuit_scheduler_settings(**settings) -> ContextManager[None]:
"""Set the currently active circuit scheduler settings for this context.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
print(pulse.active_circuit_scheduler_settings())
with pulse.circuit_scheduler_settings(method='alap'):
print(pulse.active_circuit_scheduler_settings())
"""
builder = _active_builder()
curr_circuit_scheduler_settings = builder.circuit_scheduler_settings
builder.circuit_scheduler_settings = collections.ChainMap(
settings, curr_circuit_scheduler_settings
)
try:
yield
finally:
builder.circuit_scheduler_settings = curr_circuit_scheduler_settings
@contextmanager
def phase_offset(phase: float, *channels: chans.PulseChannel) -> ContextManager[None]:
"""Shift the phase of input channels on entry into context and undo on exit.
Examples:
.. jupyter-execute::
import math
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
with pulse.phase_offset(math.pi, d0):
pulse.play(pulse.Constant(10, 1.0), d0)
assert len(pulse_prog.instructions) == 3
Args:
phase: Amount of phase offset in radians.
channels: Channels to offset phase of.
Yields:
None
"""
for channel in channels:
shift_phase(phase, channel)
try:
yield
finally:
for channel in channels:
shift_phase(-phase, channel)
@contextmanager
def frequency_offset(
frequency: float, *channels: chans.PulseChannel, compensate_phase: bool = False
) -> ContextManager[None]:
"""Shift the frequency of inputs channels on entry into context and undo on exit.
Examples:
.. code-block:: python
:emphasize-lines: 9, 18
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
d0 = pulse.DriveChannel(0)
with pulse.build(backend) as pulse_prog:
# shift frequency by 1GHz
with pulse.frequency_offset(1e9, d0):
pulse.play(pulse.Constant(10, 1.0), d0)
assert len(pulse_prog.instructions) == 3
with pulse.build(backend) as pulse_prog:
# Shift frequency by 1GHz.
# Undo accumulated phase in the shifted frequency frame
# when exiting the context.
with pulse.frequency_offset(1e9, d0, compensate_phase=True):
pulse.play(pulse.Constant(10, 1.0), d0)
assert len(pulse_prog.instructions) == 4
Args:
frequency: Amount of frequency offset in Hz.
channels: Channels to offset frequency of.
compensate_phase: Compensate for the phase accumulated with respect to the
channels' frame at its initial frequency.
Yields:
None
"""
builder = _active_builder()
# TODO: Need proper implementation of compensation. t0 may depend on the parent context.
# For example, the instruction position within the equispaced context depends on
# the current total number of instructions, thus adding more instruction after
# offset context may change the t0 when the parent context is transformed.
t0 = builder.get_context().duration
for channel in channels:
shift_frequency(frequency, channel)
try:
yield
finally:
if compensate_phase:
duration = builder.get_context().duration - t0
dt = active_backend().configuration().dt
accumulated_phase = 2 * np.pi * ((duration * dt * frequency) % 1)
for channel in channels:
shift_phase(-accumulated_phase, channel)
for channel in channels:
shift_frequency(-frequency, channel)
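# The compensation above is plain bookkeeping: the extra phase picked up while
# detuned is 2*pi*frequency*elapsed_time, reduced modulo one full turn. A small
# numeric sketch of that arithmetic (no backend needed; the dt, duration and
# frequency values below, and the helper name, are illustrative):
def _demo_phase_compensation_arithmetic():
    import numpy as np

    dt = 2.2222e-10    # seconds per sample (illustrative)
    duration = 160     # samples spent inside the offset context
    frequency = 1e9    # Hz of the temporary detuning

    accumulated_phase = 2 * np.pi * ((duration * dt * frequency) % 1)
    # Shifting by -accumulated_phase on exit realigns the channel frame with
    # the phase it would have had without the temporary offset.
    return accumulated_phase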
# Channels
def drive_channel(qubit: int) -> chans.DriveChannel:
"""Return ``DriveChannel`` for ``qubit`` on the active builder backend.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
assert pulse.drive_channel(0) == pulse.DriveChannel(0)
.. note:: Requires the active builder context to have a backend set.
"""
return active_backend().configuration().drive(qubit)
def measure_channel(qubit: int) -> chans.MeasureChannel:
"""Return ``MeasureChannel`` for ``qubit`` on the active builder backend.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
assert pulse.measure_channel(0) == pulse.MeasureChannel(0)
.. note:: Requires the active builder context to have a backend set.
"""
return active_backend().configuration().measure(qubit)
def acquire_channel(qubit: int) -> chans.AcquireChannel:
"""Return ``AcquireChannel`` for ``qubit`` on the active builder backend.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
assert pulse.acquire_channel(0) == pulse.AcquireChannel(0)
.. note:: Requires the active builder context to have a backend set.
"""
return active_backend().configuration().acquire(qubit)
def control_channels(*qubits: Iterable[int]) -> List[chans.ControlChannel]:
"""Return ``AcquireChannel`` for ``qubit`` on the active builder backend.
Return the secondary drive channel for the given qubit -- typically
utilized for controlling multi-qubit interactions.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend):
assert pulse.control_channels(0, 1) == [pulse.ControlChannel(0)]
.. note:: Requires the active builder context to have a backend set.
Args:
qubits: Tuple or list of ordered qubits of the form
`(control_qubit, target_qubit)`.
Returns:
List of control channels associated with the supplied ordered list
of qubits.
"""
return active_backend().configuration().control(qubits=qubits)
# Base Instructions
def delay(duration: int, channel: chans.Channel, name: Optional[str] = None):
"""Delay on a ``channel`` for a ``duration``.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.delay(10, d0)
Args:
duration: Number of cycles to delay for on ``channel``.
channel: Channel to delay on.
name: Name of the instruction.
"""
append_instruction(instructions.Delay(duration, channel, name=name))
def play(
pulse: Union[library.Pulse, np.ndarray], channel: chans.PulseChannel, name: Optional[str] = None
):
"""Play a ``pulse`` on a ``channel``.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.play(pulse.Constant(10, 1.0), d0)
Args:
pulse: Pulse to play.
channel: Channel to play pulse on.
name: Name of the pulse.
"""
if not isinstance(pulse, library.Pulse):
pulse = library.Waveform(pulse)
append_instruction(instructions.Play(pulse, channel, name=name))
def acquire(
duration: int,
qubit_or_channel: Union[int, chans.AcquireChannel],
register: StorageLocation,
**metadata: Union[configuration.Kernel, configuration.Discriminator],
):
"""Acquire for a ``duration`` on a ``channel`` and store the result
in a ``register``.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.AcquireChannel(0)
mem0 = pulse.MemorySlot(0)
with pulse.build() as pulse_prog:
pulse.acquire(100, d0, mem0)
# measurement metadata
kernel = pulse.configuration.Kernel('linear_discriminator')
pulse.acquire(100, d0, mem0, kernel=kernel)
.. note:: The type of data acquired will depend on the execution
``meas_level``.
Args:
duration: Duration to acquire data for
qubit_or_channel: Either the qubit to acquire data for or the specific
:class:`~qiskit.pulse.channels.AcquireChannel` to acquire on.
register: Location to store measured result.
metadata: Additional metadata for measurement. See
:class:`~qiskit.pulse.instructions.Acquire` for more information.
Raises:
exceptions.PulseError: If the register type is not supported.
"""
if isinstance(qubit_or_channel, int):
qubit_or_channel = chans.AcquireChannel(qubit_or_channel)
if isinstance(register, chans.MemorySlot):
append_instruction(
instructions.Acquire(duration, qubit_or_channel, mem_slot=register, **metadata)
)
elif isinstance(register, chans.RegisterSlot):
append_instruction(
instructions.Acquire(duration, qubit_or_channel, reg_slot=register, **metadata)
)
else:
raise exceptions.PulseError(
'Register of type: "{}" is not supported'.format(type(register))
)
def set_frequency(frequency: float, channel: chans.PulseChannel, name: Optional[str] = None):
"""Set the ``frequency`` of a pulse ``channel``.
Examples:
.. jupyter-execute::
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.set_frequency(1e9, d0)
Args:
frequency: Frequency in Hz to set channel to.
channel: Channel to set frequency of.
name: Name of the instruction.
"""
append_instruction(instructions.SetFrequency(frequency, channel, name=name))
def shift_frequency(frequency: float, channel: chans.PulseChannel, name: Optional[str] = None):
"""Shift the ``frequency`` of a pulse ``channel``.
Examples:
.. code-block:: python
:emphasize-lines: 6
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.shift_frequency(1e9, d0)
Args:
frequency: Frequency in Hz to shift channel frequency by.
channel: Channel to shift frequency of.
name: Name of the instruction.
"""
append_instruction(instructions.ShiftFrequency(frequency, channel, name=name))
def set_phase(phase: float, channel: chans.PulseChannel, name: Optional[str] = None):
"""Set the ``phase`` of a pulse ``channel``.
Examples:
.. code-block:: python
:emphasize-lines: 8
import math
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.set_phase(math.pi, d0)
Args:
phase: Phase in radians to set channel carrier signal to.
channel: Channel to set phase of.
name: Name of the instruction.
"""
append_instruction(instructions.SetPhase(phase, channel, name=name))
def shift_phase(phase: float, channel: chans.PulseChannel, name: Optional[str] = None):
"""Shift the ``phase`` of a pulse ``channel``.
Examples:
.. jupyter-execute::
import math
from qiskit import pulse
d0 = pulse.DriveChannel(0)
with pulse.build() as pulse_prog:
pulse.shift_phase(math.pi, d0)
Args:
phase: Phase in radians to shift channel carrier signal by.
channel: Channel to shift phase of.
name: Name of the instruction.
"""
append_instruction(instructions.ShiftPhase(phase, channel, name))
def snapshot(label: str, snapshot_type: str = "statevector"):
"""Simulator snapshot.
Examples:
.. jupyter-execute::
from qiskit import pulse
with pulse.build() as pulse_prog:
pulse.snapshot('first', 'statevector')
Args:
label: Label for snapshot.
snapshot_type: Type of snapshot.
"""
append_instruction(instructions.Snapshot(label, snapshot_type=snapshot_type))
def call_schedule(schedule: Schedule):
"""Call a pulse ``schedule`` in the builder context.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import builder
d0 = pulse.DriveChannel(0)
sched = pulse.Schedule()
sched += pulse.Play(pulse.Constant(10, 1.0), d0)
with pulse.build() as pulse_prog:
builder.call_schedule(sched)
assert pulse_prog == sched
Args:
schedule: Schedule to call.
"""
warnings.warn(
"``call_schedule`` is being deprecated. "
"``call`` function can take both a schedule and a circuit.",
DeprecationWarning,
)
call(schedule)
def call_circuit(circ: circuit.QuantumCircuit):
"""Call a quantum ``circuit`` within the active builder context.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
from qiskit import circuit, pulse, schedule, transpile
from qiskit.pulse import builder
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
d0 = pulse.DriveChannel(0)
qc = circuit.QuantumCircuit(2)
qc.cx(0, 1)
qc_transpiled = transpile(qc, optimization_level=3)
sched = schedule(qc_transpiled, backend)
with pulse.build(backend) as pulse_prog:
# with default settings
builder.call_circuit(qc)
with pulse.build(backend) as pulse_prog:
with pulse.transpiler_settings(optimization_level=3):
builder.call_circuit(qc)
assert pulse_prog == sched
.. note:: Requires the active builder context to have a backend set.
Args:
circ: Circuit to call.
"""
warnings.warn(
"``call_circuit`` is being deprecated. "
"``call`` function can take both a schedule and a circuit.",
DeprecationWarning,
)
call(circ)
def call(
target: Union[circuit.QuantumCircuit, Schedule, ScheduleBlock],
name: Optional[str] = None,
value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None,
**kw_params: ParameterValueType,
):
"""Call the ``target`` within the currently active builder context with arbitrary
parameters which will be assigned to the target program.
.. note::
The ``target`` program is inserted as a ``Call`` instruction.
This instruction defines a subroutine. See :class:`~qiskit.pulse.instructions.Call`
for more details.
Examples:
.. code-block:: python
from qiskit import circuit, pulse, schedule, transpile
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
qc = circuit.QuantumCircuit(2)
qc.cx(0, 1)
qc_transpiled = transpile(qc, optimization_level=3)
sched = schedule(qc_transpiled, backend)
with pulse.build(backend) as pulse_prog:
pulse.call(sched)
pulse.call(qc)
This function can optionally take a parameter dictionary when the target program is parameterized.
.. code-block:: python
from qiskit import circuit, pulse
amp = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0))
with pulse.build() as main_prog:
pulse.call(subroutine, amp=0.1)
pulse.call(subroutine, amp=0.3)
If there is any parameter name collision, you can distinguish the parameters by
specifying each parameter object in a Python dictionary. Otherwise ``amp1`` and
``amp2`` will be updated with the same value.
.. code-block:: python
from qiskit import circuit, pulse
amp1 = circuit.Parameter('amp')
amp2 = circuit.Parameter('amp')
with pulse.build() as subroutine:
pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0))
pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1))
with pulse.build() as main_prog:
pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2})
Args:
target: Target circuit or pulse schedule to call.
name: Name of subroutine if defined.
value_dict: Parameter object and assigned value mapping. This is a more precise
way to identify a parameter since the mapping is managed with unique object ids
rather than names, which matters when there is a name collision in the
parameter table.
kw_params: Parameter values to bind to the target subroutine
with string parameter names. If parameter names overlap, all matching
parameters are updated with the same assigned value.
Raises:
exceptions.PulseError: If the input ``target`` type is not supported.
"""
if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)):
raise exceptions.PulseError(
f'Target of type "{target.__class__.__name__}" is not supported.'
)
_active_builder().call_subroutine(target, name, value_dict, **kw_params)
# Directives
def barrier(*channels_or_qubits: Union[chans.Channel, int], name: Optional[str] = None):
"""Barrier directive for a set of channels and qubits.
This directive prevents the compiler from moving instructions across
the barrier. Consider the case where we want to enforce that one pulse
happens after another on separate channels, this can be done with:
.. jupyter-kernel:: python3
:id: barrier
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build(backend) as barrier_pulse_prog:
pulse.play(pulse.Constant(10, 1.0), d0)
pulse.barrier(d0, d1)
pulse.play(pulse.Constant(10, 1.0), d1)
Of course this could have been accomplished with:
.. jupyter-execute::
from qiskit.pulse import transforms
with pulse.build(backend) as aligned_pulse_prog:
with pulse.align_sequential():
pulse.play(pulse.Constant(10, 1.0), d0)
pulse.play(pulse.Constant(10, 1.0), d1)
barrier_pulse_prog = transforms.target_qobj_transform(barrier_pulse_prog)
aligned_pulse_prog = transforms.target_qobj_transform(aligned_pulse_prog)
assert barrier_pulse_prog == aligned_pulse_prog
The barrier allows the pulse compiler to take care of more advanced
scheduling alignment operations across channels. For example
in the case where we are calling an outside circuit or schedule and
want to align a pulse at the end of one call:
.. jupyter-execute::
import math
d0 = pulse.DriveChannel(0)
with pulse.build(backend) as pulse_prog:
with pulse.align_right():
pulse.x(1)
# Barrier qubit 1 and d0.
pulse.barrier(1, d0)
# Due to barrier this will play before the gate on qubit 1.
pulse.play(pulse.Constant(10, 1.0), d0)
# This will end at the same time as the pulse above due to
# the barrier.
pulse.x(1)
.. note:: Requires the active builder context to have a backend set if
qubits are barriered on.
Args:
channels_or_qubits: Channels or qubits to barrier.
name: Name for the barrier
"""
channels = _qubits_to_channels(*channels_or_qubits)
if len(channels) > 1:
append_instruction(directives.RelativeBarrier(*channels, name=name))
# Macros
def macro(func: Callable):
"""Wrap a Python function and activate the parent builder context at calling time.
This enables embedding Python functions as builder macros. This generates a new
:class:`pulse.Schedule` that is embedded in the parent builder context with
every call of the decorated macro function. The decorated macro function will
behave as if the function code was embedded inline in the parent builder context
after parameter substitution.
Examples:
.. jupyter-execute::
from qiskit import pulse
@pulse.macro
def measure(qubit: int):
pulse.play(pulse.GaussianSquare(16384, 256, 15872), pulse.measure_channel(qubit))
mem_slot = pulse.MemorySlot(qubit)
pulse.acquire(16384, pulse.acquire_channel(qubit), mem_slot)
return mem_slot
with pulse.build(backend=backend) as sched:
mem_slot = measure(0)
print(f"Qubit measured into {mem_slot}")
sched.draw()
Args:
func: The Python function to enable as a builder macro. There are no
requirements on the signature of the function; any calls to pulse
builder methods will be added to the builder context the wrapped function
is called from.
Returns:
Callable: The wrapped ``func``.
"""
func_name = getattr(func, "__name__", repr(func))
@functools.wraps(func)
def wrapper(*args, **kwargs):
_builder = _active_builder()
# activate the pulse builder before calling the function
with build(backend=_builder.backend, name=func_name) as built:
output = func(*args, **kwargs)
_builder.call_subroutine(built)
return output
return wrapper
def measure(
qubits: Union[List[int], int],
registers: Union[List[StorageLocation], StorageLocation] = None,
) -> Union[List[StorageLocation], StorageLocation]:
"""Measure a qubit within the currently active builder context.
At the pulse level a measurement is composed of both a stimulus pulse and
an acquisition instruction which tells the system's measurement unit to
acquire data and process it. We provide this measurement macro to automate
the process for you, but if desired full control is still available with
:func:`acquire` and :func:`play`.
To use the measurement it is as simple as specifying the qubit you wish to
measure:
.. jupyter-kernel:: python3
:id: measure
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
qubit = 0
with pulse.build(backend) as pulse_prog:
# Do something to the qubit.
qubit_drive_chan = pulse.drive_channel(0)
pulse.play(pulse.Constant(100, 1.0), qubit_drive_chan)
# Measure the qubit.
reg = pulse.measure(qubit)
For now it is not possible to do much with the handle to ``reg``, but in the
future we will support using this handle to a result register to build
up one's program. It is also possible to supply this register:
.. jupyter-execute::
with pulse.build(backend) as pulse_prog:
pulse.play(pulse.Constant(100, 1.0), qubit_drive_chan)
# Measure the qubit.
mem0 = pulse.MemorySlot(0)
reg = pulse.measure(qubit, mem0)
assert reg == mem0
.. note:: Requires the active builder context to have a backend set.
Args:
qubits: Physical qubit(s) to measure.
registers: Register to store result in. If not supplied, the current
behavior is to return the :class:`MemorySlot` with the same
index as ``qubit``. This register will be returned.
Returns:
The ``register`` the qubit measurement result will be stored in.
"""
backend = active_backend()
try:
qubits = list(qubits)
except TypeError:
qubits = [qubits]
if registers is None:
registers = [chans.MemorySlot(qubit) for qubit in qubits]
else:
try:
registers = list(registers)
except TypeError:
registers = [registers]
measure_sched = macros.measure(
qubits=qubits,
inst_map=backend.defaults().instruction_schedule_map,
meas_map=backend.configuration().meas_map,
qubit_mem_slots={qubit: register.index for qubit, register in zip(qubits, registers)},
)
# note this is not a subroutine.
# just a macro to automate combination of stimulus and acquisition.
_active_builder().call_subroutine(measure_sched)
if len(qubits) == 1:
return registers[0]
else:
return registers
def measure_all() -> List[chans.MemorySlot]:
r"""Measure all qubits within the currently active builder context.
A simple macro function to measure all of the qubits in the device at the
same time. This is useful for handling device ``meas_map`` and single
measurement constraints.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
# Measure all qubits and return associated registers.
regs = pulse.measure_all()
.. note::
Requires the active builder context to have a backend set.
Returns:
The ``register``\s the qubit measurement results will be stored in.
"""
backend = active_backend()
qubits = range(num_qubits())
registers = [chans.MemorySlot(qubit) for qubit in qubits]
measure_sched = macros.measure(
qubits=qubits,
inst_map=backend.defaults().instruction_schedule_map,
meas_map=backend.configuration().meas_map,
qubit_mem_slots={qubit: qubit for qubit in qubits},
)
# note this is not a subroutine.
# just a macro to automate combination of stimulus and acquisition.
_active_builder().call_subroutine(measure_sched)
return registers
def delay_qubits(duration: int, *qubits: Union[int, Iterable[int]]):
r"""Insert delays on all of the :class:`channels.Channel`\s that correspond
to the input ``qubits`` at the same time.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse3Q
backend = FakeOpenPulse3Q()
with pulse.build(backend) as pulse_prog:
# Delay for 100 cycles on qubits 0, 1 and 2.
regs = pulse.delay_qubits(100, 0, 1, 2)
.. note:: Requires the active builder context to have a backend set.
Args:
duration: Duration to delay for.
qubits: Physical qubits to delay on. Delays will be inserted based on
the channels returned by :func:`pulse.qubit_channels`.
"""
qubit_chans = set(itertools.chain.from_iterable(qubit_channels(qubit) for qubit in qubits))
with align_left(): # pylint: disable=not-context-manager
for chan in qubit_chans:
delay(duration, chan)
# Gate instructions
def call_gate(gate: circuit.Gate, qubits: Tuple[int, ...], lazy: bool = True):
"""Call a gate and lazily schedule it to its corresponding
pulse instruction.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
.. jupyter-kernel:: python3
:id: call_gate
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.pulse import builder
from qiskit.circuit.library import standard_gates as gates
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
builder.call_gate(gates.CXGate(), (0, 1))
We can see the role of the transpiler in scheduling gates by optimizing
away two consecutive CNOT gates:
.. jupyter-execute::
with pulse.build(backend) as pulse_prog:
with pulse.transpiler_settings(optimization_level=3):
builder.call_gate(gates.CXGate(), (0, 1))
builder.call_gate(gates.CXGate(), (0, 1))
assert pulse_prog == pulse.Schedule()
.. note:: If multiple gates are called in a row they may be optimized by
the transpiler, depending on the
:func:`pulse.active_transpiler_settings`.
.. note:: Requires the active builder context to have a backend set.
Args:
gate: Circuit gate instance to call.
qubits: Qubits to call gate on.
lazy: If ``False`` the gate will be compiled immediately, otherwise
it will be added onto a lazily evaluated quantum circuit to be
compiled when the builder is forced to by a circuit assumption
being broken, such as the inclusion of a pulse instruction or
new alignment context.
"""
_active_builder().call_gate(gate, qubits, lazy=lazy)
def cx(control: int, target: int): # pylint: disable=invalid-name
"""Call a :class:`~qiskit.circuit.library.standard_gates.CXGate` on the
input physical qubits.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
pulse.cx(0, 1)
"""
call_gate(gates.CXGate(), (control, target))
def u1(theta: float, qubit: int): # pylint: disable=invalid-name
"""Call a :class:`~qiskit.circuit.library.standard_gates.U1Gate` on the
input physical qubit.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
import math
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
pulse.u1(math.pi, 1)
"""
call_gate(gates.U1Gate(theta), qubit)
def u2(phi: float, lam: float, qubit: int): # pylint: disable=invalid-name
"""Call a :class:`~qiskit.circuit.library.standard_gates.U2Gate` on the
input physical qubit.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
import math
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
pulse.u2(0, math.pi, 1)
"""
call_gate(gates.U2Gate(phi, lam), qubit)
def u3(theta: float, phi: float, lam: float, qubit: int): # pylint: disable=invalid-name
"""Call a :class:`~qiskit.circuit.library.standard_gates.U3Gate` on the
input physical qubit.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
import math
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
pulse.u3(math.pi, 0, math.pi, 1)
"""
call_gate(gates.U3Gate(theta, phi, lam), qubit)
def x(qubit: int):
"""Call a :class:`~qiskit.circuit.library.standard_gates.XGate` on the
input physical qubit.
.. note::
Calling gates directly within the pulse builder namespace will be
deprecated in the future in favor of tight integration with a circuit
builder interface which is under development.
Examples:
.. jupyter-execute::
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
with pulse.build(backend) as pulse_prog:
pulse.x(0)
"""
call_gate(gates.XGate(), qubit)
|
the-stack_106_13690
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests deprecation warnings in a few special cases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class DeprecationTest(test.TestCase):
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunction(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.initializers.tables_initializer()
self.assertEqual(0, mock_warning.call_count)
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"tables_initializer")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.tables_initializer")
tf.tables_initializer()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClass(self, mock_warning):
value = np.array([1, 2, 3])
row_splits = np.array([1])
self.assertEqual(0, mock_warning.call_count)
tf.compat.v1.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(0, mock_warning.call_count)
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"ragged.RaggedTensorValue")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"compat.v1.ragged.RaggedTensorValue")
tf.ragged.RaggedTensorValue(value, row_splits)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedFunctionEndpoint(self, mock_warning):
array = tf.IndexedSlices(
tf.compat.v1.convert_to_tensor(np.array([1, 2])),
tf.compat.v1.convert_to_tensor(np.array([0, 2])))
mask_indices = tf.compat.v1.convert_to_tensor(np.array([2]))
self.assertEqual(0, mock_warning.call_count)
tf.sparse.mask(array, mask_indices)
self.assertEqual(0, mock_warning.call_count)
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"sparse_mask")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
"sparse.mask")
tf.sparse_mask(array, mask_indices)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedClassEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.io.VarLenFeature(tf.dtypes.int32)
self.assertEqual(0, mock_warning.call_count)
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2], r"VarLenFeature")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"io.VarLenFeature")
tf.VarLenFeature(tf.dtypes.int32)
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testDeprecatedConstantEndpoint(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(0, mock_warning.call_count)
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][1],
"deprecation_test.py:")
self.assertRegexpMatches(
mock_warning.call_args[0][2],
r"saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
self.assertRegexpMatches(
mock_warning.call_args[0][3],
r"saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY")
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY # pylint: disable=pointless-statement
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasDeprecationNoWarning(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.layers.GRUCell(20)
self.assertLessEqual(mock_warning.call_count, 1)
if mock_warning.call_count == 1:
# The only message printed should be due to referencing init op.
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"Call initializer instance with the dtype argument instead of "
"passing it to the constructor")
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasDeprecation(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.backend.get_session()
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"tf.compat.v1.keras.backend.get_session")
tf.keras.backend.get_session()
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testKerasEndpointDeprecation(self, mock_warning):
self.assertEqual(0, mock_warning.call_count)
tf.keras.metrics.cosine_proximity([0.5], [0.5])
self.assertEqual(1, mock_warning.call_count)
self.assertRegexpMatches(
mock_warning.call_args[0][-1],
"tf.keras.losses.cosine_similarity")
tf.keras.metrics.cosine_proximity([0.5], [0.5])
self.assertEqual(1, mock_warning.call_count)
@test.mock.patch.object(logging, "warning", autospec=True)
def testEstimatorDeprecation(self, mock_warning):
if "KMeans" in tf.estimator.experimental.__dict__:
self.assertEqual(0, mock_warning.call_count)
tf.estimator.experimental.KMeans(2)
self.assertEqual(2, mock_warning.call_count)
# call_args_list[1] holds the model-directory notice, not a deprecation warning.
self.assertRegexpMatches(
mock_warning.call_args_list[1][0][0],
"Using temporary folder as model directory:")
# call_args_list[0] holds the deprecation warning.
self.assertRegexpMatches(
mock_warning.call_args_list[0][0][-1],
"tf.compat.v1.estimator.experimental.KMeans")
if __name__ == "__main__":
test.main()
|
the-stack_106_13693
|
# -*- coding: utf-8 -*-
# pylint: disable=C0103, R0913, R0902, C0326, R0903, W1401, too-many-lines
# disable snake_case warning, too many arguments, too many attributes,
# one space before assignment, too few public methods, anomalous backslash
# in string
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy import dot
from filterpy.common import pretty_str
class GHFilterOrder(object):
""" A g-h filter of aspecified order 0, 1, or 2.
Strictly speaking, the g-h filter is order 1, and the 2nd order
filter is called the g-h-k filter. I'm not aware of any filter name
that encompasses orders 0, 1, and 2 under one name, or I would use it.
Parameters
----------
x0 : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
dt : scalar
timestep
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
g : float
filter g gain parameter.
h : float, optional
filter h gain parameter, order 1 and 2 only
k : float, optional
filter k gain parameter, order 2 only
Attributes
----------
x : np.array
State of the filter.
x[0] is the value being tracked
x[1] is the derivative of x[0] (order 1 and 2 only)
x[2] is the 2nd derivative of x[0] (order 2 only)
This is always an np.array, even for order 0 where you can
initialize x0 with a scalar.
y : np.array
Residual - difference between the measurement and the prediction
dt : scalar
timestep
order : int
order of the filter. Defines the order of the system
0 - assumes system of form x = a_0 + a_1*t
1 - assumes system of form x = a_0 +a_1*t + a_2*t^2
2 - assumes system of form x = a_0 +a_1*t + a_2*t^2 + a_3*t^3
g : float
filter g gain parameter.
h : float
filter h gain parameter, order 1 and 2 only
k : float
filter k gain parameter, order 2 only
z : 1D np.array or scalar
measurement passed into update()
"""
def __init__(self, x0, dt, order, g, h=None, k=None):
""" Creates a g-h filter of order 0, 1, or 2.
"""
if order < 0 or order > 2:
raise ValueError('order must be between 0 and 2')
if np.isscalar(x0):
self.x = np.zeros(order+1)
self.x[0] = x0
else:
self.x = np.copy(x0.astype(float))
self.dt = dt
self.order = order
self.g = g
self.h = h
self.k = k
self.y = np.zeros(len(self.x)) # residual
self.z = np.zeros(len(self.x)) # last measurement
def update(self, z, g=None, h=None, k=None):
"""
Update the filter with measurement z. z must be the same type
or treatable as the same type as self.x[0].
"""
if self.order == 0:
if g is None:
g = self.g
self.y = z - self.x[0]
self.x += dot(g, self.y)
elif self.order == 1:
if g is None:
g = self.g
if h is None:
h = self.h
x = self.x[0]
dx = self.x[1]
dxdt = dot(dx, self.dt)
self.y = z - (x + dxdt)
self.x[0] = x + dxdt + g*self.y
self.x[1] = dx + h*self.y / self.dt
self.z = z
else: # order == 2
if g is None:
g = self.g
if h is None:
h = self.h
if k is None:
k = self.k
x = self.x[0]
dx = self.x[1]
ddx = self.x[2]
dxdt = dot(dx, self.dt)
T2 = self.dt**2.
self.y = z -(x + dxdt +0.5*ddx*T2)
self.x[0] = x + dxdt + 0.5*ddx*T2 + g*self.y
self.x[1] = dx + ddx*self.dt + h*self.y / self.dt
self.x[2] = ddx + 2*k*self.y / (self.dt**2)
def __repr__(self):
return '\n'.join([
'GHFilterOrder object',
pretty_str('dt', self.dt),
pretty_str('order', self.order),
pretty_str('x', self.x),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('k', self.k),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
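# A small usage sketch for GHFilterOrder: an order-1 filter tracking a clean
# ramp that grows by 2 per step. The gains and the helper name are
# illustrative only.
def _demo_gh_filter_order():
    f = GHFilterOrder(x0=0., dt=1., order=1, g=.6, h=.1)
    for step in range(1, 6):
        f.update(z=2. * step)   # measurements 2, 4, 6, 8, 10
    # f.x[0] approaches the current position, f.x[1] approaches the slope of 2.
    return f.x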
class GHFilter(object):
"""
Implements the g-h filter. The topic is too large to cover in
this comment. See my book "Kalman and Bayesian Filters in Python" [1]
or Eli Brookner's "Tracking and Kalman Filters Made Easy" [2].
A few basic examples are below, and the tests in ./gh_tests.py may
give you more ideas on use.
Parameters
----------
x : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
dx : 1D np.array or scalar
Initial value for the derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
Attributes
----------
x : 1D np.array or scalar
filter state
dx : 1D np.array or scalar
derivative of the filter state.
x_prediction : 1D np.array or scalar
predicted filter state
dx_prediction : 1D np.array or scalar
predicted derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
y : np.array, or scalar
residual (difference between measurement and prior)
z : np.array, or scalar
measurement passed into update()
Examples
--------
Create a basic filter for a scalar value with g=.8, h=.2.
Initialize to 0, with a derivative(velocity) of 0.
>>> from filterpy.gh import GHFilter
>>> f = GHFilter (x=0., dx=0., dt=1., g=.8, h=.2)
Incorporate the measurement of 1
>>> f.update(z=1)
(0.8, 0.2)
Incorporate a measurement of 2 with g=1 and h=0.01
>>> f.update(z=2, g=1, h=0.01)
(2.0, 0.21000000000000002)
Create a filter with two independent variables.
>>> from numpy import array
>>> f = GHFilter (x=array([0,1]), dx=array([0,0]), dt=1, g=.8, h=.02)
and update with the measurements (2,4)
>>> f.update(array([2,4]))
(array([ 1.6, 3.4]), array([ 0.04, 0.06]))
References
----------
[1] Labbe, "Kalman and Bayesian Filters in Python"
http://rlabbe.github.io/Kalman-and-Bayesian-Filters-in-Python
[2] Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
"""
def __init__(self, x, dx, dt, g, h):
self.x = x
self.dx = dx
self.dt = dt
self.g = g
self.h = h
self.dx_prediction = self.dx
self.x_prediction = self.x
if np.ndim(x) == 0:
self.y = 0. # residual
self.z = 0.
else:
self.y = np.zeros(len(x))
self.z = np.zeros(len(x))
def update(self, z, g=None, h=None):
"""
performs the g-h filter predict and update step on the
measurement z. Modifies the member variables listed below,
and returns the state of x and dx as a tuple as a convenience.
**Modified Members**
x
filtered state variable
dx
derivative (velocity) of x
residual
difference between the measurement and the prediction for x
x_prediction
predicted value of x before incorporating the measurement z.
dx_prediction
predicted value of the derivative of x before incorporating the
measurement z.
Parameters
----------
z : any
the measurement
g : scalar (optional)
Override the fixed self.g value for this update
h : scalar (optional)
Override the fixed self.h value for this update
Returns
-------
x filter output for x
dx filter output for dx (derivative of x)
"""
if g is None:
g = self.g
if h is None:
h = self.h
#prediction step
self.dx_prediction = self.dx
self.x_prediction = self.x + (self.dx*self.dt)
# update step
self.y = z - self.x_prediction
self.dx = self.dx_prediction + h * self.y / self.dt
self.x = self.x_prediction + g * self.y
return (self.x, self.dx)
def batch_filter(self, data, save_predictions=False, saver=None):
"""
Given a sequenced list of data, performs g-h filter
with a fixed g and h. See update() if you need to vary g and/or h.
Uses self.x and self.dx to initialize the filter, but DOES NOT
alter self.x and self.dx during execution, allowing you to use this
class multiple times without resetting self.x and self.dx. I'm not sure
how often you would need to do that, but the capability is there.
More exactly, none of the class member variables are modified
by this function, in distinct contrast to update(), which changes
most of them.
Parameters
----------
data : list like
contains the data to be filtered.
save_predictions : boolean
the predictions will be saved and returned if this is true
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
results : np.array shape (n+1, 2), where n=len(data)
contains the results of the filter, where
results[i,0] is x , and
results[i,1] is dx (derivative of x)
First entry is the initial values of x and dx as set by __init__.
predictions : np.array shape(n), optional
the predictions for each step in the filter. Only returned if
save_predictions == True
"""
x = self.x
dx = self.dx
n = len(data)
results = np.zeros((n+1, 2))
results[0, 0] = x
results[0, 1] = dx
if save_predictions:
predictions = np.zeros(n)
# optimization to avoid n computations of h / dt
h_dt = self.h / self.dt
for i, z in enumerate(data):
#prediction step
x_est = x + (dx * self.dt)
# update step
residual = z - x_est
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
x = x_est + self.g * residual
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:
predictions[i] = x_est
if saver is not None:
saver.save()
if save_predictions:
return results, predictions
return results
def VRF_prediction(self):
"""
Returns the Variance Reduction Factor of the prediction
step of the filter. The VRF is the
normalized variance for the filter, as given in the equation below.
.. math::
VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}
References
----------
Asquith, "Weight Selection in First Order Linear Filters"
Report No RG-TR-69-12, U.S. Army Missile Command. Redstone Arsenal, Al.
November 24, 1970.
"""
g = self.g
h = self.h
return (2*g**2 + 2*h + g*h) / (g*(4 - 2*g - h))
def VRF(self):
"""
Returns the Variance Reduction Factor (VRF) of the state variable
of the filter (x) and its derivatives (dx, ddx). The VRF is the
normalized variance for the filter, as given in the equations below.
.. math::
VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}
VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}
VRF(\hat{\ddot{x}}_{n,n}) = \\frac{VAR(\hat{\ddot{x}}_{n,n})}{\sigma^2_x}
Returns
-------
vrf_x VRF of x state variable
vrf_dx VRF of the dx state variable (derivative of x)
"""
g = self.g
h = self.h
den = g*(4 - 2*g - h)
vx = (2*g**2 + 2*h - 3*g*h) / den
vdx = 2*h**2 / (self.dt**2 * den)
return (vx, vdx)
def __repr__(self):
return '\n'.join([
'GHFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
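# A short batch sketch for GHFilter: filter a noisy unit-slope ramp with
# batch_filter and read the recovered slope out of the results array. The
# gains, noise level, seed and helper name are illustrative.
def _demo_gh_batch_filter():
    rng = np.random.RandomState(0)
    truth = np.arange(50, dtype=float)            # position advances 1 per step
    zs = truth + rng.normal(scale=.5, size=50)    # noisy measurements

    f = GHFilter(x=0., dx=0., dt=1., g=.2, h=.02)
    results = f.batch_filter(zs)                  # shape (51, 2): [x, dx] rows
    # results[-1, 1] should end up close to the true slope of 1.0.
    return results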
class GHKFilter(object):
"""
Implements the g-h-k filter.
Parameters
----------
x : 1D np.array or scalar
Initial value for the filter state. Each value can be a scalar
or a np.array.
You can use a scalar for x0. If order > 0, then 0.0 is assumed
for the higher order terms.
x[0] is the value being tracked
x[1] is the first derivative (for order 1 and 2 filters)
x[2] is the second derivative (for order 2 filters)
dx : 1D np.array or scalar
Initial value for the derivative of the filter state.
ddx : 1D np.array or scalar
Initial value for the second derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
k : float
filter k gain parameter.
Attributes
----------
x : 1D np.array or scalar
filter state
dx : 1D np.array or scalar
derivative of the filter state.
ddx : 1D np.array or scalar
second derivative of the filter state.
x_prediction : 1D np.array or scalar
predicted filter state
dx_prediction : 1D np.array or scalar
predicted derivative of the filter state.
ddx_prediction : 1D np.array or scalar
second predicted derivative of the filter state.
dt : scalar
time step
g : float
filter g gain parameter.
h : float
filter h gain parameter.
k : float
filter k gain parameter.
y : np.array, or scalar
residual (difference between measurement and prior)
z : np.array, or scalar
measurement passed into update()
References
----------
Brookner, "Tracking and Kalman Filters Made Easy". John Wiley and
Sons, 1998.
"""
def __init__(self, x, dx, ddx, dt, g, h, k):
self.x = x
self.dx = dx
self.ddx = ddx
self.x_prediction = self.x
self.dx_prediction = self.dx
self.ddx_prediction = self.ddx
self.dt = dt
self.g = g
self.h = h
self.k = k
if np.ndim(x) == 0:
self.y = 0. # residual
self.z = 0.
else:
self.y = np.zeros(len(x))
self.z = np.zeros(len(x))
def update(self, z, g=None, h=None, k=None):
"""
Performs the g-h filter predict and update step on the
measurement z.
On return, self.x, self.dx, self.y, and self.x_prediction
will have been updated with the results of the computation. For
convenience, self.x and self.dx are returned in a tuple.
Parameters
----------
z : scalar
the measurement
g : scalar (optional)
Override the fixed self.g value for this update
h : scalar (optional)
Override the fixed self.h value for this update
k : scalar (optional)
Override the fixed self.k value for this update
Returns
-------
x filter output for x
dx filter output for dx (derivative of x)
"""
if g is None:
g = self.g
if h is None:
h = self.h
if k is None:
k = self.k
dt = self.dt
dt_sqr = dt**2
#prediction step
self.ddx_prediction = self.ddx
self.dx_prediction = self.dx + self.ddx*dt
self.x_prediction = self.x + self.dx*dt + .5*self.ddx*(dt_sqr)
# update step
self.y = z - self.x_prediction
self.ddx = self.ddx_prediction + 2*k*self.y / dt_sqr
self.dx = self.dx_prediction + h * self.y / dt
self.x = self.x_prediction + g * self.y
return (self.x, self.dx)
def batch_filter(self, data, save_predictions=False):
"""
        Performs g-h filtering with a fixed g and h. Note that this batch
        implementation uses only g and h; k and ddx are not applied.
Uses self.x and self.dx to initialize the filter, but DOES NOT
alter self.x and self.dx during execution, allowing you to use this
        class multiple times without resetting self.x and self.dx. I'm not sure
how often you would need to do that, but the capability is there.
More exactly, none of the class member variables are modified
by this function.
Parameters
----------
data : list_like
contains the data to be filtered.
save_predictions : boolean
The predictions will be saved and returned if this is true
Returns
-------
results : np.array shape (n+1, 2), where n=len(data)
contains the results of the filter, where
results[i,0] is x , and
results[i,1] is dx (derivative of x)
First entry is the initial values of x and dx as set by __init__.
predictions : np.array shape(n), or None
the predictions for each step in the filter. Only returned if
save_predictions == True
"""
x = self.x
dx = self.dx
n = len(data)
results = np.zeros((n+1, 2))
results[0, 0] = x
results[0, 1] = dx
if save_predictions:
predictions = np.zeros(n)
# optimization to avoid n computations of h / dt
h_dt = self.h / self.dt
for i, z in enumerate(data):
#prediction step
x_est = x + (dx*self.dt)
# update step
residual = z - x_est
dx = dx + h_dt * residual # i.e. dx = dx + h * residual / dt
x = x_est + self.g * residual
results[i+1, 0] = x
results[i+1, 1] = dx
if save_predictions:
predictions[i] = x_est
if save_predictions:
return results, predictions
return results
def VRF_prediction(self):
"""
Returns the Variance Reduction Factor for x of the prediction
step of the filter.
This implements the equation
.. math::
VRF(\hat{x}_{n+1,n}) = \\frac{VAR(\hat{x}_{n+1,n})}{\sigma^2_x}
References
----------
Asquith and Woods, "Total Error Minimization in First
and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, Al. November 24, 1970.
"""
g = self.g
h = self.h
k = self.k
gh2 = 2*g + h
return ((g*k*(gh2-4)+ h*(g*gh2+2*h)) /
(2*k - (g*(h+k)*(gh2-4))))
def bias_error(self, dddx):
"""
Returns the bias error given the specified constant jerk(dddx)
Parameters
----------
dddx : type(self.x)
3rd derivative (jerk) of the state variable x.
References
----------
Asquith and Woods, "Total Error Minimization in First
and Second Order Prediction Filters" Report No RE-TR-70-17, U.S.
        Army Missile Command. Redstone Arsenal, Al. November 24, 1970.
"""
return -self.dt**3 * dddx / (2*self.k)
def VRF(self):
"""
Returns the Variance Reduction Factor (VRF) of the state variable
of the filter (x) and its derivatives (dx, ddx). The VRF is the
normalized variance for the filter, as given in the equations below.
.. math::
VRF(\hat{x}_{n,n}) = \\frac{VAR(\hat{x}_{n,n})}{\sigma^2_x}
VRF(\hat{\dot{x}}_{n,n}) = \\frac{VAR(\hat{\dot{x}}_{n,n})}{\sigma^2_x}
VRF(\hat{\ddot{x}}_{n,n}) = \\frac{VAR(\hat{\ddot{x}}_{n,n})}{\sigma^2_x}
Returns
-------
vrf_x : type(x)
VRF of x state variable
vrf_dx : type(x)
VRF of the dx state variable (derivative of x)
vrf_ddx : type(x)
VRF of the ddx state variable (second derivative of x)
"""
g = self.g
h = self.h
k = self.k
# common subexpressions in the equations pulled out for efficiency,
# they don't 'mean' anything.
        hg4 = 4 - 2*g - h
ghk = g*h + g*k - 2*k
vx = (2*h*(2*(g**2) + 2*h - 3*g*h) - 2*g*k*hg4) / (2*k - g*(h+k) * hg4)
vdx = (2*(h**3) - 4*(h**2)*k + 4*(k**2)*(2-g)) / (2*hg4*ghk)
vddx = 8*h*(k**2) / ((self.dt**4)*hg4*ghk)
return (vx, vdx, vddx)
def __repr__(self):
return '\n'.join([
            'GHKFilter object',
pretty_str('dt', self.dt),
pretty_str('g', self.g),
pretty_str('h', self.h),
pretty_str('k', self.k),
pretty_str('x', self.x),
pretty_str('dx', self.dx),
pretty_str('ddx', self.ddx),
pretty_str('x_prediction', self.x_prediction),
pretty_str('dx_prediction', self.dx_prediction),
            pretty_str('ddx_prediction', self.ddx_prediction),
pretty_str('y', self.y),
pretty_str('z', self.z)
])
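# --- Illustrative sketch (not part of the original library) -----------------
# Hedged usage example for the GHKFilter class above: track a target whose
# position grows roughly like 0.5*a*t**2 (constant acceleration).  The gains
# g, h, k are arbitrary illustrative values; in practice they might come from
# optimal_noise_smoothing() defined below.
def _example_ghk_tracking():
    f = GHKFilter(0., 0., 0., 1., .5, .3, .1)
    for z in [1.0, 4.1, 8.9, 16.2, 24.8, 36.1]:   # noisy samples of x = t**2
        x, dx = f.update(z)
    print('x = %.2f, dx = %.2f, ddx = %.2f' % (f.x, f.dx, f.ddx))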
def optimal_noise_smoothing(g):
""" provides g,h,k parameters for optimal smoothing of noise for a given
value of g. This is due to Polge and Bhagavan[1].
Parameters
----------
g : float
value for g for which we will optimize for
Returns
-------
(g,h,k) : (float, float, float)
values for g,h,k that provide optimal smoothing of noise
Examples
--------
.. code-block:: Python
from filterpy.gh import GHKFilter, optimal_noise_smoothing
g,h,k = optimal_noise_smoothing(g)
f = GHKFilter(0,0,0,1,g,h,k)
f.update(1.)
References
----------
[1] Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
Report No. RE-CR-76-1. University of Alabama in Huntsville.
July, 1975
"""
    h = ((2*g**3 - 4*g**2) + (4*g**6 - 64*g**5 + 64*g**4)**.5) / (8*(1-g))
k = (h*(2-g) - g**2) / g
return (g, h, k)
def least_squares_parameters(n):
""" An order 1 least squared filter can be computed by a g-h filter
by varying g and h over time according to the formulas below, where
the first measurement is at n=0, the second is at n=1, and so on:
.. math::
h_n = \\frac{6}{(n+2)(n+1)}
g_n = \\frac{2(2n+1)}{(n+2)(n+1)}
Parameters
----------
n : int
the nth measurement, starting at 0 (i.e. first measurement has n==0)
Returns
-------
(g,h) : (float, float)
g and h parameters for this time step for the least-squares filter
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, least_squares_parameters
lsf = GHFilter (0, 0, 1, 0, 0)
z = 10
for i in range(10):
g,h = least_squares_parameters(i)
lsf.update(z, g, h)
"""
den = (n+2)*(n+1)
g = (2*(2*n + 1)) / den
h = 6 / den
return (g, h)
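# --- Illustrative sketch (not part of the original library) -----------------
# Hedged illustration of the least-squares g/h schedule above: the gains start
# large (trusting each new measurement heavily) and shrink as n grows, so the
# filter converges toward an ordinary least-squares line fit.
def _example_least_squares_schedule():
    for n in range(5):
        g, h = least_squares_parameters(n)
        print('n=%d  g=%.3f  h=%.3f' % (n, g, h))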
def critical_damping_parameters(theta, order=2):
""" Computes values for g and h (and k for g-h-k filter) for a
critically damped filter.
The idea here is to create a filter that reduces the influence of
old data as new data comes in. This allows the filter to track a
moving target better. This goes by different names. It may be called the
    discounted least-squares g-h filter, a fading-memory polynomial filter
of order 1, or a critically damped g-h filter.
In a normal least-squares filter we compute the error for each point as
.. math::
\epsilon_t = (z-\\hat{x})^2
    For a critically damped filter we reduce the influence of each error by
.. math::
\\theta^{t-i}
where
.. math::
0 <= \\theta <= 1
In other words the last error is scaled by theta, the next to last by
theta squared, the next by theta cubed, and so on.
Parameters
----------
theta : float, 0 <= theta <= 1
scaling factor for previous terms
order : int, 2 (default) or 3
order of filter to create the parameters for. g and h will be
calculated for the order 2, and g, h, and k for order 3.
Returns
-------
g : scalar
optimal value for g in the g-h or g-h-k filter
h : scalar
optimal value for h in the g-h or g-h-k filter
k : scalar
optimal value for g in the g-h-k filter
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, critical_damping_parameters
g,h = critical_damping_parameters(0.3)
critical_filter = GHFilter(0, 0, 1, g, h)
References
----------
    Brookner, "Tracking and Kalman Filtering Made Easy". John Wiley and
Sons, 1998.
Polge and Bhagavan. "A Study of the g-h-k Tracking Filter".
Report No. RE-CR-76-1. University of Alabama in Huntsville.
July, 1975
"""
if theta < 0 or theta > 1:
raise ValueError('theta must be between 0 and 1')
if order == 2:
return (1. - theta**2, (1. - theta)**2)
if order == 3:
return (1. - theta**3, 1.5*(1.-theta**2)*(1.-theta), .5*(1 - theta)**3)
raise ValueError('bad order specified: {}'.format(order))
def benedict_bornder_constants(g, critical=False):
""" Computes the g,h constants for a Benedict-Bordner filter, which
minimizes transient errors for a g-h filter.
Returns the values g,h for a specified g. Strictly speaking, only h
is computed, g is returned unchanged.
The default formula for the Benedict-Bordner allows ringing. We can
"nearly" critically damp it; ringing will be reduced, but not entirely
eliminated at the cost of reduced performance.
Parameters
----------
g : float
scaling factor g for the filter
critical : boolean, default False
Attempts to critically damp the filter.
Returns
-------
g : float
scaling factor g (same as the g that was passed in)
h : float
scaling factor h that minimizes the transient errors
Examples
--------
.. code-block:: Python
from filterpy.gh import GHFilter, benedict_bornder_constants
g, h = benedict_bornder_constants(.855)
f = GHFilter(0, 0, 1, g, h)
References
----------
    Brookner, "Tracking and Kalman Filtering Made Easy". John Wiley and
Sons, 1998.
"""
g_sqr = g**2
if critical:
return (g, 0.8 * (2. - g_sqr - 2*(1-g_sqr)**.5) / g_sqr)
return (g, g_sqr / (2.-g))
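# --- Illustrative sketch (not part of the original library) -----------------
# Hedged comparison of the two Benedict-Bordner variants above: for the same g
# the "critical" variant returns a smaller h, trading some transient
# performance for reduced ringing.  The g value is arbitrary.
def _example_benedict_bordner_variants():
    for critical in (False, True):
        g, h = benedict_bornder_constants(.6, critical=critical)
        print('critical=%s  g=%.3f  h=%.3f' % (critical, g, h))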
|
the-stack_106_13695
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.graphgym.config import cfg
from torch_geometric.graphgym.models.head import head_dict
from torch_geometric.graphgym.models.layer import (new_layer_config,
GeneralLayer,
GeneralMultiLayer,
BatchNorm1dNode)
from torch_geometric.graphgym.init import init_weights
from torch_geometric.graphgym.models.encoder import node_encoder_dict, \
edge_encoder_dict
import torch_geometric.graphgym.register as register
def GNNLayer(dim_in, dim_out, has_act=True):
"""
Wrapper for a GNN layer
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
has_act (bool): Whether has activation function after the layer
"""
return GeneralLayer(cfg.gnn.layer_type,
layer_config=new_layer_config(
dim_in, dim_out, 1,
has_act=has_act, has_bias=False, cfg=cfg))
def GNNPreMP(dim_in, dim_out):
"""
Wrapper for NN layer before GNN message passing
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
"""
return GeneralMultiLayer('linear',
layer_config=new_layer_config(
dim_in, dim_out, 1,
has_act=False, has_bias=False, cfg=cfg))
class GNNStackStage(nn.Module):
"""
Simple Stage that stack GNN layers
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
num_layers (int): Number of GNN layers
"""
def __init__(self, dim_in, dim_out, num_layers):
super(GNNStackStage, self).__init__()
self.num_layers = num_layers
for i in range(num_layers):
if cfg.gnn.stage_type == 'skipconcat':
d_in = dim_in if i == 0 else dim_in + i * dim_out
else:
d_in = dim_in if i == 0 else dim_out
layer = GNNLayer(d_in, dim_out)
self.add_module('layer{}'.format(i), layer)
def forward(self, batch):
""""""
for i, layer in enumerate(self.children()):
x = batch.x
batch = layer(batch)
if cfg.gnn.stage_type == 'skipsum':
batch.x = x + batch.x
elif cfg.gnn.stage_type == 'skipconcat' and \
i < self.num_layers - 1:
batch.x = torch.cat([x, batch.x], dim=1)
if cfg.gnn.l2norm:
batch.x = F.normalize(batch.x, p=2, dim=-1)
return batch
stage_dict = {
'stack': GNNStackStage,
'skipsum': GNNStackStage,
'skipconcat': GNNStackStage,
}
stage_dict = {**register.stage_dict, **stage_dict}
class FeatureEncoder(nn.Module):
"""
Encoding node and edge features
Args:
dim_in (int): Input feature dimension
"""
def __init__(self, dim_in):
super(FeatureEncoder, self).__init__()
self.dim_in = dim_in
if cfg.dataset.node_encoder:
# Encode integer node features via nn.Embeddings
NodeEncoder = node_encoder_dict[cfg.dataset.node_encoder_name]
self.node_encoder = NodeEncoder(cfg.gnn.dim_inner)
if cfg.dataset.node_encoder_bn:
self.node_encoder_bn = BatchNorm1dNode(new_layer_config(
cfg.gnn.dim_inner, -1, -1,
has_act=False, has_bias=False, cfg=cfg))
            # Update dim_in to reflect the new dimension of the node features
self.dim_in = cfg.gnn.dim_inner
if cfg.dataset.edge_encoder:
# Encode integer edge features via nn.Embeddings
EdgeEncoder = edge_encoder_dict[cfg.dataset.edge_encoder_name]
self.edge_encoder = EdgeEncoder(cfg.gnn.dim_inner)
if cfg.dataset.edge_encoder_bn:
self.edge_encoder_bn = BatchNorm1dNode(new_layer_config(
cfg.gnn.dim_inner, -1, -1,
has_act=False, has_bias=False, cfg=cfg))
def forward(self, batch):
""""""
for module in self.children():
batch = module(batch)
return batch
class GNN(nn.Module):
"""
General GNN model: encoder + stage + head
Args:
dim_in (int): Input dimension
dim_out (int): Output dimension
**kwargs (optional): Optional additional args
"""
def __init__(self, dim_in, dim_out, **kwargs):
super(GNN, self).__init__()
GNNStage = stage_dict[cfg.gnn.stage_type]
GNNHead = head_dict[cfg.gnn.head]
self.encoder = FeatureEncoder(dim_in)
dim_in = self.encoder.dim_in
if cfg.gnn.layers_pre_mp > 0:
self.pre_mp = GNNPreMP(dim_in, cfg.gnn.dim_inner)
dim_in = cfg.gnn.dim_inner
if cfg.gnn.layers_mp > 0:
self.mp = GNNStage(dim_in=dim_in, dim_out=cfg.gnn.dim_inner,
num_layers=cfg.gnn.layers_mp)
self.post_mp = GNNHead(dim_in=cfg.gnn.dim_inner, dim_out=dim_out)
self.apply(init_weights)
def forward(self, batch):
""""""
for module in self.children():
batch = module(batch)
return batch
if cfg.benchmark:
# register to measure the forward pass
from torch_geometric.graphgym.benchmark import global_line_profiler
global_line_profiler.add_function(forward)
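# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example of how the GNN wrapper above is typically built.  It assumes
# the global GraphGym `cfg` has already been populated from a YAML config
# (dataset/encoder/gnn sections) by the GraphGym entry point; the dimensions
# below are placeholders.
def _example_build_gnn(dim_in=16, dim_out=2):
    # With the defaults this composes FeatureEncoder -> (pre_mp) -> mp -> post_mp.
    model = GNN(dim_in=dim_in, dim_out=dim_out)
    return model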
|
the-stack_106_13696
|
from xml.etree.ElementTree import Element
from frappe.model.document import Document
from trebelge.TRUBLCommonElementsStrategy.TRUBLCommonElement import TRUBLCommonElement
from trebelge.TRUBLCommonElementsStrategy.TRUBLPeriod import TRUBLPeriod
from trebelge.TRUBLCommonElementsStrategy.TRUBLPerson import TRUBLPerson
from trebelge.TRUBLCommonElementsStrategy.TRUBLTransportMeans import TRUBLTransportMeans
class TRUBLShipmentStage(TRUBLCommonElement):
_frappeDoctype: str = 'UBL TR ShipmentStage'
def process_element(self, element: Element, cbcnamespace: str, cacnamespace: str) -> Document:
frappedoc: dict = {}
        # ['ID'] = ('cbc', '', 'Optional (0...1)')
        # ['TransportModeCode'] = ('cbc', '', 'Optional (0...1)')
        # ['TransportMeansTypeCode'] = ('cbc', '', 'Optional (0...1)')
        # ['Instructions'] = ('cbc', '', 'Optional (0...1)')
cbcsecimli01: list = ['ID', 'TransportModeCode', 'TransportMeansTypeCode', 'Instructions']
for elementtag_ in cbcsecimli01:
field_: Element = element.find('./' + cbcnamespace + elementtag_)
if field_ is not None:
if field_.text is not None:
frappedoc[elementtag_.lower()] = field_.text
        # ['TransitDirectionCode'] = ('cbc', '', 'Optional (0...n)')
transitdirectioncodes_: list = element.findall('./' + cbcnamespace + 'TransitDirectionCode')
if len(transitdirectioncodes_) != 0:
transitdirectioncode = list()
for transitdirectioncode_ in transitdirectioncodes_:
if transitdirectioncode_.text is not None:
transitdirectioncode.append(transitdirectioncode_.text)
if len(transitdirectioncode) != 0:
frappedoc['transitdirectioncode'] = transitdirectioncode
        # ['TransitPeriod'] = ('cac', 'Period', 'Optional (0...1)')
        transitperiod_: Element = element.find('./' + cacnamespace + 'TransitPeriod')
if transitperiod_ is not None:
tmp: dict = TRUBLPeriod().process_elementasdict(transitperiod_, cbcnamespace, cacnamespace)
if tmp != {}:
for key in ['startdate', 'starttime', 'enddate', 'endtime', 'durationmeasure',
'durationmeasure_unitcode', 'description']:
try:
frappedoc[key] = tmp[key]
except KeyError:
pass
        # ['TransportMeans'] = ('cac', 'TransportMeans', 'Optional (0...1)')
        transportmeans_: Element = element.find('./' + cacnamespace + 'TransportMeans')
if transportmeans_ is not None:
tmp: Document = TRUBLTransportMeans().process_element(transportmeans_, cbcnamespace, cacnamespace)
if tmp is not None:
frappedoc['transportmeans'] = tmp.name
if frappedoc == {}:
return None
        # ['DriverPerson'] = ('cac', 'Person', 'Optional (0...n)')
driverpeople = list()
driverpeople_: list = element.findall('./' + cacnamespace + 'DriverPerson')
if len(driverpeople_) != 0:
for driverperson_ in driverpeople_:
tmp: Document = TRUBLPerson().process_element(driverperson_, cbcnamespace, cacnamespace)
if tmp is not None:
driverpeople.append(tmp)
if len(driverpeople) == 0:
document: Document = self._get_frappedoc(self._frappeDoctype, frappedoc)
else:
document: Document = self._get_frappedoc(self._frappeDoctype, frappedoc, False)
document.driverperson = driverpeople
document.save()
return document
def process_elementasdict(self, element: Element, cbcnamespace: str, cacnamespace: str) -> dict:
pass
|
the-stack_106_13699
|
# coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
r"""Script for training models on the benchmark.
Launching command for batch baseline:
# pylint: disable=line-too-long
python -m meta_dataset.train \
--train_checkpoint_dir=/tmp/bench --summary_dir=/tmp/bench \
--records_root_dir=<records_root> \
--alsologtostderr \
--gin_config=meta_dataset/learn/gin/default/<exp_name>.gin
--gin_bindings="Trainer.experiment_name='<exp_name>'"
# pylint: enable=line-too-long
where:
<exp_name> is e.g. 'debug_proto_mini_imagenet'
To override elements from the config, you can use arguments of the form:
For gin: --gin_bindings='foo = 1000000'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import app
from absl import logging
import gin.tf
# TODO(lamblinp): Better organize module imports for exposure of Gin
# configurables.
from meta_dataset import data
from meta_dataset import learners # pylint: disable=unused-import
from meta_dataset import trainer
from meta_dataset.data import config # pylint: disable=unused-import
from meta_dataset.learners import experimental as experimental_learners # pylint: disable=unused-import
from meta_dataset.models.experimental import parameter_adapter # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
DEFAULT_SAVING_DIRECTORY = '/tmp/metadataset'
tf.flags.DEFINE_string('train_checkpoint_dir',
os.path.join(DEFAULT_SAVING_DIRECTORY, 'checkpoints'),
'The directory to save checkpoints.')
tf.flags.DEFINE_string('summary_dir',
os.path.join(DEFAULT_SAVING_DIRECTORY, 'summaries'),
'The directory for writing summaries.')
tf.flags.DEFINE_bool(
'reload_checkpoint_gin_config', False,
'Whether to reload an operative Gin configuration along with a checkpoint.')
tf.flags.DEFINE_string('records_root_dir', '',
'Root directory containing a subdirectory per dataset.')
tf.flags.DEFINE_bool(
'is_training', True, 'Whether we are in the training phase. '
'Used to control whether to perform training or evaluation.')
tf.flags.DEFINE_multi_string('gin_config', None,
'List of paths to the config files.')
tf.flags.DEFINE_multi_string('gin_bindings', None,
'List of Gin parameter bindings.')
tf.flags.DEFINE_string(
'eval_imbalance_dataset', '', 'A dataset on which to perform evaluation '
'for assessing how class imbalance affects performance in binary episodes. '
'By default it is empty and no imbalance analysis is performed.')
# TODO(crisnv): eval_split is supposed to substitute eval_finegrainedness and
# eval_finegrainedness_split in the future
tf.flags.DEFINE_enum(
'eval_split', None,
[trainer.TRAIN_SPLIT, trainer.VALID_SPLIT, trainer.TEST_SPLIT],
'Override the evaluation split. If None (default), regular logic is used, '
'that is, if "is_training" is True, "trainer.VALID_SPLIT" is used, '
'otherwise "trainer.TEST_SPLIT" is used. The "is_training" case also uses '
'the value of eval_finegrainedness_split if eval_finegrainedness is True.')
tf.flags.DEFINE_bool(
'eval_finegrainedness', False, 'Whether to perform only 2-way ImageNet '
'evaluation for assessing performance as a function of how finegrained '
'each task is. This differs from usual ImageNet eval in the sampling '
'procedure used to get episodes, and therefore requires its own setting.')
tf.flags.DEFINE_enum(
'eval_finegrainedness_split', trainer.TRAIN_SPLIT,
[trainer.TRAIN_SPLIT, trainer.VALID_SPLIT, trainer.TEST_SPLIT], 'The '
'split whose results we want to use for the fine-grainedness analysis.'
'Contrary to most analyses which are performed on the test split only, the '
'fine-grainedness analysis may also be performed on the train or valid '
'sub-graphs of ImageNet too, since the test sub-graph evidently does not '
'exhibit enough variation in the fine-grainedness of its different tasks '
'to allow for a meaningful analysis.')
# The following flag specifies substrings of variable names that should not be
# reloaded. `num_left_in_epoch' is a variable that influences the behavior of
# the EpochTrackers. Since the state of those trackers is not reloaded, neither
# should this variable. `fc_finetune' is a substring of the names of the
# variables in the episode-specific linear layer of the finetune baseline (used
# at meta-validation and meta-test times). Since this layer gets re-initialized
# to random weights in each new episode, there is no need to ever restore these
# weights. `linear_classifier' plays that role but for the MAML model: similarly
# in each new episode it is re-initialized (e.g. set to zeros or to the
# prototypes in the case of proto-MAML), so there is no need to restore these
# weights. `adam_opt' captures the variables of the within-episode optimizer of
# the finetune baseline when it is configured to perform that finetuning with
# adam. `fc' captures the variable names of the fully-connected layer for the
# all-way classification problem that the baselines solve at training time.
# There are certain situations where we need to omit reloading these weights to
# avoid getting an error. Consider for example the experiments where we train
# a baseline model, starting from weights that were previously trained on
# ImageNet. If this training now takes place on all datasets, the size of the
# all-way classification layer is now different (equal to the number of
# meta-training classes of all datasets not just of ImageNet). Thus when
# training baselines from pre-trained weights, we only reload the backbone and
# not the `fc' all-way classification layer (similarly for inference-only
# experiments for the same reason).
tf.flags.DEFINE_multi_enum(
'omit_from_saving_and_reloading', [
'num_left_in_epoch', 'fc_finetune', 'linear_classifier', 'adam_opt',
'weight_copy'
], [
'num_left_in_epoch', 'fc_finetune', 'linear_classifier', 'adam_opt',
'weight_copy', 'fc'
],
'A comma-separated list of substrings such that all variables containing '
'them should not be saved and reloaded.')
FLAGS = tf.flags.FLAGS
def parse_cmdline_gin_configurations():
"""Parse Gin configurations from all command-line sources."""
with gin.unlock_config():
gin.parse_config_files_and_bindings(
FLAGS.gin_config, FLAGS.gin_bindings, finalize_config=True)
def operative_config_path(operative_config_dir,
operative_config_filename='operative_config.gin'):
return os.path.join(operative_config_dir, operative_config_filename)
def load_operative_gin_configurations(operative_config_dir):
"""Load operative Gin configurations from the given directory."""
gin_log_file = operative_config_path(operative_config_dir)
with gin.unlock_config():
gin.parse_config_file(gin_log_file)
gin.finalize()
logging.info('Operative Gin configurations loaded from %s.', gin_log_file)
def record_operative_gin_configurations(operative_config_dir):
"""Record operative Gin configurations in the given directory."""
gin_log_file = operative_config_path(operative_config_dir)
# If it exists already, rename it instead of overwriting it.
# This just saves the previous one, not all the ones before.
if tf.io.gfile.exists(gin_log_file):
tf.io.gfile.rename(gin_log_file, gin_log_file + '.old', overwrite=True)
with tf.io.gfile.GFile(gin_log_file, 'w') as f:
f.write(gin.operative_config_str())
def main(unused_argv):
# Parse Gin configurations passed to this script.
parse_cmdline_gin_configurations()
if FLAGS.reload_checkpoint_gin_config:
# Try to reload a previously recorded Gin configuration from an operative
# Gin configuration file in one of the provided directories.
# TODO(eringrant): Allow querying of a value to be bound without binding it
# to avoid the redundant call to `parse_cmdline_gin_configurations` below.
try:
checkpoint_to_restore = gin.query_parameter(
'Trainer.checkpoint_to_restore')
except ValueError:
checkpoint_to_restore = None
# Load the operative Gin configurations from the checkpoint directory.
if checkpoint_to_restore:
restore_checkpoint_dir = os.path.dirname(checkpoint_to_restore)
load_operative_gin_configurations(restore_checkpoint_dir)
# Reload the command-line Gin configuration to allow overriding of the Gin
# configuration loaded from the checkpoint directory.
parse_cmdline_gin_configurations()
# Wrap object instantiations to print out full Gin configuration on failure.
try:
(train_datasets, eval_datasets, restrict_classes,
restrict_num_per_class) = trainer.get_datasets_and_restrictions()
# Get a trainer or evaluator.
trainer_instance = trainer.Trainer(
is_training=FLAGS.is_training,
train_dataset_list=train_datasets,
eval_dataset_list=eval_datasets,
restrict_classes=restrict_classes,
restrict_num_per_class=restrict_num_per_class,
checkpoint_dir=FLAGS.train_checkpoint_dir,
summary_dir=FLAGS.summary_dir,
records_root_dir=FLAGS.records_root_dir,
eval_finegrainedness=FLAGS.eval_finegrainedness,
eval_finegrainedness_split=FLAGS.eval_finegrainedness_split,
eval_imbalance_dataset=FLAGS.eval_imbalance_dataset,
omit_from_saving_and_reloading=FLAGS.omit_from_saving_and_reloading,
eval_split=FLAGS.eval_split,
)
except ValueError as e:
logging.info('Full Gin configurations:\n%s', gin.config_str())
raise e
# All configurable objects/functions should have been instantiated/called.
# TODO(evcu): Tie saving of Gin configuration at training and evaluation time.
logging.info('Operative Gin configurations:\n%s', gin.operative_config_str())
if FLAGS.is_training and FLAGS.train_checkpoint_dir:
record_operative_gin_configurations(FLAGS.train_checkpoint_dir)
elif not FLAGS.is_training and FLAGS.summary_dir:
record_operative_gin_configurations(FLAGS.summary_dir)
datasets = train_datasets if FLAGS.is_training else eval_datasets
logging.info('Starting %s for dataset(s) %s...',
'training' if FLAGS.is_training else 'evaluation', datasets)
if FLAGS.is_training:
trainer_instance.train()
elif set(datasets).intersection(trainer.DATASETS_WITH_EXAMPLE_SPLITS):
if not data.POOL_SUPPORTED:
raise NotImplementedError('Example-level splits or pools not supported.')
else:
if len(datasets) != 1:
raise ValueError('Requested datasets {} for evaluation, but evaluation '
'should be performed on individual datasets '
'only.'.format(datasets))
if FLAGS.eval_finegrainedness:
eval_split = FLAGS.eval_finegrainedness_split
elif FLAGS.eval_split:
eval_split = FLAGS.eval_split
else:
eval_split = trainer.TEST_SPLIT
_, _, acc_summary, ci_acc_summary = trainer_instance.evaluate(eval_split)
if trainer_instance.summary_writer:
trainer_instance.summary_writer.add_summary(acc_summary)
trainer_instance.summary_writer.add_summary(ci_acc_summary)
# Flushes the event file to disk and closes the file.
if trainer_instance.summary_writer:
trainer_instance.summary_writer.close()
# program = main
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
|
the-stack_106_13701
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import Counter
import os
import torch
class Dictionary(object):
"""A mapping from symbols to consecutive integers"""
def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):
self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
# dictionary indexing starts at 1 for consistency with Lua
self.add_symbol('<Lua heritage>')
self.pad_index = self.add_symbol(pad)
self.eos_index = self.add_symbol(eos)
self.unk_index = self.add_symbol(unk)
self.nspecial = len(self.symbols)
def __eq__(self, other):
return self.indices == other.indices
def __getitem__(self, idx):
if idx < len(self.symbols):
return self.symbols[idx]
return self.unk_word
def __len__(self):
"""Returns the number of symbols in the dictionary"""
return len(self.symbols)
def index(self, sym):
"""Returns the index of the specified symbol"""
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def string(self, tensor, bpe_symbol=None, escape_unk=False):
"""Helper for converting a tensor of token indices to a string.
Can optionally remove BPE symbols or escape <unk> words.
"""
if torch.is_tensor(tensor) and tensor.dim() == 2:
return '\n'.join(self.string(t) for t in tensor)
def token_string(i):
if i == self.unk():
return self.unk_string(escape_unk)
else:
return self[i]
sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
if bpe_symbol is not None:
sent = (sent + ' ').replace(bpe_symbol, '').rstrip()
return sent
def unk_string(self, escape=False):
"""Return unknown string, optionally escaped as: <<unk>>"""
if escape:
return '<{}>'.format(self.unk_word)
else:
return self.unk_word
def add_symbol(self, word, n=1):
"""Adds a word to the dictionary"""
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def update(self, new_dict):
"""Updates counts from new dictionary."""
for word in new_dict.symbols:
idx2 = new_dict.indices[word]
if word in self.indices:
idx = self.indices[word]
self.count[idx] = self.count[idx] + new_dict.count[idx2]
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(new_dict.count[idx2])
def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[:self.nspecial]
new_count = self.count[:self.nspecial]
c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:])))
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
threshold_nwords = len(new_symbols)
if padding_factor > 1:
i = 0
while threshold_nwords % padding_factor != 0:
symbol = 'madeupword{:04d}'.format(i)
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(0)
i += 1
threshold_nwords += 1
assert len(new_symbols) % padding_factor == 0
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
def pad(self):
"""Helper to get index of pad symbol"""
return self.pad_index
def eos(self):
"""Helper to get index of end-of-sentence symbol"""
return self.eos_index
def unk(self):
"""Helper to get index of unk symbol"""
return self.unk_index
@classmethod
def load(cls, f, ignore_utf_errors=False):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
if isinstance(f, str):
try:
if not ignore_utf_errors:
with open(f, 'r', encoding='utf-8') as fd:
return cls.load(fd)
else:
with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
return cls.load(fd)
except FileNotFoundError as fnfe:
raise fnfe
except Exception:
raise Exception("Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f))
d = cls()
for line in f.readlines():
idx = line.rfind(' ')
word = line[:idx]
count = int(line[idx+1:])
d.indices[word] = len(d.symbols)
d.symbols.append(word)
d.count.append(count)
return d
def save(self, f):
"""Stores dictionary into a text file"""
if isinstance(f, str):
os.makedirs(os.path.dirname(f), exist_ok=True)
with open(f, 'w', encoding='utf-8') as fd:
return self.save(fd)
for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):
print('{} {}'.format(symbol, count), file=f)
def dummy_sentence(self, length):
t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
t[-1] = self.eos()
return t
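# --- Illustrative sketch (not part of the original fairseq module) ----------
# Hedged example of building a small Dictionary, finalizing it, and mapping a
# sentence to indices and back.  The toy corpus below is made up; unknown
# words map to the <unk> index.
def _example_dictionary_roundtrip():
    d = Dictionary()
    for sentence in ['the cat sat', 'the dog sat']:
        for word in sentence.split():
            d.add_symbol(word)
    d.finalize(padding_factor=1)      # sort by frequency, no size padding
    ids = [d.index(w) for w in 'the bird sat'.split()]
    print(ids, [d[i] for i in ids])   # 'bird' comes back as <unk>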
|
the-stack_106_13702
|
"""Restheart related commands."""
from __future__ import annotations
from flask.cli import AppGroup, with_appcontext
import labster.bus
restheart = AppGroup("restheart")
@restheart.command("init")
@with_appcontext
def restheart_init():
"""Create needed collections in RestHeart DB."""
labster.bus.init()
@restheart.command("sync")
@with_appcontext
def restheart_sync_all():
"""Sync (push all data) to RestHeart DB."""
labster.bus.sync_all_objects()
|
the-stack_106_13705
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
## Append some records to an existing file, using mode="ab"
##############################################################################
import unittest
import shutil
import tempfile
import os
from savReaderWriter import *
class test_SavWriter_append_data(unittest.TestCase):
def setUp(self):
self.savFileName = "test_data/Employee data.sav"
self.savFileNameCopy = os.path.join(tempfile.gettempdir(), "test.sav")
shutil.copy(self.savFileName, self.savFileNameCopy)
reader = None
try:
reader = SavReader(self.savFileName, rawMode=True)
self.NROWS_ORIGINAL = len(reader)
self.varNames, self.varTypes = reader.getSavFileInfo()[2:4]
finally:
if reader is not None:
reader.close()
def test_SavWriter_append_data(self):
"""Append 100 records to an existing file"""
NROWS_EXTRA = 100
line = [1.0, b'm', 11654150400.0, 15.0, 3.0,
57000.0, 27000.0, 98.0, 144.0, 0.0]
with SavWriter(self.savFileNameCopy, self.varNames,
self.varTypes, mode=b"ab") as writer:
for i in range(NROWS_EXTRA):
writer.writerow(line)
## Demonstrate that the number of records has increased by 100.
reader = None
try:
reader = SavReader(self.savFileNameCopy)
n_records_got = len(reader)
finally:
if reader is not None:
reader.close()
self.assertEqual(n_records_got, self.NROWS_ORIGINAL + NROWS_EXTRA)
def tearDown(self):
try:
os.remove(self.savFileNameCopy)
except:
pass
if __name__ == "__main__":
unittest.main()
|
the-stack_106_13706
|
import json
import logging
import os
import time
from typing import List
import praw
from praw import Reddit
from praw.models import ListingGenerator
from pymongo import MongoClient
from pymongo.collection import Collection, ReturnDocument
from pymongo.cursor import Cursor
from pymongo.database import Database
from ..reddit_post.reddit_post import RedditPost
logger = logging.getLogger("reddit_scraper")
class RedditScraper:
def __init__(self):
self.client_id = os.getenv("REDDIT_CLIENT_ID")
self.client_secret = os.getenv("REDDIT_CLIENT_SECRET")
self.username = os.getenv("REDDIT_USERNAME")
self.password = os.getenv("REDDIT_PASSWORD")
self.user_agent = "Python script written by @gordonpn on GitHub"
self.db_name = os.getenv("MONGO_INITDB_DATABASE")
self.db_username = os.getenv("MONGO_NON_ROOT_USERNAME")
self.db_password = os.getenv("MONGO_NON_ROOT_PASSWORD")
self.db_settings = os.getenv("MONGO_SETTINGS")
self.db_collection = os.getenv("MONGO_COLLECTION")
def run(self):
subscriptions = self.check_subscriptions()
reddit = self.get_reddit()
posts = self.scrape(reddit, subscriptions)
self.update_db(posts)
def get_reddit(self) -> Reddit:
return praw.Reddit(
client_id=self.client_id,
client_secret=self.client_secret,
username=self.username,
password=self.password,
user_agent=self.user_agent,
)
def connect_to_db(self) -> Database:
logger.debug("Making connection to mongodb")
host = "mongodb"
uri: str = f"mongodb://{self.db_username}:{self.db_password}@{host}:27017/{self.db_name}"
connection: MongoClient = MongoClient(uri)
db: Database = connection[self.db_name]
return db
def get_settings_collection(self) -> Collection:
db = self.connect_to_db()
return db.collection[self.db_settings]
def get_data_collection(self) -> Collection:
db = self.connect_to_db()
return db.collection[self.db_collection]
def check_subscriptions(self) -> List[str]:
logger.debug("Checking subscriptions")
collection = self.get_settings_collection()
cursor: Cursor = collection.find_one()
logger.debug(f"{cursor=}")
if cursor is None:
return []
subs: List[str] = cursor["subreddits"]
logger.debug(f"{subs=}")
return subs
def scrape(self, reddit: Reddit, subscriptions: List[str]) -> List[RedditPost]:
logger.debug("Scraping Reddit for hot posts")
limit: int = 5
time_filter: str = "day"
reddit_posts: List[RedditPost] = []
for subscription in subscriptions:
logger.debug(f"{subscription=}")
submissions: ListingGenerator = reddit.subreddit(subscription).top(
limit=limit, time_filter=time_filter
)
for submission in submissions:
if submission.stickied:
continue
title = submission.title
post_id = submission.id
votes = submission.score
link = submission.url
is_self = submission.is_self
unix_time = int(submission.created_utc)
logger.debug(f"Parsing: {post_id=}")
a_reddit_post = RedditPost(
title=title,
subreddit=subscription,
post_id=post_id,
votes=votes,
link=link,
unix_time=unix_time,
is_self=is_self,
seen=False,
)
reddit_posts.append(a_reddit_post)
return reddit_posts
def update_db(self, posts: List[RedditPost]):
logger.debug("Updating scrapings collections")
collection = self.get_data_collection()
for post in posts:
query = {"post_id": post.post_id}
logger.debug(f"Looking for {query}")
data = json.loads(post.to_json())
res = collection.find_one(filter=query)
if res is None:
logger.debug(f"Did not find {query}, inserting one")
result = collection.insert_one(document=data)
logger.debug(f"Insertion ID: {result.inserted_id}")
else:
logger.debug(f"Found {query}, updating one")
data.pop("seen", None)
result = collection.find_one_and_update(
filter=query,
update={"$set": data},
return_document=ReturnDocument.AFTER,
)
logger.debug(f"Update result: {result}")
def clean_up_old(self):
logger.debug("Cleaning up old posts")
collection = self.get_data_collection()
two_months: int = 5184000
unix_time_now: int = int(time.time())
two_months_ago: int = unix_time_now - two_months
collection.delete_many(filter={"unix_time": {"$lte": two_months_ago}})
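# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example of driving the scraper.  It assumes the REDDIT_* and MONGO_*
# environment variables are set and that a MongoDB instance is reachable at
# the hostname "mongodb"; without those services the calls below will fail.
def _example_run_scraper():
    scraper = RedditScraper()
    scraper.run()           # scrape subscribed subreddits and upsert posts
    scraper.clean_up_old()  # drop posts older than roughly two months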
|
the-stack_106_13707
|
# from gatenc import GAT
# from gptdec import GPT2
# from onmt.Models import GCNEncoder_DGL
import onmt.io  # needed below for onmt.io.collect_feature_vocabs
from onmt.ModelConstructor import make_embeddings
from utils import lazily_load_dataset, load_fields, tally_parameters
from model import make_model
import torch
import torch.nn as nn
import torch.nn.functional as F
import glob, argparse, opts
import numpy as np
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opts.gcn_opts(parser) # adds GCN options
model_opt = parser.parse_args()
dataset = next(lazily_load_dataset("train"))
print(dataset.examples[0].__dict__)
print(dataset)
data_type = dataset.data_type # data_type: GCN
# Load fields generated from preprocess phase.
fields = load_fields(dataset, data_type, None) # checkpoint = None
print(type(fields))
print(fields)
src_dict = fields["src"].vocab
feature_dicts = onmt.io.collect_feature_vocabs(fields, 'src')
src_embeddings = make_embeddings(model_opt, src_dict,
feature_dicts)
tgt_dict = fields["tgt"].vocab
feature_dicts = onmt.io.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = make_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
make_model(model_opt, src_embeddings, tgt_embeddings)
# gcn_num_inputs=256, gcn_num_labels=5, gcn_num_layers=2, gcn_num_units=256, gcn_out_arcs=True, gcn_residual='residual', gcn_use_gates=False, gcn_use_glus=False
# python3 train.py -data data/${model_id}_exp -save_model data/${save_model_name} -encoder_type ${encoder} -encoder2_type ${encoder2} -layers 1 -gcn_num_layers 2 -gcn_num_labels 5 -gcn_residual residual -word_vec_size ${emb_size} -rnn_size ${hidden_size} -gcn_num_inputs ${hidden_size} -gcn_num_units ${hidden_size} -epochs 20 -optim adam -learning_rate 0.001 -learning_rate_decay 0.7 -seed 1 -gpuid 0 -start_checkpoint_at 15 -gcn_in_arcs -gcn_out_arcs -copy_attn -brnn -use_dgl
# model = make_model() # D: gcn features must be passed here
# tally_parameters(model) # print the parameter size
# check_save_model_path() # check if the model path exist
|
the-stack_106_13708
|
# Problem 102 : Triangle containment
# file which contains the triangle coordinates
triangle_file = "triangle.txt"
# read the triangle coordinates
with open(triangle_file, "r") as triangleFile:
triangles = triangleFile.read()
# creation of a list of lists which includes the 3 points coordinates (tuples)
triangles = triangles.strip().split("\n")
list_triangles = []
for triangle in triangles:
triangle = triangle.strip().split(',')
list_triangles.append([(int(triangle[0]), int(triangle[1])),
(int(triangle[2]), int(triangle[3])),
(int(triangle[4]), int(triangle[5]))
])
def contain_origin(triangle):
"""
    This function takes a triangle, i.e. 3 tuples with the coordinates of the
    triangle's vertices, and returns whether or not the origin is inside the triangle.
We search the barycentric coordinates of the origin :
O = A + (B-A)*s + (C-A)*t
if 0 <= s <= 1 and 0 <= t <= 1 and s+t<=1, the origin is inside the triangle
"""
s = 0
t = 0
[A, B, C] = triangle
if B[0] != A[0]:
if C[0] != A[0]:
if not (A[1] == 0 and B[1] == 0 and C[1] == 0):
t = (B[1]*A[0]-A[1]*B[0])/(C[1]*(B[0]-A[0]) +
B[1]*(A[0]-C[0])+A[1]*(C[0]-B[0]))
s = ((A[0]-C[0])*t-A[0])/(B[0]-A[0])
else:
if C[1] != A[1]:
s = A[0]/(A[0]-B[0])
                t = (A[1]*B[0] - B[1]*A[0])/((A[0]-B[0])*(C[1]-A[1]))
else:
if A[0] != C[0]:
t = A[0]/(A[0]-C[0])
s = (A[1]*C[0]-C[1]*A[0])/((A[0]-C[0])*(B[1]-A[1]))
if 0 <= s <= 1 and 0 <= t <= 1 and 0 <= s+t <= 1:
return True
return False
# count of every triangle with the required property (origin inside the triangle)
counter = 0
for triangle in list_triangles:
if contain_origin(triangle):
counter += 1
print(counter)
# Result 228
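# --- Illustrative sketch (not part of the original script) ------------------
# Hedged sanity check using the two example triangles from the Project Euler
# problem statement: ABC is stated to contain the origin, XYZ is not.
def _example_contain_origin():
    abc = [(-340, 495), (-153, -910), (835, -947)]
    xyz = [(-175, 41), (-421, -714), (574, -645)]
    print(contain_origin(abc))  # expected: True
    print(contain_origin(xyz))  # expected: False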
|
the-stack_106_13710
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: OGR Python samples
# Purpose: Apply a transformation to all OGR geometries.
# Author: Frank Warmerdam, [email protected]
#
###############################################################################
# Copyright (c) 2006, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import ogr
#############################################################################
def TransformPoint(xyz):
x = xyz[0]
y = xyz[1]
z = xyz[2]
x = x + 1000
return (x, y, z)
#############################################################################
def WalkAndTransform(geom):
if geom.GetGeometryCount() > 0:
for i in range(geom.GetGeometryCount()):
old_geom = geom.GetGeometryRef(i)
new_geom = WalkAndTransform(old_geom)
if new_geom is not old_geom:
geom.SetGeometryDirectly(new_geom)
return geom
for i in range(geom.GetPointCount()):
xyz = (geom.GetX(i), geom.GetY(i), geom.GetZ(i))
xyz = TransformPoint(xyz)
geom.SetPoint(i, xyz[0], xyz[1], xyz[2])
return geom
#############################################################################
def Usage():
print('Usage: vec_tr.py infile outfile [layer]')
print('')
sys.exit(1)
#############################################################################
# Argument processing.
infile = None
outfile = None
layer_name = None
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if infile is None:
infile = arg
elif outfile is None:
outfile = arg
elif layer_name is None:
layer_name = arg
else:
Usage()
i = i + 1
if outfile is None:
Usage()
#############################################################################
# Open the datasource to operate on.
in_ds = ogr.Open(infile, update=0)
if layer_name is not None:
in_layer = in_ds.GetLayerByName(layer_name)
else:
in_layer = in_ds.GetLayer(0)
in_defn = in_layer.GetLayerDefn()
#############################################################################
# Create output file with similar information.
shp_driver = ogr.GetDriverByName('ESRI Shapefile')
shp_driver.DeleteDataSource(outfile)
shp_ds = shp_driver.CreateDataSource(outfile)
shp_layer = shp_ds.CreateLayer(in_defn.GetName(),
geom_type=in_defn.GetGeomType(),
srs=in_layer.GetSpatialRef())
in_field_count = in_defn.GetFieldCount()
for fld_index in range(in_field_count):
src_fd = in_defn.GetFieldDefn(fld_index)
fd = ogr.FieldDefn(src_fd.GetName(), src_fd.GetType())
fd.SetWidth(src_fd.GetWidth())
fd.SetPrecision(src_fd.GetPrecision())
shp_layer.CreateField(fd)
#############################################################################
# Process all features in input layer.
in_feat = in_layer.GetNextFeature()
while in_feat is not None:
geom = in_feat.GetGeometryRef().Clone()
geom = WalkAndTransform(geom)
out_feat = ogr.Feature(feature_def=shp_layer.GetLayerDefn())
out_feat.SetFrom(in_feat)
out_feat.SetGeometryDirectly(geom)
shp_layer.CreateFeature(out_feat)
out_feat.Destroy()
in_feat.Destroy()
in_feat = in_layer.GetNextFeature()
#############################################################################
# Cleanup
shp_ds.Destroy()
in_ds.Destroy()
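# --- Illustrative sketch (not part of the original GDAL sample) -------------
# Hedged example of the transform walker above applied to a standalone OGR
# geometry built from WKT, without going through a datasource.
def _example_walk_and_transform():
    geom = ogr.CreateGeometryFromWkt('LINESTRING (0 0, 10 5, 20 10)')
    geom = WalkAndTransform(geom)
    print(geom.ExportToWkt())   # every x shifted by +1000 via TransformPoint()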
|
the-stack_106_13711
|
import pathlib
import os
import logging
logging = logging.getLogger(__name__)
def write_bytes_to_file(path, bytes):
logging.info("Writing %d bytes to file %s", len(bytes), path)
dir = os.path.dirname(path)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open(path, "wb") as f:
f.write(bytes)
def write_string_to_file(path, s):
logging.info("Writing %d chars to file %s", len(s), path)
dir = os.path.dirname(path)
pathlib.Path(dir).mkdir(parents=True, exist_ok=True)
with open(path, "w") as f:
f.write(s)
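# --- Illustrative sketch (not part of the original module) ------------------
# Hedged example: both helpers create missing parent directories before
# writing, so deeply nested paths can be written in a single call.  The temp
# directory below is only for illustration.
def _example_write_helpers():
    import tempfile
    base = tempfile.mkdtemp()
    write_string_to_file(os.path.join(base, 'nested', 'dir', 'note.txt'), 'hello')
    write_bytes_to_file(os.path.join(base, 'nested', 'dir', 'blob.bin'), b'\x00\x01')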
|
the-stack_106_13712
|
import os
import datetime
import traceback
import pandas as pd
import StockAnalysisSystem.core.api as sasApi
from StockAnalysisSystem.core.api_util import ensure_dir
from StockAnalysisSystem.core.Utility.TagsLib import Tags
from StockAnalysisSystem.core.Utility.event_queue import Event
from StockAnalysisSystem.core.SubServiceManager import SubServiceContext
from StockAnalysisSystem.core.Utility.relative_import import RelativeImport
with RelativeImport(__file__):
from StockMemoService.StockMemo import StockMemo
from StockMemoService.BlackList import BlackList
# try:
# from StockMemo.StockMemo import StockMemo
# from StockMemo.BlackList import BlackList
# except Exception as e:
# root_path = os.path.dirname(os.path.abspath(__file__))
# os.sys.path.append(root_path)
#
# from StockMemo.StockMemo import StockMemo
# from StockMemo.BlackList import BlackList
# finally:
# pass
# ----------------------------------------------------------------------------------------------------------------------
class StockMemoService:
def __init__(self, sas_api: sasApi, memo_path: str):
self.__sas_api: sasApi = sas_api
self.__memo_path: str = memo_path
print('==> Init stock memo with path: ' + memo_path)
self.__stock_tags = Tags(os.path.join(memo_path, 'tags.json'))
self.__stock_memo = StockMemo(os.path.join(memo_path, 'stock_memo.csv'))
self.__black_list = BlackList(self.__stock_memo, self.__stock_tags)
def update_root_path(self, root_path: str):
self.__stock_tags.load(os.path.join(root_path, 'tags.json'))
self.__stock_memo.raw_record().load(os.path.join(root_path, 'stock_memo.csv'))
def register_sys_call(self):
# Stock memo
self.__sas_api.register_sys_call('stock_memo_save', self.__stock_memo.stock_memo_save, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_load', self.__stock_memo.stock_memo_load, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_filter_record', self.__stock_memo.stock_memo_filter_record, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_get_record', self.__stock_memo.stock_memo_get_record, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_add_record', self.__stock_memo.stock_memo_add_record, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_update_record', self.__stock_memo.stock_memo_update_record, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_delete_record', self.__stock_memo.stock_memo_delete_record, group='stock_memo')
self.__sas_api.register_sys_call('stock_memo_get_all_security', self.__stock_memo.stock_memo_get_all_security, group='stock_memo')
# Stock memo tags
self.__sas_api.register_sys_call('stock_memo_save_tags', self.__stock_tags.save, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_load_tags', self.__stock_tags.load, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_all_tags', self.__stock_tags.all_tags, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_all_securities', self.__stock_tags.all_objs, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_tags_of_securities', self.__stock_tags.tags_of_objs, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_securities_from_tags', self.__stock_tags.objs_from_tags, group='stock_memo_tags')
self.__sas_api.register_sys_call('stock_memo_set_security_tags', self.__stock_tags.set_obj_tags, group='stock_memo_tags')
# Black list
self.__sas_api.register_sys_call('save_black_list', self.__black_list.save_black_list, group='black_list')
self.__sas_api.register_sys_call('in_black_list', self.__black_list.in_black_list, group='black_list')
self.__sas_api.register_sys_call('all_black_list', self.__black_list.all_black_list, group='black_list')
self.__sas_api.register_sys_call('add_to_black_list', self.__black_list.add_to_black_list, group='black_list')
self.__sas_api.register_sys_call('remove_from_black_list', self.__black_list.remove_from_black_list, group='black_list')
self.__sas_api.register_sys_call('get_black_list_data', self.__black_list.get_black_list_data, group='black_list')
self.__sas_api.register_sys_call('reload_black_list_data', self.__black_list.reload_black_list_data, group='black_list')
# ----------------------------------------------------------------------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_id': 'bd9a7d9f-dbcc-4dc8-8992-16dac9191ff9',
'plugin_name': 'Stock Memo',
'plugin_version': '0.0.0.1',
'tags': ['stock_memo', 'Sleepy'],
}
def plugin_adapt(method: str) -> bool:
return method in ['bd9a7d9f-dbcc-4dc8-8992-16dac9191ff9']
def plugin_capacities() -> list:
return ['api']
# ----------------------------------------------------------------------------------------------------------------------
stockMemoService: StockMemoService = None
subServiceContext: SubServiceContext = None
def init(sub_service_context: SubServiceContext) -> bool:
try:
global stockMemoService
global subServiceContext
subServiceContext = sub_service_context
default_memo_path = os.path.join(os.getcwd(), 'StockMemo')
memo_path = subServiceContext.sas_api.config().get('memo_path', default_memo_path)
if not ensure_dir(memo_path):
            print('Failed to ensure the stock memo directory exists.')
assert False
stockMemoService = StockMemoService(subServiceContext.sas_api, memo_path)
stockMemoService.register_sys_call()
except Exception as e:
import traceback
        print('Plug-in init error: ' + str(e))
print(traceback.format_exc())
finally:
pass
return True
def startup() -> bool:
return True
|
the-stack_106_13714
|
#!/usr/bin/env python
#
# slack_utils documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import slack_utils
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Slack Utils'
copyright = "2020, mac"
author = "mac"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = slack_utils.__version__
# The full version, including alpha/beta/rc tags.
release = slack_utils.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'slack_utilsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'slack_utils.tex',
'Slack Utils Documentation',
'mac', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'slack_utils',
'Slack Utils Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'slack_utils',
'Slack Utils Documentation',
author,
'slack_utils',
'One line description of project.',
'Miscellaneous'),
]
|
the-stack_106_13715
|
import logging
from cobiv.modules.core.book.sqlite.sqlite_book_manager import SqliteBookManager
from cobiv.modules.core.session.session import Session
from cobiv.modules.database.datasources.datasource import Datasource
from cobiv.modules.database.sqlitedb.search.searchmanager import SearchManager
from cobiv.modules.database.sqlitedb.sqlitedb import SqliteCursor
from cobiv.modules.database.sqlitedb.sqlitesetmanager import SqliteSetManager
from cobiv.modules.core.session.cursor import Cursor, CursorInterface
logging.basicConfig(level=logging.DEBUG)
import sqlite3
import unittest
from functools import partial
from kivy.app import App
from kivy.clock import Clock
from kivy.uix.widget import Widget
class TestMainWidget(Widget):
def execute_cmd(self, *args):
pass
class TestDatasource(Datasource):
def create_connection(self):
conn = sqlite3.connect(':memory:', check_same_thread=False)
conn.row_factory = sqlite3.Row
c = conn.execute('PRAGMA temp_store = MEMORY')
c.execute('PRAGMA locking_mode = EXCLUSIVE')
fd = open('resources/sql/sqlite_db.sql')
c.executescript(fd.read())
fd.close()
c.execute("insert into catalog (name) values(?) ", ("default",))
c.execute('insert into repository (catalog_key,path,recursive) values (?,?,?)',
('default', 'memory', 0))
c.execute('create temporary table marked (file_key int)')
c.execute('create temporary table current_set as select * from set_detail where 1=2')
# fill values
c.execute('insert into catalog (name) values (?)', ('test',))
c.execute('insert into repository (catalog_key,path,recursive) values (?,?,?)', (c.lastrowid, '/', True))
repo_key = c.lastrowid
data = [(repo_key, 'file{}.png'.format(i), 1, 'file') for i in range(10000)]
c.executemany('insert into file (repo_key,name,searchable,file_type) values (?,?,?,?)', data)
return conn
ds = TestDatasource()
class TestApp(App):
configuration = {
'thumbnails.path': ''
}
def build(self):
self.session = Session()
self.datasource = ds
self.set_manager = SqliteSetManager()
return TestMainWidget()
def get_config_value(self, key, default=""):
if key in self.configuration:
return self.configuration[key]
else:
return default
def lookup(self, name, category):
if name == "session":
return self.session
elif name == "sqlite_ds":
return self.datasource
elif name == "sqliteSetManager":
return self.set_manager
else:
return None
def fire_event(self, *args, **kwargs):
pass
class SQLiteCursorTest(unittest.TestCase):
def setUp(self):
self.search_manager = SearchManager()
self.conn = ds.get_connection()
self.clear_data()
def clear_data(self):
with self.conn:
self.conn.execute('delete from set_head').execute('delete from set_detail')
self.conn.execute('delete from file_map').execute('delete from file where file_type="book"')
def _create_set_mgr(self):
self.search_manager.ready()
mgr = SqliteBookManager()
mgr.ready()
mgr.set_manager.ready()
mgr.set_manager.regenerate_default()
return mgr
def _test_initialization(self, app, *args):
self._create_set_mgr()
app.stop()
def _test_add_book(self, app, *args):
mgr = self._create_set_mgr()
set_mgr = mgr.set_manager
with self.conn:
set_mgr.query_to_current_set("select id from file where rowid=2")
book1_id=mgr.create_book("book1")
self.assertEqual(1, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(book1_id,self.conn.execute('select id from file where name=?',('book1',)).fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from file where name=?', ('book1',)).fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from set_detail,file where set_detail.file_key=file.id and file.name=?',
('book1',)).fetchone()[0])
set_mgr.query_to_current_set("select id from file where rowid between 7 and 8")
book7_id=mgr.create_book("book7")
self.assertEqual(3, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from file where name=?', ('book7',)).fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from set_detail,file where set_detail.file_key=file.id and file.name=?',
('book7',)).fetchone()[0])
self.assertEqual(book7_id,self.conn.execute('select id from file where name=?',('book7',)).fetchone()[0])
self.assertNotEqual(book1_id,book7_id)
app.stop()
def _test_remove_book(self, app, *args):
mgr = self._create_set_mgr()
set_mgr = mgr.set_manager
with self.conn:
set_mgr.query_to_current_set("select id from file where rowid=2")
book1_id = mgr.create_book("book1")
self.assertEqual(1, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from file where name=?', ('book1',)).fetchone()[0])
mgr.delete_book(book1_id)
self.assertEqual(0, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(0, self.conn.execute(
'select count(*) from file where name=?', ('book1',)).fetchone()[0])
set_mgr.query_to_current_set("select id from file where rowid=2")
book1_id = mgr.create_book("book1")
set_mgr.query_to_current_set("select id from file where rowid between 7 and 8")
book7_id = mgr.create_book("book7")
mgr.delete_book(book1_id)
mgr.delete_book(book1_id)
self.assertEqual(2, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(0, self.conn.execute(
'select count(*) from file where name=?', ('book1',)).fetchone()[0])
self.assertEqual(1, self.conn.execute(
'select count(*) from file where name=?', ('book7',)).fetchone()[0])
mgr.delete_book(book7_id)
self.assertEqual(0, self.conn.execute(
'select count(*) from file_map').fetchone()[0])
self.assertEqual(0, self.conn.execute(
'select count(*) from file where name=?', ('book7',)).fetchone()[0])
app.stop()
def _test_open_book(self, app, *args):
mgr = self._create_set_mgr()
set_mgr = mgr.set_manager
with self.conn:
set_mgr.query_to_current_set("select id from file where rowid between 20 and 70")
book1_id = mgr.create_book("book1")
set_mgr.query_to_current_set("select id from file where rowid between 100 and 300")
book2_id = mgr.create_book("book2")
set_mgr.query_to_current_set("select id from file where rowid = 1 ")
mgr.open_book(book1_id)
self.assertEqual(51, self.conn.execute('select count(*) from current_set').fetchone()[0])
mgr.open_book(book2_id)
self.assertEqual(201, self.conn.execute('select count(*) from current_set').fetchone()[0])
mgr.open_book(book1_id)
self.assertEqual(51, self.conn.execute('select count(*) from current_set').fetchone()[0])
app.stop()
def _test_read_tags(self, app, *args):
        c = Cursor()
mgr = self._create_set_mgr()
set_mgr = mgr.set_manager
with self.conn:
set_mgr.query_to_current_set("select id from file order by id")
self.assertEqual(1, self.conn.execute('select file_key from current_set where position=0').fetchone()[0])
            self.assertNotEqual(0, self.conn.execute('select count(*) from current_set').fetchone()[0])
            row = self.conn.execute('select rowid, * from current_set where position=1').fetchone()
            c.set_implementation(SqliteCursor(row=row, backend=self.conn, search_manager=self.search_manager))
            self.assertEqual('file', c.get_tag(0, 'file_type', 0))
set_mgr.query_to_current_set("select id from file where rowid between 20 and 70")
book1_id = mgr.create_book("book1")
self.assertEqual(10001, book1_id)
self.assertEqual(10001, self.conn.execute('select count(*) from file').fetchone()[0])
self.assertEqual("book1", self.conn.execute('select name from file where id=10001').fetchone()[0])
set_mgr.query_to_current_set("select id from file order by id")
self.assertEqual(10001, self.conn.execute('select count(*) from current_set').fetchone()[0])
self.assertEqual(10000, self.conn.execute('select position from current_set where file_key=10001').fetchone()[0])
c.go_last()
            self.assertEqual(book1_id, c.file_id)
            self.assertEqual("book1", c.filename)
            self.assertEqual('book', c.get_tag(0, 'file_type', 0))
app.stop()
def call_test(self, func):
a = TestApp()
p = partial(func, a)
Clock.schedule_once(p, 0.0001)
a.run()
def test_initialization(self):
self.call_test(self._test_initialization)
def test_add_book(self):
self.call_test(self._test_add_book)
def test_remove_book(self):
self.call_test(self._test_remove_book)
def test_open_book(self):
self.call_test(self._test_open_book)
def test_read_tags(self):
self.assertTrue(issubclass(SqliteCursor,CursorInterface))
self.call_test(self._test_read_tags)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_13717
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
# Copyright (c) 2021 INTEL CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
import numpy as np
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.network_architecture.neural_network import SegmentationNetwork
import torch.nn.functional
from torch.quantization import QuantStub, DeQuantStub
from neural_compressor.adaptor.pytorch import get_torch_version, PyTorchVersionMode
class ConvDropoutNormNonlin(nn.Module):
"""
fixes a bug in ConvDropoutNormNonlin where lrelu was used regardless of nonlin. Bad.
"""
def __init__(self, input_channels, output_channels,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None):
super(ConvDropoutNormNonlin, self).__init__()
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
self.conv = self.conv_op(input_channels, output_channels, **self.conv_kwargs)
if self.dropout_op is not None and self.dropout_op_kwargs['p'] is not None and self.dropout_op_kwargs[
'p'] > 0:
self.dropout = self.dropout_op(**self.dropout_op_kwargs)
else:
self.dropout = None
self.instnorm = self.norm_op(output_channels, **self.norm_op_kwargs)
self.lrelu = self.nonlin(**self.nonlin_kwargs)
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
version = get_torch_version()
if version >= PyTorchVersionMode.PT17.value:
return self.lrelu(self.quant(self.instnorm(self.dequant(x))))
else:
return self.quant(self.lrelu(self.instnorm(self.dequant(x))))
class ConvDropoutNonlinNorm(ConvDropoutNormNonlin):
def forward(self, x):
x = self.conv(x)
if self.dropout is not None:
x = self.dropout(x)
return self.quant(self.instnorm(self.lrelu(self.dequant(x))))
class StackedConvLayers(nn.Module):
def __init__(self, input_feature_channels, output_feature_channels, num_convs,
conv_op=nn.Conv2d, conv_kwargs=None,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, first_stride=None, basic_block=ConvDropoutNormNonlin):
'''
stacks ConvDropoutNormLReLU layers. initial_stride will only be applied to first layer in the stack. The other parameters affect all layers
:param input_feature_channels:
:param output_feature_channels:
:param num_convs:
:param dilation:
:param kernel_size:
:param padding:
:param dropout:
:param initial_stride:
:param conv_op:
:param norm_op:
:param dropout_op:
:param inplace:
:param neg_slope:
:param norm_affine:
:param conv_bias:
'''
self.input_channels = input_feature_channels
self.output_channels = output_feature_channels
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
if conv_kwargs is None:
conv_kwargs = {'kernel_size': 3, 'stride': 1, 'padding': 1, 'dilation': 1, 'bias': True}
self.nonlin_kwargs = nonlin_kwargs
self.nonlin = nonlin
self.dropout_op = dropout_op
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.conv_kwargs = conv_kwargs
self.conv_op = conv_op
self.norm_op = norm_op
if first_stride is not None:
self.conv_kwargs_first_conv = deepcopy(conv_kwargs)
self.conv_kwargs_first_conv['stride'] = first_stride
else:
self.conv_kwargs_first_conv = conv_kwargs
super(StackedConvLayers, self).__init__()
self.blocks = nn.Sequential(
*([basic_block(input_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs_first_conv,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs)] +
[basic_block(output_feature_channels, output_feature_channels, self.conv_op,
self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs) for _ in range(num_convs - 1)]))
def forward(self, x):
return self.blocks(x)
def print_module_training_status(module):
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Conv3d) or isinstance(module, nn.Dropout3d) or \
isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout) or isinstance(module, nn.InstanceNorm3d) \
or isinstance(module, nn.InstanceNorm2d) or isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or isinstance(module,
nn.BatchNorm1d):
print(str(module), module.training)
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):
super(Upsample, self).__init__()
self.align_corners = align_corners
self.mode = mode
self.scale_factor = scale_factor
self.size = size
def forward(self, x):
return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode,
align_corners=self.align_corners)
class Generic_UNet(SegmentationNetwork):
DEFAULT_BATCH_SIZE_3D = 2
DEFAULT_PATCH_SIZE_3D = (64, 192, 160)
SPACING_FACTOR_BETWEEN_STAGES = 2
BASE_NUM_FEATURES_3D = 30
MAX_NUMPOOL_3D = 999
MAX_NUM_FILTERS_3D = 320
DEFAULT_PATCH_SIZE_2D = (256, 256)
BASE_NUM_FEATURES_2D = 30
DEFAULT_BATCH_SIZE_2D = 50
MAX_NUMPOOL_2D = 999
MAX_FILTERS_2D = 480
use_this_for_batch_size_computation_2D = 19739648
use_this_for_batch_size_computation_3D = 520000000 # 505789440
def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,
feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,
norm_op=nn.BatchNorm2d, norm_op_kwargs=None,
dropout_op=nn.Dropout2d, dropout_op_kwargs=None,
nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,
final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,
conv_kernel_sizes=None,
upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,
max_num_features=None, basic_block=ConvDropoutNormNonlin,
seg_output_use_bias=False):
"""
basically more flexible than v1, architecture is the same
Does this look complicated? Nah bro. Functionality > usability
This does everything you need, including world peace.
Questions? -> [email protected]
"""
super(Generic_UNet, self).__init__()
self.convolutional_upsampling = convolutional_upsampling
self.convolutional_pooling = convolutional_pooling
self.upscale_logits = upscale_logits
if nonlin_kwargs is None:
nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
if dropout_op_kwargs is None:
dropout_op_kwargs = {'p': 0.5, 'inplace': True}
if norm_op_kwargs is None:
norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}
self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}
self.nonlin = nonlin
self.nonlin_kwargs = nonlin_kwargs
self.dropout_op_kwargs = dropout_op_kwargs
self.norm_op_kwargs = norm_op_kwargs
self.weightInitializer = weightInitializer
self.conv_op = conv_op
self.norm_op = norm_op
self.dropout_op = dropout_op
self.num_classes = num_classes
self.final_nonlin = final_nonlin
self._deep_supervision = deep_supervision
self.do_ds = deep_supervision
if conv_op == nn.Conv2d:
upsample_mode = 'bilinear'
pool_op = nn.MaxPool2d
transpconv = nn.ConvTranspose2d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
elif conv_op == nn.Conv3d:
upsample_mode = 'trilinear'
pool_op = nn.MaxPool3d
transpconv = nn.ConvTranspose3d
if pool_op_kernel_sizes is None:
pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
if conv_kernel_sizes is None:
conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
else:
raise ValueError("unknown convolution dimensionality, conv op: %s" % str(conv_op))
self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)
self.pool_op_kernel_sizes = pool_op_kernel_sizes
self.conv_kernel_sizes = conv_kernel_sizes
self.conv_pad_sizes = []
for krnl in self.conv_kernel_sizes:
self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])
if max_num_features is None:
if self.conv_op == nn.Conv3d:
self.max_num_features = self.MAX_NUM_FILTERS_3D
else:
self.max_num_features = self.MAX_FILTERS_2D
else:
self.max_num_features = max_num_features
self.conv_blocks_context = []
self.conv_blocks_localization = []
self.td = []
self.tu = []
self.seg_outputs = []
output_features = base_num_features
input_features = input_channels
for d in range(num_pool):
# determine the first stride
if d != 0 and self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[d - 1]
else:
first_stride = None
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
# add convolutions
self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,
self.conv_op, self.conv_kwargs, self.norm_op,
self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,
first_stride, basic_block=basic_block))
if not self.convolutional_pooling:
self.td.append(pool_op(pool_op_kernel_sizes[d]))
input_features = output_features
output_features = int(np.round(output_features * feat_map_mul_on_downscale))
output_features = min(output_features, self.max_num_features)
# now the bottleneck.
# determine the first stride
if self.convolutional_pooling:
first_stride = pool_op_kernel_sizes[-1]
else:
first_stride = None
# the output of the last conv must match the number of features from the skip connection if we are not using
# convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
# done by the transposed conv
if self.convolutional_upsampling:
final_num_features = output_features
else:
final_num_features = self.conv_blocks_context[-1].output_channels
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
self.conv_blocks_context.append(nn.Sequential(
StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, first_stride, basic_block=basic_block),
StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,
self.nonlin_kwargs, basic_block=basic_block)))
# if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
if not dropout_in_localization:
old_dropout_p = self.dropout_op_kwargs['p']
self.dropout_op_kwargs['p'] = 0.0
# now lets build the localization pathway
for u in range(num_pool):
nfeatures_from_down = final_num_features
nfeatures_from_skip = self.conv_blocks_context[
-(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2
n_features_after_tu_and_concat = nfeatures_from_skip * 2
# the first conv reduces the number of features to match those of skip
# the following convs work on that number of features
# if not convolutional upsampling then the final conv reduces the num of features again
if u != num_pool - 1 and not self.convolutional_upsampling:
final_num_features = self.conv_blocks_context[-(3 + u)].output_channels
else:
final_num_features = nfeatures_from_skip
if not self.convolutional_upsampling:
self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))
else:
self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],
pool_op_kernel_sizes[-(u + 1)], bias=False))
self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]
self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]
self.conv_blocks_localization.append(nn.Sequential(
StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,
self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,
self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),
StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,
self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,
self.nonlin, self.nonlin_kwargs, basic_block=basic_block)
))
for ds in range(len(self.conv_blocks_localization)):
self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,
1, 1, 0, 1, 1, seg_output_use_bias))
self.upscale_logits_ops = []
cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]
for usl in range(num_pool - 1):
if self.upscale_logits:
self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),
mode=upsample_mode))
else:
self.upscale_logits_ops.append(lambda x: x)
if not dropout_in_localization:
self.dropout_op_kwargs['p'] = old_dropout_p
# register all modules properly
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)
self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
self.td = nn.ModuleList(self.td)
self.tu = nn.ModuleList(self.tu)
self.seg_outputs = nn.ModuleList(self.seg_outputs)
if self.upscale_logits:
self.upscale_logits_ops = nn.ModuleList(
self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here
if self.weightInitializer is not None:
self.apply(self.weightInitializer)
# self.apply(print_module_training_status)
self.quant = QuantStub()
self.dequant = DeQuantStub()
for u in range(len(self.tu)):
setattr(self, 'cat_quant' + str(u), QuantStub())
def forward(self, x):
skips = []
seg_outputs = []
x = self.quant(x)
for d in range(len(self.conv_blocks_context) - 1):
x = self.conv_blocks_context[d](x)
skips.append(self.dequant(x))
if not self.convolutional_pooling:
x = self.td[d](x)
x = self.conv_blocks_context[-1](x)
for u in range(len(self.tu)):
x = self.tu[u](self.dequant(x))
x = torch.cat((x, skips[-(u + 1)]), dim=1)
x = self.conv_blocks_localization[u](getattr(self, 'cat_quant' + str(u))(x))
seg_outputs.append(self.dequant(self.final_nonlin(self.seg_outputs[u](x))))
if self._deep_supervision and self.do_ds:
return tuple([seg_outputs[-1]] + [i(j) for i, j in
zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])
else:
return seg_outputs[-1]
@staticmethod
def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,
num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,
conv_per_stage=2):
"""
This only applies for num_conv_per_stage and convolutional_upsampling=True
not real vram consumption. just a constant term to which the vram consumption will be approx proportional
(+ offset for parameter storage)
:param deep_supervision:
:param patch_size:
:param num_pool_per_axis:
:param base_num_features:
:param max_num_features:
:param num_modalities:
:param num_classes:
:param pool_op_kernel_sizes:
:return:
"""
if not isinstance(num_pool_per_axis, np.ndarray):
num_pool_per_axis = np.array(num_pool_per_axis)
npool = len(pool_op_kernel_sizes)
map_size = np.array(patch_size)
tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +
num_modalities * np.prod(map_size, dtype=np.int64) +
num_classes * np.prod(map_size, dtype=np.int64))
num_feat = base_num_features
for p in range(npool):
for pi in range(len(num_pool_per_axis)):
map_size[pi] /= pool_op_kernel_sizes[p][pi]
num_feat = min(num_feat * 2, max_num_features)
num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv
tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat
if deep_supervision and p < (npool - 2):
tmp += np.prod(map_size, dtype=np.int64) * num_classes
# print(p, map_size, num_feat, tmp)
return tmp
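# Illustrative usage sketch (added example; the configuration values below are
# assumptions for a typical 5-pool 3D setup, not official nnU-Net defaults):
def _demo_vram_estimate():
    # returns the dimensionless VRAM proxy computed by the static helper above,
    # e.g. print(_demo_vram_estimate())
    return Generic_UNet.compute_approx_vram_consumption(
        patch_size=(64, 192, 160), num_pool_per_axis=(4, 5, 5),
        base_num_features=30, max_num_features=320,
        num_modalities=1, num_classes=3,
        pool_op_kernel_sizes=[(2, 2, 2)] * 5,
        deep_supervision=True, conv_per_stage=2)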
|
the-stack_106_13718
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse, unquote
import logging
import os
import ftplib
import time
from flexget import plugin
from flexget.event import event
from flexget.utils.pathscrub import pathscrub
from flexget.utils.template import RenderError
log = logging.getLogger('ftp')
class OutputFtp(object):
"""
Ftp Download plugin
input-url: ftp://<user>:<password>@<host>:<port>/<path to file>
Example: ftp://anonymous:[email protected]:21/torrent-files-dir
config:
ftp_download:
use-ssl: <True/False>
ftp_tmp_path: <path>
delete_origin: <True/False>
download_empty_dirs: <True/False>
TODO:
- Resume downloads
- create banlists files
- validate connection parameters
"""
schema = {
'type': 'object',
'properties': {
'use-ssl': {'type': 'boolean', 'default': False},
'ftp_tmp_path': {'type': 'string', 'format': 'path'},
'delete_origin': {'type': 'boolean', 'default': False},
'download_empty_dirs': {'type': 'boolean', 'default': False},
},
'additionalProperties': False,
}
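    # Illustrative task configuration (a sketch only; the task name, mock entry
    # and paths below are made up, but the option keys mirror the schema above):
    #
    #   tasks:
    #     grab-from-ftp:
    #       mock:
    #         - {title: 'some.file', url: 'ftp://user:[email protected]:21/dir/some.file'}
    #       accept_all: yes
    #       ftp_download:
    #         ftp_tmp_path: /home/user/flexget-tmp
    #         use-ssl: no
    #         delete_origin: no
    #         download_empty_dirs: no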
def prepare_config(self, config, task):
config.setdefault('use-ssl', False)
config.setdefault('delete_origin', False)
config.setdefault('ftp_tmp_path', os.path.join(task.manager.config_base, 'temp'))
config.setdefault('download_empty_dirs', False)
return config
def ftp_connect(self, config, ftp_url, current_path):
if config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
# ftp.set_debuglevel(2)
log.debug("Connecting to " + ftp_url.hostname)
ftp.connect(ftp_url.hostname, ftp_url.port)
ftp.login(ftp_url.username, ftp_url.password)
if config['use-ssl']:
ftp.prot_p()
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
log.debug("Changing directory to: " + current_path)
ftp.cwd(current_path)
return ftp
def check_connection(self, ftp, config, ftp_url, current_path):
try:
ftp.voidcmd("NOOP")
except (IOError, ftplib.Error):
ftp = self.ftp_connect(config, ftp_url, current_path)
return ftp
def on_task_download(self, task, config):
config = self.prepare_config(config, task)
for entry in task.accepted:
ftp_url = urlparse(entry.get('url'))
ftp_url = ftp_url._replace(path=unquote(ftp_url.path))
current_path = os.path.dirname(ftp_url.path)
try:
ftp = self.ftp_connect(config, ftp_url, current_path)
except ftplib.all_errors as e:
entry.fail("Unable to connect to server : %s" % (e))
break
to_path = config['ftp_tmp_path']
try:
to_path = entry.render(to_path)
except RenderError as err:
raise plugin.PluginError(
"Path value replacement `%s` failed: %s" % (to_path, err.args[0])
)
# Clean invalid characters with pathscrub plugin
to_path = pathscrub(to_path)
if not os.path.exists(to_path):
log.debug("Creating base path: %s" % to_path)
os.makedirs(to_path)
if not os.path.isdir(to_path):
raise plugin.PluginWarning("Destination `%s` is not a directory." % to_path)
file_name = os.path.basename(ftp_url.path)
try:
# Directory
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd(file_name)
self.ftp_walk(ftp, os.path.join(to_path, file_name), config, ftp_url, ftp_url.path)
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd('..')
if config['delete_origin']:
ftp.rmd(file_name)
except ftplib.error_perm:
# File
self.ftp_down(ftp, file_name, to_path, config, ftp_url, current_path)
ftp.close()
def on_task_output(self, task, config):
"""Count this as an output plugin."""
def ftp_walk(self, ftp, tmp_path, config, ftp_url, current_path):
log.debug("DIR->" + ftp.pwd())
log.debug("FTP tmp_path : " + tmp_path)
try:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
dirs = ftp.nlst(ftp.pwd())
except ftplib.error_perm as ex:
log.info("Error %s" % ex)
return ftp
if not dirs:
if config['download_empty_dirs']:
os.mkdir(tmp_path)
else:
log.debug("Empty directory, skipping.")
return ftp
for file_name in (path for path in dirs if path not in ('.', '..')):
file_name = os.path.basename(file_name)
try:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd(file_name)
if not os.path.isdir(tmp_path):
os.mkdir(tmp_path)
log.debug("Directory %s created" % tmp_path)
ftp = self.ftp_walk(
ftp,
os.path.join(tmp_path, os.path.basename(file_name)),
config,
ftp_url,
os.path.join(current_path, os.path.basename(file_name)),
)
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.cwd('..')
if config['delete_origin']:
ftp.rmd(os.path.basename(file_name))
except ftplib.error_perm:
ftp = self.ftp_down(
ftp, os.path.basename(file_name), tmp_path, config, ftp_url, current_path
)
ftp = self.check_connection(ftp, config, ftp_url, current_path)
return ftp
def ftp_down(self, ftp, file_name, tmp_path, config, ftp_url, current_path):
log.debug("Downloading %s into %s" % (file_name, tmp_path))
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
local_file = open(os.path.join(tmp_path, file_name), 'a+b')
ftp = self.check_connection(ftp, config, ftp_url, current_path)
try:
ftp.sendcmd("TYPE I")
file_size = ftp.size(file_name)
except Exception:
file_size = 1
max_attempts = 5
size_at_last_err = 0
log.info("Starting download of %s into %s" % (file_name, tmp_path))
while file_size > local_file.tell():
try:
if local_file.tell() != 0:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.retrbinary('RETR %s' % file_name, local_file.write, local_file.tell())
else:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.retrbinary('RETR %s' % file_name, local_file.write)
except Exception as error:
if max_attempts != 0:
if size_at_last_err == local_file.tell():
# Nothing new was downloaded so the error is most likely connected to the resume functionality.
# Delete the downloaded file and try again from the beginning.
local_file.close()
os.remove(os.path.join(tmp_path, file_name))
local_file = open(os.path.join(tmp_path, file_name), 'a+b')
max_attempts -= 1
size_at_last_err = local_file.tell()
log.debug("Retrying download after error %s" % error.args[0])
# Short timeout before retry.
time.sleep(1)
else:
log.error("Too many errors downloading %s. Aborting." % file_name)
break
local_file.close()
if config['delete_origin']:
ftp = self.check_connection(ftp, config, ftp_url, current_path)
ftp.delete(file_name)
return ftp
@event('plugin.register')
def register_plugin():
plugin.register(OutputFtp, 'ftp_download', api_ver=2)
|
the-stack_106_13725
|
import os
from collections import Counter
from rex.utils.io import dump_json, load_json, dump_line_json
from rex.utils.position import find_all_positions
def convert_data(dataset_name, filepath):
data = load_json(filepath)
final_data = []
lens = []
for ins_idx, ins in enumerate(data):
ins["text"] = ins["text"].replace(" ", "")
tokens = list(ins["text"])
lens.append(len(tokens))
d = {
"id": f"{dataset_name.upper()}.{ins_idx}",
"tokens": tokens, # char tokenize
"entities": [],
"relations": [],
}
for head, rel, tail in ins["triple_list"]:
head = head.replace(" ", "")
tail = tail.replace(" ", "")
try:
head_pos = find_all_positions(tokens, list(head))
tail_pos = find_all_positions(tokens, list(tail))
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception:
continue
if not head_pos or not tail_pos:
continue
head_pos = head_pos[0]
tail_pos = tail_pos[0]
head_ent = ["ENTITY", *head_pos, head]
if head_ent not in d["entities"]:
d["entities"].append(head_ent)
tail_ent = ["ENTITY", *tail_pos, tail]
if tail_ent not in d["entities"]:
d["entities"].append(tail_ent)
relation = rel
rels.add(relation)
d["relations"].append(
[
relation,
d["entities"].index(head_ent),
d["entities"].index(tail_ent),
[head_ent[3], tail_ent[3]],
]
)
final_data.append(d)
print(f"len of {dataset_name}:", len(final_data), final_data[:2])
dump_line_json(final_data, f"formatted/{dataset_name}.linejson")
len_counter = Counter(lens)
print(max(lens), len_counter.most_common())
if __name__ == "__main__":
os.makedirs("formatted", exist_ok=True)
rels = set()
for dn in ["train", "dev", "test"]:
convert_data(dn, f"raw/{dn}_triples.json")
dump_json(
{rel: idx for idx, rel in enumerate(rels)}, "formatted/rel2id.json", indent=2
)
|
the-stack_106_13726
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Power by viekie2017-09-03 16:40:56
from collections import Counter
from operator import itemgetter as itemgtr
import jieba
import file_op_utils as foutils
class WordCounter():
def __init__(self, text_list):
self.text_list = text_list
self.stop_word = self.Get_Stop_Words()
self.count_res = None
self.Word_Count(self.text_list)
def Get_Stop_Words(self):
ret = []
ret = foutils.read_pickle('./static/stop_words.pkl')
return ret
def Word_Count(self, text_list, cut_all=False):
flted_word_list = []
count = 0
for line in text_list:
res = jieba.cut(line, cut_all=cut_all)
res = list(res)
text_list[count] = res
count += 1
flted_word_list += res
self.count_res = MultiCounter(flted_word_list)
        for word in self.stop_word:
try:
self.count_res.pop(word)
except Exception:
pass
class MultiCounter(Counter):
def __init__(self, element_list):
super().__init__(element_list)
def larger_than(self, minvalue, ret='list'):
temp = sorted(self.items(), key=itemgtr(1), reverse=True)
low = 0
high = len(temp)
while(high - low > 1):
mid = (low + high) >> 1
if temp[mid][1] >= minvalue:
low = mid
else:
high = mid
if temp[low][1] < minvalue:
if ret == 'dict':
return {}
else:
return []
if ret == 'dict':
ret_data = {}
for ele, count in temp[:high]:
ret_data[ele] = count
return ret_data
else:
return temp[:high]
def less_than(self, minvalue, ret='list'):
temp = sorted(self.items(), key=itemgtr(1))
low = 0
high = len(temp)
while(high - low > 1):
mid = (high + low) >> 1
if temp[mid][1] <= minvalue:
low = mid
else:
high = mid
        if temp[low][1] > minvalue:
if ret == 'dict':
return {}
else:
return []
if ret == 'dict':
ret_data = {}
for ele, count in temp[:high]:
ret_data[ele] = count
return ret_data
else:
return temp[:high]
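# Minimal usage sketch (added example, not part of the original module): shows the
# threshold helpers on a tiny counter; the expected outputs are indicative only.
def demo_multicounter():
    mc = MultiCounter(list('aaabbc'))
    print(mc.larger_than(2))            # e.g. [('a', 3), ('b', 2)]
    print(mc.less_than(1, ret='dict'))  # e.g. {'c': 1}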
if __name__ == '__main__':
data = ['我是中国人,爱吃中国菜',
'你是日本人,滚回日本岛']
wc = WordCounter(data)
c = wc.count_res
print(c)
print(sum(c.values()))
|
the-stack_106_13731
|
from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model=Post
fields=('post_title', 'image', 'post_text', 'post_author')
exclude = ('post_author',)
def __init__(self, *args, **kwargs):
super(PostForm,self).__init__(*args,**kwargs)
self.fields['post_title'].widget.attrs.update({'class':'form-control','placeholder':'Enter title (60 characters max)'})
self.fields['image'].widget.attrs.update({'class':'form-control','placeholder':'Upload image'})
self.fields['post_text'].widget.attrs.update({'class':'form-control','placeholder':'Write your post...'})
|
the-stack_106_13734
|
import pytest
from numpy.testing import assert_allclose
import chainladder as cl
@pytest.fixture
def atol():
return 1e-5
data = ['RAA', 'ABC', 'GenIns', 'MW2008', 'MW2014']
@pytest.mark.parametrize('data', data)
def test_benktander_to_chainladder(data, atol):
tri = cl.load_dataset(data)
a = cl.Chainladder().fit(tri).ibnr_
b = cl.Benktander(apriori=.8, n_iters=255).fit(tri, sample_weight=a).ibnr_
assert_allclose(a.triangle, b.triangle, atol=atol)
def test_bf_eq_cl_when_using_cl_apriori():
cl_ult = cl.Chainladder().fit(cl.load_dataset('quarterly')).ultimate_
cl_ult.rename('development', ['apriori'])
bf_ult = cl.BornhuetterFerguson().fit(cl.load_dataset('quarterly'),
sample_weight=cl_ult).ultimate_
assert_allclose(cl_ult.triangle, bf_ult.triangle, atol=1e-5)
|
the-stack_106_13736
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from .bert_dictionary_NAR import BertDictionaryNAR
from .prophetnet_NAR_generator import ProphetNetNARSequenceGenerator
@register_task('translation_prophetnet_nar')
class TranslationProphetnetNARTask(TranslationTask):
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args, src_dict, tgt_dict)
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', default=False, action='store_true',
help='boolean to truncate source to max-source-positions')
parser.add_argument('--language-model-path', default=None,
help='source language')
parser.add_argument('--lm-weight', default=0.5, type=float,
help='prob = lm_weight * prob_lm + (1-lm_weight) * prob_decoder')
# fmt: on
@classmethod
def load_dictionary(cls, filename):
return BertDictionaryNAR.load_from_file(filename)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
def build_generator(self, args):
return ProphetNetNARSequenceGenerator(
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
sampling=getattr(args, 'sampling', False),
sampling_topk=getattr(args, 'sampling_topk', -1),
sampling_topp=getattr(args, 'sampling_topp', -1.0),
temperature=getattr(args, 'temperature', 1.),
diverse_beam_groups=getattr(args, 'diverse_beam_groups', -1),
diverse_beam_strength=getattr(args, 'diverse_beam_strength', 0.5),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
)
|
the-stack_106_13739
|
# the goal is to compute the stream y satisfying dy/dt = f(y) for a given function f
# here y is represented as a stream (y(0), y(dt), y(2*dt), ...)
# and we have the boundary condition y(0) = y0
#
# to calculate y, we rely on two equations
# dy = f(y), y(n*dt) = y(0) + cumsum(dy) * dt
# here and below we use dy as an abbreviation for dy/dt
#
# the difficulty in stream processing is that dy and y depend on each other
# in our solution, y's definition explicitly puts dy inside a lambda, so that it is lazily evaluated
# this is quite similar to the sin/cos power series example
# if we used y = integral(dy, y0, dt), the definitions of y and dy would depend on each other directly, which is illegal
import math
from typing import Callable
from sicp352_prime_number import InfStream
def solve(f: Callable[[float], float], y0: float, dt: float):
y = InfStream(y0, lambda: InfStream.add(y, dy.scale(dt)))
dy = InfStream.map(f, y)
return y
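# Added usage sketch (not part of the original module): dy/dt = -y with y(0) = 1
# has the exact solution exp(-t), so the stream value near t = 1 should be ~1/e.
def demo_decay(precision=1e-4):
    y = solve(lambda v: -v, 1.0, precision)
    estimate = y.nth_value(int(1 / precision))
    print('1/e precise: %.14f' % (1.0 / math.e))
    print('1/e estimate: %.14f' % estimate)
    assert abs(1.0 / math.e - estimate) < 1e-3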
def test():
def f(y): return y
precision = 1e-4
y = solve(f, 1, precision)
e1 = y.nth_value(int(1/precision))
print('e precise: %.14f' % math.e)
print('e estimate: %.14f' % e1)
assert abs(math.e-e1) < 1e-3
if __name__ == '__main__':
test()
|
the-stack_106_13740
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import re
import logging
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for ABC: A System for Sequential Synthesis and Verification
URL: https://people.eecs.berkeley.edu/~alanmi/abc/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("abc", subdir="bin")
def name(self):
return "ABC"
def cmdline(self, executable, options, task, rlimits):
return [executable] + options + [task.single_input_file]
def get_value_from_output(self, output, identifier):
# search for the identifier in the output and return the number after it
# the number can be an integer, a decimal, or a scientific notation
# warn if there are repeated matches (multiple statistics from sequential analysis?)
regex_integer = r"(\d+)"
regex_decimal = r"(\d+\.\d*|\d*\.\d+)"
regex_scinote = r"(\d\.?\d*[Ee][+\-]?\d+)"
regex_pattern = (
re.escape(identifier)
+ r"\s*[:=]\s*(-?("
+ regex_integer
+ r"|"
+ regex_decimal
+ r"|"
+ regex_scinote
+ r"))(\s|$)"
)
regex = re.compile(regex_pattern)
match = None
for line in output:
result = regex.search(line)
if result:
if match is None:
match = result.group(1)
else:
logging.warning(
"skipping repeated matches for identifier '%s': '%s'",
identifier,
line,
)
return match
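# Rough usage sketch (added example; the output lines are hypothetical, not real
# ABC logs, and it assumes the tool class can be instantiated directly):
def _demo_get_value_from_output():
    sample_output = ["Nodes = 1024", "Time: 1.5e-02 s", "Nodes = 2048"]
    tool = Tool()
    print(tool.get_value_from_output(sample_output, "Nodes"))  # "1024", warns about the repeated match
    print(tool.get_value_from_output(sample_output, "Time"))   # "1.5e-02"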
|
the-stack_106_13741
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/EvidenceReport
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from fhir.resources import fhirtypes # noqa: F401
from fhir.resources import evidencereport
def impl_evidencereport_1(inst):
assert inst.id == "example"
assert inst.meta.tag[0].code == "HTEST"
assert inst.meta.tag[0].display == "test health data"
assert (
inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
)
assert inst.status == "draft"
assert inst.subject.note[0].text == "This is just an example."
assert inst.text.div == (
'<div xmlns="http://www.w3.org/1999/xhtml">[Put rendering ' "here]</div>"
)
assert inst.text.status == "generated"
def test_evidencereport_1(base_settings):
"""No. 1 tests collection for EvidenceReport.
Test File: evidencereport-example.json
"""
filename = base_settings["unittest_data_dir"] / "evidencereport-example.json"
inst = evidencereport.EvidenceReport.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "EvidenceReport" == inst.resource_type
impl_evidencereport_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "EvidenceReport" == data["resourceType"]
inst2 = evidencereport.EvidenceReport(**data)
impl_evidencereport_1(inst2)
|
the-stack_106_13743
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create reader and extract the velocity and temperature
reader = vtk.vtkPNGReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/vtk.png")
quant = vtk.vtkImageQuantizeRGBToIndex()
quant.SetInputConnection(reader.GetOutputPort())
quant.SetNumberOfColors(32)
i2pd = vtk.vtkImageToPolyDataFilter()
i2pd.SetInputConnection(quant.GetOutputPort())
i2pd.SetLookupTable(quant.GetLookupTable())
i2pd.SetColorModeToLUT()
i2pd.SetOutputStyleToPolygonalize()
i2pd.SetError(0)
i2pd.DecimationOn()
i2pd.SetDecimationError(0.0)
i2pd.SetSubImageSize(25)
#Need a triangle filter because the polygons are complex and concave
tf = vtk.vtkTriangleFilter()
tf.SetInputConnection(i2pd.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(tf.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Create graphics stuff
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,250)
acamera = vtk.vtkCamera()
acamera.SetClippingRange(343.331,821.78)
acamera.SetPosition(-139.802,-85.6604,437.485)
acamera.SetFocalPoint(117.424,106.656,-14.6)
acamera.SetViewUp(0.430481,0.716032,0.549532)
acamera.SetViewAngle(30)
ren1.SetActiveCamera(acamera)
iren.Initialize()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
the-stack_106_13747
|
# Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import logging
from functools import wraps
from glob import glob
from multiprocessing.pool import ThreadPool
from os.path import expanduser, join, dirname, isdir
from typing import Dict, List
from msm import GitException
from msm.exceptions import (MsmException, SkillNotFound, MultipleSkillMatches,
AlreadyInstalled)
from msm.skill_entry import SkillEntry
from msm.skill_repo import SkillRepo
from msm.skills_data import (build_skill_entry, get_skill_entry,
write_skills_data, load_skills_data,
skills_data_hash)
from msm.util import MsmProcessLock
LOG = logging.getLogger(__name__)
CURRENT_SKILLS_DATA_VERSION = 1
def save_skills_data(func):
@wraps(func)
def func_wrapper(self, *args, **kwargs):
will_save = False
if not self.saving_handled:
will_save = self.saving_handled = True
try:
ret = func(self, *args, **kwargs)
# Write only if no exception occurs
if will_save:
self.write_skills_data()
finally:
# Always restore saving_handled flag
if will_save:
self.saving_handled = False
return ret
return func_wrapper
class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde'}
DEFAULT_SKILLS_DIR = "/opt/mycroft/skills"
def __init__(self, platform='default', skills_dir=None, repo=None,
versioned=True):
self.platform = platform
self.skills_dir = (
expanduser(skills_dir or '') or self.DEFAULT_SKILLS_DIR
)
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
self.skills_data = None
self.saving_handled = False
self.skills_data_hash = ''
with self.lock:
self.sync_skills_data()
def __upgrade_skills_data(self, skills_data):
new = {}
if skills_data.get('version', 0) == 0:
new['blacklist'] = []
new['version'] = 1
new['skills'] = []
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
for skill in local_skills:
if 'origin' in skills_data.get(skill.name, {}):
origin = skills_data[skill.name]['origin']
elif skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
beta = skills_data.get(skill.name, {}).get('beta', False)
entry = build_skill_entry(skill.name, origin, beta)
entry['installed'] = \
skills_data.get(skill.name, {}).get('installed') or 0
if isinstance(entry['installed'], bool):
entry['installed'] = 0
entry['update'] = \
skills_data.get(skill.name, {}).get('updated') or 0
new['skills'].append(entry)
new['upgraded'] = True
return new
def curate_skills_data(self, skills_data):
""" Sync skills_data with actual skills on disk. """
local_skills = [s for s in self.list() if s.is_local]
default_skills = [s.name for s in self.list_defaults()]
local_skill_names = [s.name for s in local_skills]
skills_data_skills = [s['name'] for s in skills_data['skills']]
# Check for skills that aren't in the list
for skill in local_skills:
if skill.name not in skills_data_skills:
if skill.name in default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
entry = build_skill_entry(skill.name, origin, False)
skills_data['skills'].append(entry)
# Check for skills in the list that doesn't exist in the filesystem
remove_list = []
for s in skills_data.get('skills', []):
if (s['name'] not in local_skill_names and
s['installation'] == 'installed'):
remove_list.append(s)
for skill in remove_list:
skills_data['skills'].remove(skill)
return skills_data
def load_skills_data(self) -> dict:
skills_data = load_skills_data()
if skills_data.get('version', 0) < CURRENT_SKILLS_DATA_VERSION:
skills_data = self.__upgrade_skills_data(skills_data)
else:
skills_data = self.curate_skills_data(skills_data)
return skills_data
def sync_skills_data(self):
""" Update internal skill_data_structure from disk. """
self.skills_data = self.load_skills_data()
if 'upgraded' in self.skills_data:
self.skills_data.pop('upgraded')
else:
self.skills_data_hash = skills_data_hash(self.skills_data)
def write_skills_data(self, data=None):
""" Write skills data hash if it has been modified. """
data = data or self.skills_data
if skills_data_hash(data) != self.skills_data_hash:
write_skills_data(data)
self.skills_data_hash = skills_data_hash(data)
@save_skills_data
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
entry = build_skill_entry(skill.name, origin, skill.is_beta)
try:
skill.install(constraints)
entry['installed'] = time.time()
entry['installation'] = 'installed'
entry['status'] = 'active'
entry['beta'] = skill.is_beta
except AlreadyInstalled:
entry = None
raise
except MsmException as e:
entry['installation'] = 'failed'
entry['status'] = 'error'
entry['failure_message'] = repr(e)
raise
finally:
# Store the entry in the list
if entry:
self.skills_data['skills'].append(entry)
@save_skills_data
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return
def update_all(self):
local_skills = [skill for skill in self.list() if skill.is_local]
def update_skill(skill):
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, local_skills)
@save_skills_data
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
entry = get_skill_entry(skill.name, self.skills_data)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
# On successful update update the update value
if entry:
entry['updated'] = time.time()
@save_skills_data
def apply(self, func, skills):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
            except Exception:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(20) as tp:
return tp.map(run_item, skills)
@save_skills_data
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(install_or_update_skill, self.list_defaults())
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Returns {'skill_group': [SkillEntry('name')]}"""
skills = self.list()
name_to_skill = {skill.name: skill for skill in skills}
defaults = {group: [] for group in self.SKILL_GROUPS}
for section_name, skill_names in self.repo.get_default_skill_names():
section_skills = []
for skill_name in skill_names:
if skill_name in name_to_skill:
section_skills.append(name_to_skill[skill_name])
else:
LOG.warning('No such default skill: ' + skill_name)
defaults[section_name] = section_skills
return defaults
def list_defaults(self):
skill_groups = self.list_all_defaults()
if self.platform not in skill_groups:
LOG.error('Unknown platform:' + self.platform)
return skill_groups.get(self.platform,
skill_groups.get('default', []))
def list(self):
"""
Load a list of SkillEntry objects from both local and
remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to
"""
try:
self.repo.update()
except GitException as e:
if not isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
remote_skill_list = (
SkillEntry(
name, SkillEntry.create_path(self.skills_dir, url, name),
url, sha if self.versioned else '', msm=self
)
for name, path, url, sha in self.repo.get_skill_data()
)
remote_skills = {
skill.id: skill for skill in remote_skill_list
}
all_skills = []
for skill_file in glob(join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(dirname(skill_file), msm=self)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills += list(remote_skills.values())
return all_skills
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.list():
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
path = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, path, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.list()
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
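def _example_usage():
    """Illustrative sketch only (not part of the original module).
    Assumes the methods above belong to msm's MycroftSkillsManager class and
    that it can be constructed with default arguments; treat the import path
    and constructor call below as assumptions.
    """
    from msm import MycroftSkillsManager  # assumed import path
    manager = MycroftSkillsManager()
    skill = manager.find_skill('weather')   # fuzzy match by name, or pass a git URL
    manager.update(skill)                   # update a single downloaded skill
    manager.update()                        # or update every locally installed skill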
|
the-stack_106_13748
|
"""
EEG artifact correction using FASTER
based on pull request for mne-python:
https://github.com/mne-tools/mne-python/pull/1777
NOTE: this code is experimental!!!
"""
def _handle_default(k, v=None):
    # Stand-in for mne.defaults._handle_default; partial updates in ``v`` are merged in.
    bads_faster = dict(max_iter=1, thresh=3, use_metrics=None)
    if v is not None:
        bads_faster.update(v)
    return bads_faster
### faster_.py
# Authors: Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from collections import defaultdict
from mne.utils import logger
from mne.io.pick import pick_info, _picks_by_type
from mne.preprocessing.bads import find_outliers
def _hurst(x):
"""Estimate Hurst exponent on a timeseries.
The estimation is based on the second order discrete derivative.
Parameters
----------
x : array, shape(n_channels, n_samples)
The timeseries to estimate the Hurst exponent for.
Returns
-------
h : float
The estimation of the Hurst exponent for the given timeseries.
"""
from scipy.signal import lfilter
y = np.cumsum(np.diff(x, axis=1), axis=1)
b1 = [1, -2, 1]
b2 = [1, 0, -2, 0, 1]
# second order derivative
y1 = lfilter(b1, 1, y, axis=1)
y1 = y1[:, len(b1) - 1 : -1] # first values contain filter artifacts
# wider second order derivative
y2 = lfilter(b2, 1, y, axis=1)
y2 = y2[:, len(b2) - 1 : -1] # first values contain filter artifacts
s1 = np.mean(y1 ** 2, axis=1)
s2 = np.mean(y2 ** 2, axis=1)
return 0.5 * np.log2(s2 / s1)
def _efficient_welch(data, sfreq):
"""Calls scipy.signal.welch with parameters optimized for greatest speed
at the expense of precision. The window is set to ~10 seconds and windows
are non-overlapping.
Parameters
----------
data : array, shape (..., n_samples)
The timeseries to estimate signal power for. The last dimension
is assumed to be time.
sfreq : float
The sample rate of the timeseries.
Returns
-------
fs : array of float
The frequencies for which the power spectra was calculated.
ps : array, shape (..., frequencies)
The power spectra for each timeseries.
"""
from scipy.signal import welch
nperseg = min(data.shape[-1], 2 ** int(np.log2(10 * sfreq) + 1)) # next power of 2
return welch(data, sfreq, nperseg=nperseg, noverlap=0, axis=-1)
def _freqs_power(data, sfreq, freqs):
"""Estimate signal power at specific frequencies.
Parameters
----------
data : array, shape (..., n_samples)
The timeseries to estimate signal power for. The last dimension
is presumed to be time.
sfreq : float
The sample rate of the timeseries.
freqs : array of float
The frequencies to estimate signal power for.
Returns
-------
p : float
The summed signal power of each requested frequency.
"""
fs, ps = _efficient_welch(data, sfreq)
try:
return np.sum([ps[..., np.searchsorted(fs, f)] for f in freqs], axis=0)
except IndexError:
raise ValueError(
(
"Insufficient sample rate to estimate power at {} Hz for line "
"noise detection. Use the 'metrics' parameter to disable the "
"'line_noise' metric."
).format(freqs)
)
def _power_gradient(data, sfreq, prange):
"""Estimate the gradient of the power spectrum at upper frequencies.
Parameters
----------
data : array, shape (n_components, n_samples)
The timeseries to estimate signal power for. The last dimension
is presumed to be time.
sfreq : float
The sample rate of the timeseries.
prange : pair of floats
The (lower, upper) frequency limits of the power spectrum to use. In
the FASTER paper, they set these to the passband of the lowpass filter.
Returns
-------
grad : array of float
The gradients of the timeseries.
"""
fs, ps = _efficient_welch(data, sfreq)
# Limit power spectrum to selected frequencies
start, stop = (np.searchsorted(fs, p) for p in prange)
if start >= ps.shape[1]:
raise ValueError(
(
"Sample rate insufficient to estimate {} Hz power. "
"Use the 'power_gradient_range' parameter to tweak "
"the tested frequencies for this metric or use the "
"'metrics' parameter to disable the "
"'power_gradient' metric."
).format(prange[0])
)
ps = ps[:, start:stop]
# Compute mean gradients
return np.mean(np.diff(ps), axis=1)
def _deviation(data):
"""Computes the deviation from mean for each channel in a set of epochs.
This is not implemented as a lambda function, because the channel means
should be cached during the computation.
Parameters
----------
data : array, shape (n_epochs, n_channels, n_samples)
The epochs for which to compute the channel deviation.
Returns
-------
dev : list of float
For each epoch, the mean deviation of the channels.
"""
ch_mean = np.mean(data, axis=2)
return ch_mean - np.mean(ch_mean, axis=0)
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the first step of the FASTER algorithm.
This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data, to make sure only relevant
data is analyzed.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
from scipy.stats import kurtosis
metrics = {
"variance": lambda x: np.var(x, axis=1),
"correlation": lambda x: np.mean(
np.ma.masked_array(np.corrcoef(x), np.identity(len(x), dtype=bool)), axis=0
),
"hurst": lambda x: _hurst(x),
"kurtosis": lambda x: kurtosis(x, axis=1),
"line_noise": lambda x: _freqs_power(x, epochs.info["sfreq"], [50, 60]),
}
if use_metrics is None:
use_metrics = list(metrics.keys())
# Concatenate epochs in time
data = epochs.get_data()[:, picks]
data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)
# Find bad channels
bads = defaultdict(list)
info = pick_info(epochs.info, picks, copy=True)
for ch_type, chs in _picks_by_type(info):
logger.info("Bad channel detection on %s channels:" % ch_type.upper())
for metric in use_metrics:
scores = metrics[metric](data[chs])
bad_channels = [
epochs.ch_names[picks[chs[i]]]
for i in find_outliers(scores, thresh, max_iter)
]
logger.info("\tBad by %s: %s" % (metric, bad_channels))
bads[metric].append(bad_channels)
bads = dict((k, np.concatenate(v).tolist()) for k, v in list(bads.items()))
return bads
def _find_bad_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the second step of the FASTER algorithm.
This function attempts to automatically mark bad epochs by performing
outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation'. Defaults to all of them.
thresh : float
        The threshold value, in standard deviations, to apply. An epoch
        crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
"amplitude": lambda x: np.mean(np.ptp(x, axis=2), axis=1),
"deviation": lambda x: np.mean(_deviation(x), axis=1),
"variance": lambda x: np.mean(np.var(x, axis=2), axis=1),
}
if use_metrics is None:
use_metrics = list(metrics.keys())
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = defaultdict(list)
for ch_type, chs in _picks_by_type(info):
logger.info("Bad epoch detection on %s channels:" % ch_type.upper())
for metric in use_metrics:
scores = metrics[metric](data[:, chs])
bad_epochs = find_outliers(scores, thresh, max_iter)
logger.info("\tBad by %s: %s" % (metric, bad_epochs))
bads[metric].append(bad_epochs)
bads = dict((k, np.concatenate(v).tolist()) for k, v in list(bads.items()))
return bads
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the fourth step of the FASTER algorithm.
    This function attempts to automatically mark bad channels in each epoch by
performing outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation', 'median_gradient'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
"amplitude": lambda x: np.ptp(x, axis=2),
"deviation": lambda x: _deviation(x),
"variance": lambda x: np.var(x, axis=2),
"median_gradient": lambda x: np.median(np.abs(np.diff(x)), axis=2),
"line_noise": lambda x: _freqs_power(x, epochs.info["sfreq"], [50, 60]),
}
if use_metrics is None:
use_metrics = list(metrics.keys())
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for m in metrics)
for ch_type, chs in _picks_by_type(info):
ch_names = [info["ch_names"][k] for k in chs]
chs = np.array(chs)
for metric in use_metrics:
logger.info(
"Bad channel-in-epoch detection on %s channels:" % ch_type.upper()
)
s_epochs = metrics[metric](data[:, chs])
for i_epochs, epoch in enumerate(s_epochs):
outliers = find_outliers(epoch, thresh, max_iter)
if len(outliers) > 0:
bad_segment = [ch_names[k] for k in outliers]
logger.info(
"Epoch %d, Bad by %s:\n\t%s" % (i_epochs, metric, bad_segment)
)
bads[metric][i_epochs, chs[outliers]] = True
return bads
### bads.py
# Authors: Denis Engemann <[email protected]>
# Marijn van Vliet <[email protected]>
# License: BSD (3-clause)
import numpy as np
from mne.utils import verbose
from mne.io.pick import pick_info
from mne.io.pick import pick_types
# from mne.defaults import _handle_default
# from . import faster_ as _faster
@verbose
def find_bad_channels(
epochs,
picks=None,
method="faster",
method_params=None,
return_by_metric=False,
verbose=None,
):
"""Implements the first step of the FASTER algorithm.
This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data, to make sure only relevant
data is analyzed.
Parameters
----------
epochs : Instance of Epochs
The epochs for which bad channels need to be marked
picks : list of int | None
Channels to operate on. Defaults to EEG channels.
method : {'faster'}
The detection algorithm.
method_params : dict | None
The method parameters in a dict.
        If ``method`` equals 'faster', and ``method_params`` is None,
defaults to the following parameters. Partial updates are supported.
use_metrics : list of str
List of metrics to use. Can be any combination of:
'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
return_by_metric : bool
Whether to return the bad channels as a flat list (False, default) or
as a dictionary with the names of the used metrics as keys and the
bad channels found by this metric as values. Is ignored if not
supported by method.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
bads : list of str
The names of the bad EEG channels.
"""
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, exclude=[])
_method_params = _handle_default("bads" + "_" + method, method_params)
if method == "faster":
bads = _find_bad_channels(epochs, picks, **_method_params)
else:
raise NotImplementedError('Come back later, for now there is only "FASTER"')
if return_by_metric:
return bads
else:
return _combine_indices(bads)
@verbose
def find_bad_epochs(
epochs,
picks=None,
return_by_metric=False,
method="faster",
method_params=None,
verbose=None,
):
"""Implements the second step of the FASTER algorithm.
This function attempts to automatically mark bad epochs by performing
outlier detection.
Parameters
----------
epochs : Instance of Epochs
The epochs to analyze.
picks : list of int | None
Channels to operate on. Defaults to EEG channels.
method : {'faster'}
The detection algorithm.
method_params : dict | None
The method parameters in a dict.
        If ``method`` equals 'faster', and ``method_params`` is None,
defaults to the following parameters. Partial updates are supported.
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation'. Defaults to all of them.
thresh : float
        The threshold value, in standard deviations, to apply. An epoch
        crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
return_by_metric : bool
        Whether to return the bad epochs as a flat list (False, default) or
        as a dictionary with the names of the used metrics as keys and the
        bad epochs found by this metric as values. Is ignored if not
        supported by method.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
bads : list of int
The indices of the bad epochs.
"""
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, exclude="bads")
_method_params = _handle_default("bads" + "_" + method, method_params)
if method == "faster":
bads = _find_bad_epochs(epochs, picks, **_method_params)
else:
raise NotImplementedError('Come back later, for now there is only "FASTER"')
if return_by_metric:
return bads
else:
return _combine_indices(bads)
@verbose
def find_bad_channels_in_epochs(
    epochs, picks=None, method="faster", method_params=None, return_by_metric=False,
    verbose=None,
):
"""Implements the fourth step of the FASTER algorithm.
    This function attempts to automatically mark bad channels in each epoch by
performing outlier detection.
Parameters
----------
epochs : Instance of Epochs
The epochs to analyze.
picks : list of int | None
Channels to operate on. Defaults to EEG channels.
method : {'faster'}
The detection algorithm.
method_params : dict | None
The method parameters in a dict.
        If ``method`` equals 'faster', and ``method_params`` is None,
defaults to the following parameters. Partial updates are supported.
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation', 'median_gradient'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
return_by_metric : bool
Whether to return the bad channels as a flat list (False, default) or
as a dictionary with the names of the used metrics as keys and the
bad channels found by this metric as values. Is ignored if not
supported by method.
Returns
-------
bads : list of lists of int
For each epoch, the indices of the bad channels.
"""
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, exclude=[])
_method_params = _handle_default("bads" + "_" + method, method_params)
if method == "faster":
bads = _find_bad_channels_in_epochs(epochs, picks, **_method_params)
else:
raise NotImplementedError('Come back later, for now there is only "FASTER"')
info = pick_info(epochs.info, picks, copy=True)
if return_by_metric:
bads = dict((m, _bad_mask_to_names(info, v)) for m, v in list(bads.items()))
else:
bads = np.sum(list(bads.values()), axis=0).astype(bool)
bads = _bad_mask_to_names(info, bads)
return bads
def _bad_mask_to_names(info, bad_mask):
"""Remap mask to ch names"""
bad_idx = [np.where(m)[0] for m in bad_mask]
return [[info["ch_names"][k] for k in epoch] for epoch in bad_idx]
def _combine_indices(bads):
"""summarize indices"""
return list(set(v for val in list(bads.values()) if len(val) > 0 for v in val))
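# --- Illustrative usage sketch (not part of the original code) ---
# Runs the detectors above on a small synthetic EEG Epochs object; the channel
# names, sampling rate, epoch count and amplitudes are arbitrary assumptions.
if __name__ == "__main__":
    import mne
    rng = np.random.RandomState(0)
    n_epochs, n_channels, sfreq = 10, 8, 256.0
    data = rng.randn(n_epochs, n_channels, int(sfreq)) * 1e-6
    data[:, 0] *= 50  # make one channel an obvious outlier
    info = mne.create_info(
        ch_names=['EEG%02d' % i for i in range(n_channels)],
        sfreq=sfreq, ch_types='eeg')
    epochs = mne.EpochsArray(data, info, verbose=False)
    print('bad channels:', find_bad_channels(epochs, method_params=dict(thresh=3)))
    print('bad epochs:', find_bad_epochs(epochs, method_params=dict(thresh=3)))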
|
the-stack_106_13749
|
"""
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
"""
from datetime import timedelta
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_DISPLAY_OPTIONS
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
TIME_STR_FORMAT = '%H:%M'
OPTION_TYPES = {
'time': 'Time',
'date': 'Date',
'date_time': 'Date & Time',
'time_date': 'Time & Date',
'beat': 'Internet Time',
'time_utc': 'Time (UTC)',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISPLAY_OPTIONS, default=['time']):
vol.All(cv.ensure_list, [vol.In(OPTION_TYPES)]),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Setup the Time and Date sensor."""
if hass.config.time_zone is None:
_LOGGER.error("Timezone is not set in Home Assistant configuration")
return False
devices = []
for variable in config[CONF_DISPLAY_OPTIONS]:
devices.append(TimeDateSensor(variable))
yield from async_add_devices(devices, True)
return True
class TimeDateSensor(Entity):
"""Implementation of a Time and Date sensor."""
def __init__(self, option_type):
"""Initialize the sensor."""
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if 'date' in self.type and 'time' in self.type:
return 'mdi:calendar-clock'
elif 'date' in self.type:
return 'mdi:calendar'
else:
return 'mdi:clock'
@asyncio.coroutine
def async_update(self):
"""Get the latest data and updates the states."""
time_date = dt_util.utcnow()
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
# Calculate Swatch Internet Time.
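        # A .beat is 1/1000 of a day (86.4 s), counted from midnight in Biel
        # Mean Time (UTC+1, no DST), hence the one-hour offset below.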
time_bmt = time_date + timedelta(hours=1)
delta = timedelta(
hours=time_bmt.hour, minutes=time_bmt.minute,
seconds=time_bmt.second, microseconds=time_bmt.microsecond)
beat = int((delta.seconds + delta.microseconds / 1000000.0) / 86.4)
if self.type == 'time':
self._state = time
elif self.type == 'date':
self._state = date
elif self.type == 'date_time':
self._state = '{}, {}'.format(date, time)
elif self.type == 'time_date':
self._state = '{}, {}'.format(time, date)
elif self.type == 'time_utc':
self._state = time_utc
elif self.type == 'beat':
self._state = '@{0:03d}'.format(beat)
|
the-stack_106_13751
|
#!/usr/bin/python
from enum import Enum
from copy import copy
import random
class InvalidMoveException(Exception):
pass
class SquareState(Enum):
_ = 0
empty = 0 # Alias of _
X = 1
x = 1 # alias of X
O = 2
o = 2 # alias of O
def __str__(self):
return self.name
class Square(object):
child = None
state = SquareState['empty']
def __eq__(self, other):
if isinstance(other, SquareState):
return self.state == other
else:
return self.state == other.state
def __str__(self):
return self.state.name
class Board(object):
parent = None # this is currently unused
_index = 0
_winner = None
winning_line = None
def __init__(self, parent=None):
self.tl = Square() # Top left
self.tc = Square() # Top center
self.tr = Square() # Top right
self.ml = Square() # Middle left
self.mc = Square() # Middle centre
self.mr = Square() # Middle right
self.bl = Square() # Bottom left
self.bc = Square() # Bottom centre
self.br = Square() # Bottom right
# It would probably be better to use a 2D array, but ¯\_(ツ)_/¯
# This was a deliberate decision, to make winner() easier to read
if parent is not None:
self.parent = parent
def __iter__(self):
return copy(self)
def __next__(self):
i = self._index
self._index = self._index + 1
if i < 9:
return self.square(i // 3, i % 3)
else:
raise StopIteration
def __getitem__(self, key):
try:
return self.square(key[0], key[1])
except:
raise KeyError
    def __setitem__(self, key, value):
        try:
            item = self.square(key[0], key[1])
        except:
            raise KeyError
        if not isinstance(value, Square):
            raise ValueError("The value must be an instance of Square")
        # Rebinding the local name would not modify the board, so copy the
        # state (and any child board) onto the existing Square in place.
        item.state = value.state
        item.child = value.child
def __delitem__(self, key):
try:
item = self.square(key[0], key[1])
except:
raise KeyError
item.state = SquareState.empty
# Note that we do NOT set item.child = None
def square(self, row, col):
"""Returns the square at the relevant row, col"""
if 0 == row:
if 0 == col:
return self.tl
elif 1 == col:
return self.tc
elif 2 == col:
return self.tr
elif 1 == row:
if 0 == col:
return self.ml
elif 1 == col:
return self.mc
elif 2 == col:
return self.mr
elif 2 == row:
if 0 == col:
return self.bl
elif 1 == col:
return self.bc
elif 2 == col:
return self.br
raise TypeError(
"No such (row, column) pair: each must be in range 0-2 inclusive")
@staticmethod
def square_name(row_or_tuple, col=None):
"""Returns a human readable name of the square at row, col"""
if col is None:
# Then row_or_tuple should be a tuple, extract row and col from there
try:
row = row_or_tuple[0]
col = row_or_tuple[1]
except TypeError:
raise ValueError("Row and column are both required")
else:
# row_or_tuple is the row
row = row_or_tuple
if 0 == row:
if 0 == col:
return "top left"
elif 1 == col:
return "top centre"
elif 2 == col:
return "top right"
elif 1 == row:
if 0 == col:
return "middle left"
elif 1 == col:
return "middle centre"
elif 2 == col:
return "middle right"
elif 2 == row:
if 0 == col:
return "bottom left"
elif 1 == col:
return "bottom centre"
elif 2 == col:
return "bottom right"
raise TypeError(
"No such (row, column) pair: each must be in range 0-2 inclusive")
def winner(self):
"""Returns the winner of this board (a SquareState value, which will
be SquareState.empty if the board is a draw) or None if the board is
not yet finalized."""
# Use the cached value rather than checking again
if self._winner is not None:
return self._winner
# It is possible for multiple wins to exist (for example: last move is X
# to tl when X is already in tc, tr, ml, bl), in which case the winning_line
# will be whatever gets set last in this cascade. This doesn't *really*
# matter but it is arbitrary and therefore not elegant.
if self.tl != SquareState.empty:
if self.tl == self.tc == self.tr:
self.winning_line = ((0,0), (0,2))
self._winner = self.tl.state
if self.tl == self.ml == self.bl:
self.winning_line = ((0,0), (2,0))
self._winner = self.tl.state
if self.br != SquareState.empty:
if self.br == self.bc == self.bl:
self.winning_line = ((2,0), (2,2))
self._winner = self.br.state
if self.br == self.mr == self.tr:
self.winning_line = ((0,2), (2,2))
self._winner = self.br.state
if self.mc != SquareState.empty:
if self.mc == self.tl == self.br:
self.winning_line = ((0,0), (2,2))
self._winner = self.mc.state
if self.mc == self.bl == self.tr:
self.winning_line = ((2,0), (0,2))
self._winner = self.mc.state
if self.mc == self.ml == self.mr:
self.winning_line = ((1,0), (1,2))
self._winner = self.mc.state
if self.mc == self.tc == self.bc:
self.winning_line = ((0,1), (2,1))
self._winner = self.mc.state
if self._winner is None:
# Nobody has won, but the board might be full. Start from that assumption
# and reset _winner to None if we find an empty square.
self._winner = SquareState['empty']
for square in self:
if square == SquareState.empty:
# There is at least one empty square, so board is still active
self._winner = None
break
return self._winner
# This __str__ is super ick but is kind of useful in debugging
def __str__(self):
s = ""
for c in self:
s = "{}\n{}: ".format(s, c)
if c.child:
a = ""
for k in c.child:
a = "{}{}".format(a, k)
s = "{}{}".format(s, a)
return s
class Player(object):
def __init__(self, token, name=None):
try:
if token.lower() in ('x', 'o'):
token = SquareState[token]
except:
if token != SquareState.X and token != SquareState.O:
raise ValueError("token must be SquareState.X or SquareState.O")
self.token = token
        if name is None:
            self.name = token.name
        else:
            self.name = name
class Move(object):
def __init__(self, game, player, child_board, square):
self.game = game
self.player = player
self.child_board = child_board
self.square = square
def __str__(self):
return "{} played in the {} square of the {} board".format(
self.player.name,
Board.square_name(self.square),
Board.square_name(self.child_board)
)
class Game(object):
last_move = None
_log_functions = []
moves = []
# child_win and overall_win are flags that should be reset after they are read
child_win = None # if not None, a tuple (child_board, winning_player)
overall_win = None # if not None, the winning_player
def __init__(self, starting_player=None):
        # Keep per-game state on the instance so that registered log functions,
        # the move history and last_move are not shared between Game objects.
        self.last_move = None
        self._log_functions = []
        self.moves = []
        self.main_board = Board()
for i in range(3):
for j in range(3):
self.main_board[(i, j)].child = Board(self.main_board)
if starting_player is None:
if random.choice('xo') == 'x':
self.active_player = SquareState['X']
else:
self.active_player = SquareState['O']
else:
try:
if starting_player.lower() in ('x', 'o'):
starting_player = SquareState[starting_player]
except:
if starting_player != SquareState.X and starting_player != SquareState.O:
raise ValueError("The starting_player must be SquareState.X or SquareState.O")
self.active_player = starting_player
self.log_status("{} to play".format(self.active_player.name))
# All boards start active
self.active_boards = [(i, j) for i in range(3) for j in range(3)]
def add_log_function(self, fun):
self._log_functions.append(fun)
def log_status(self, status):
print(status)
for f in self._log_functions:
f(status)
def play(self, child_board, square):
"""Progress state by having self.active_player play on square in child_board.
Each of child_board and square to be specified as (row, col) tuples."""
        # Can't play if child_board has been won
        # Can't play if the square is occupied
        # Can't play except in accordance with the available_boards() rule
        if (self.main_board[child_board].child.winner() is not None
                or self.main_board[child_board].child[square] != SquareState.empty
                or child_board not in self.available_boards()):
            raise InvalidMoveException
self.main_board[child_board].child[square].state = self.active_player # Record the play
self.log_status("{} played in the {} square of the {} board".format(
self.active_player.name,
Board.square_name(square),
Board.square_name(child_board)
))
#moves.append(Move(
# self,
# self.active_player,
# child_board,
# square
# ))
# Check to see if this move resulted in child_board being won
board_winner = self.main_board[child_board].child.winner()
if board_winner is not None:
self.main_board[child_board].state = self.active_player
self.log_status("{} won the {} board with a line from {} to {}".format(
self.active_player,
Board.square_name(child_board),
Board.square_name(self.main_board[child_board].child.winning_line[0]),
Board.square_name(self.main_board[child_board].child.winning_line[1])
))
self.active_boards.remove(child_board)
self.child_win = (child_board, self.active_player)
# Check to see if this move finished the game
board_winner = self.main_board.winner()
if board_winner is not None:
self.log_status("{} won the game overall with a line from {} to {}".format(
self.active_player,
Board.square_name(self.main_board.winning_line[0]),
Board.square_name(self.main_board.winning_line[1])
))
self.overall_win = self.active_player
self.last_move = (square[0], square[1]) # needed for available_boards()
if self.active_player == SquareState.X:
self.active_player = SquareState['O']
#self.log_status("{} to play".format(self.active_player))
elif self.active_player == SquareState.O:
self.active_player = SquareState['X']
#self.log_status("{} to play".format(self.active_player))
def available_boards(self):
"""Returns child boards which are available for this move, based on the following rules:
1. if it's the first move, you can play anywhere
2. the child board available for play is that board which is in the same position on the
main board as the last move was on its child board, subject to (3)
3. if the child board in accordance with (2) is unplayable (being full or already won)
then you can play anywhere
4. you can never play on a board that is full or which has been already won"""
if self.last_move is not None and self.main_board[self.last_move].child.winner() is None:
return [self.last_move]
        # Iterate over a copy: removing entries from the list while iterating it
        # would skip boards.
        for b in list(self.active_boards):
if self.main_board[b].child.winner() is not None:
self.active_boards.remove(b)
return self.active_boards
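# --- Illustrative usage sketch (not part of the original module) ---
# Plays a couple of moves on a fresh game; boards and squares are addressed
# with (row, col) tuples in the range 0-2.
if __name__ == '__main__':
    game = Game(starting_player='x')
    print('Boards available for the first move:', game.available_boards())
    game.play((1, 1), (0, 0))   # X plays the top-left square of the centre board
    game.play((0, 0), (1, 1))   # O must now play somewhere on the top-left board
    print('Next playable boards:', game.available_boards())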
|
the-stack_106_13752
|
from typing import Optional
# Definition for singly-linked list (normally provided by LeetCode; defined
# here so the snippet runs standalone).
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:
fast = slow = head
for _ in range(n):
fast = fast.next
if not fast:
return head.next
while fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return head
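# --- Illustrative usage sketch (not part of the original snippet) ---
# Removes the 2nd node from the end of 1 -> 2 -> 3 -> 4 -> 5, leaving 1 -> 2 -> 3 -> 5.
if __name__ == "__main__":
    head = None
    for val in reversed([1, 2, 3, 4, 5]):
        head = ListNode(val, head)
    node = Solution().removeNthFromEnd(head, 2)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [1, 2, 3, 5]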
|
the-stack_106_13753
|
from __future__ import print_function
import datetime
import sys
from src.myThread.sarjob import create_sar_job, s
sys.path.append('.')
sys.path.append('..')
if __name__ == '__main__':
start_time = datetime.datetime.now().replace(microsecond=0)
print('Sample start: {}'.format(start_time))
try:
s.enter(5, 1, create_sar_job, (s,))
s.run()
except Exception as err:
print(err)
raise
print()
input('Press ENTER to exit...')
|
the-stack_106_13754
|
"""
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev != -1 and rprev != rank:
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext != -1 and rnext != rank:
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
        # all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind((hostIP, port))
self.port = port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
"""
        get environment variables for slaves
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
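    # Example of the resulting topology (added comment, not in the original):
    # for nslave=7, get_tree builds a binary tree rooted at rank 0:
    #   tree_map   = {0: [1, 2], 1: [0, 3, 4], 2: [0, 5, 6], 3: [1], 4: [1], 5: [2], 6: [2]}
    #   parent_map = {0: -1, 1: 0, 2: 0, 3: 1, 4: 1, 5: 2, 6: 2}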
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, nslave):
"""
        get the link map; this is a bit hacky and calls for a better algorithm
        to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in list(ring_map.items()):
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in list(tree_map.items()):
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in list(parent_map.items()):
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
        # set of nodes that have finished the job
        shutdown = {}
        # set of nodes that are waiting for connections
        wait_conn = {}
        # maps job id to rank
        job_map = {}
        # list of workers that are pending rank assignment
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
                # set of nodes that are still pending start-up
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert len(todo_nodes) != 0
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
                        logging.debug('Receive %s signal from %s; assign rank %d',
s.cmd, s.host, s.rank)
if len(todo_nodes) == 0:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
        logging.info('@tracker All nodes finished the job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
        while self.thread.is_alive():
self.thread.join(100)
class PSTracker(object):
"""
Tracker module for PS
"""
def __init__(self, hostIP, cmd, port=9091, port_end=9999, envs=None):
"""
Starts the PS scheduler
"""
self.cmd = cmd
if cmd is None:
return
envs = {} if envs is None else envs
self.hostIP = hostIP
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for port in range(port, port_end):
try:
sock.bind(('', port))
self.port = port
sock.close()
break
except socket.error:
continue
env = os.environ.copy()
env['DMLC_ROLE'] = 'scheduler'
env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
env['DMLC_PS_ROOT_PORT'] = str(self.port)
for k, v in list(envs.items()):
env[k] = str(v)
self.thread = Thread(
target=(lambda: subprocess.check_call(self.cmd, env=env, shell=True)), args=())
        self.thread.daemon = True
self.thread.start()
def join(self):
if self.cmd is not None:
            while self.thread.is_alive():
self.thread.join(100)
def slave_envs(self):
if self.cmd is None:
return {}
else:
return {'DMLC_PS_ROOT_URI': self.hostIP,
'DMLC_PS_ROOT_PORT': self.port}
def get_host_ip(hostIP=None):
if hostIP is None or hostIP == 'auto':
hostIP = 'ip'
if hostIP == 'dns':
hostIP = socket.getfqdn()
elif hostIP == 'ip':
from socket import gaierror
try:
hostIP = socket.gethostbyname(socket.getfqdn())
except gaierror:
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
hostIP = socket.gethostbyname(socket.gethostname())
if hostIP.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(('10.255.255.255', 1))
hostIP = s.getsockname()[0]
return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
if nserver == 0:
pscmd = None
envs = {'DMLC_NUM_WORKER' : nworker,
'DMLC_NUM_SERVER' : nserver}
hostIP = get_host_ip(hostIP)
if nserver == 0:
rabit = RabitTracker(hostIP=hostIP, nslave=nworker)
envs.update(rabit.slave_envs())
rabit.start(nworker)
else:
pserver = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
envs.update(pserver.slave_envs())
fun_submit(nworker, nserver, envs)
if nserver == 0:
rabit.join()
else:
pserver.join()
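# --- Illustrative sketch (not part of the original tracker) ---
# Shows the shape of the ``fun_submit`` callback expected by submit(): it
# receives the worker/server counts plus the tracker environment variables and
# is responsible for launching the worker processes. The command string
# 'python worker.py' is a placeholder assumption.
def _example_submit_locally(nworker=2):
    def fun_submit(nworker, nserver, envs):
        env = os.environ.copy()
        env.update({k: str(v) for k, v in envs.items()})
        for _ in range(nworker):
            subprocess.Popen('python worker.py', shell=True, env=env)
    submit(nworker, nserver=0, fun_submit=fun_submit, hostIP='auto')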
def start_rabit_tracker(args):
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {'DMLC_NUM_WORKER' : args.num_workers,
'DMLC_NUM_SERVER' : args.num_servers}
rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
envs.update(rabit.slave_envs())
rabit.start(args.num_workers)
sys.stdout.write('DMLC_TRACKER_ENV_START\n')
# simply write configuration to stdout
for k, v in list(envs.items()):
sys.stdout.write('%s=%s\n' % (k, str(v)))
sys.stdout.write('DMLC_TRACKER_ENV_END\n')
sys.stdout.flush()
rabit.join()
def main():
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description='Rabit Tracker start.')
parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker processes to be launched.')
    parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server processes to be launched. Only used in PS jobs.')
parser.add_argument('--host-ip', default=None, type=str,
help=('Host IP addressed, this is only needed ' +
'if the host IP cannot be automatically guessed.'))
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help='Logging level of the logger.')
args = parser.parse_args()
fmt = '%(asctime)s %(levelname)s %(message)s'
if args.log_level == 'INFO':
level = logging.INFO
elif args.log_level == 'DEBUG':
level = logging.DEBUG
else:
raise RuntimeError("Unknown logging level %s" % args.log_level)
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
|
the-stack_106_13758
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Any, Optional, Sequence
from hydra.core.utils import JobReturn
from hydra.plugins.launcher import Launcher
from hydra.types import HydraContext, TaskFunction
from omegaconf import DictConfig, OmegaConf
from .config import RQLauncherConf
log = logging.getLogger(__name__)
class RQLauncher(Launcher):
def __init__(self, **params: Any) -> None:
"""RQ Launcher
Launches jobs using RQ (Redis Queue). For details, refer to:
https://python-rq.org
"""
self.config: Optional[DictConfig] = None
self.task_function: Optional[TaskFunction] = None
self.hydra_context: Optional[HydraContext] = None
self.rq = OmegaConf.structured(RQLauncherConf(**params))
def setup(
self,
*,
hydra_context: HydraContext,
task_function: TaskFunction,
config: DictConfig,
) -> None:
self.config = config
self.task_function = task_function
self.hydra_context = hydra_context
def launch(
self, job_overrides: Sequence[Sequence[str]], initial_job_idx: int
) -> Sequence[JobReturn]:
from . import _core
return _core.launch(
launcher=self, job_overrides=job_overrides, initial_job_idx=initial_job_idx
)
|
the-stack_106_13759
|
""" Base emmet model to add default metadata """
from datetime import datetime
from typing import TypeVar, Dict
from pydantic import BaseModel, Field
from pymatgen.core import __version__ as pmg_version
from emmet.core import __version__
T = TypeVar("T", bound="EmmetBaseModel")
class EmmetMeta(BaseModel):
"""
Default emmet metadata
"""
emmet_version: str = Field(
__version__, description="The version of emmet this document was built with"
)
pymatgen_version: str = Field(
pmg_version, description="The version of pymatgen this document was built with"
)
pull_request: int = Field(
None, description="The pull request number associated with this data build"
)
database_version: str = Field(
None, description="The database version for the built data"
)
build_date: datetime = Field(
default_factory=datetime.utcnow, description="The build date for this document",
)
class EmmetBaseModel(BaseModel):
"""
Base Model for default emmet data
"""
builder_meta: EmmetMeta = Field(
default_factory=EmmetMeta, description="Builder metadata"
)
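# --- Illustrative usage sketch (not part of the original module) ---
# Any document model that subclasses EmmetBaseModel picks up the builder
# metadata automatically; the ``task_id`` field below is a made-up example.
if __name__ == "__main__":
    class ExampleDoc(EmmetBaseModel):
        task_id: str = Field(None, description="Hypothetical identifier")
    doc = ExampleDoc(task_id="mp-149")
    print(doc.builder_meta.emmet_version, doc.builder_meta.pymatgen_version)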
|
the-stack_106_13762
|
import numpy as np
import cv2 as cv
cap = cv.VideoCapture(0)
# count = 0
# while(count<10):
while(True):
#=== Capture frame-by-frame
ret, frame = cap.read()
#=== Our operations on the frame come here
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
#=== Display the resulting frame
# cv.imshow(str(count),gray)
cv.imshow('image',gray)
# count = count + 1
if cv.waitKey(1) & 0xFF == ord('q'):
break
# ===When everything done, release the capture
# cv.waitKey(0)
cap.release()
cv.destroyAllWindows()
|
the-stack_106_13763
|
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import os
import sys
from pathlib import Path
import io
import importlib.machinery
import importlib.util
from docutils.core import publish_doctree
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import flag, register_directive
import pytest
from pymor.tools.io import change_to_directory
from pymortests.base import runmodule
from pymortests.demos import _test_demo
TUT_DIR = Path(os.path.dirname(__file__)).resolve() / 'source'
_exclude_files = []
EXCLUDE = [TUT_DIR / t for t in _exclude_files]
TUTORIALS = [t for t in TUT_DIR.glob('tutorial_*rst') if t not in EXCLUDE]
TUTORIALS += [t for t in TUT_DIR.glob('tutorial_*md') if t not in EXCLUDE]
class CodeCell(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'hide-output': flag,
'hide-code': flag,
'raises': flag}
has_content = True
def run(self):
self.assert_has_content()
if 'raises' in self.options:
text = 'try:\n ' + '\n '.join(
self.content) + '\nexcept:\n import traceback; traceback.print_exc()'
else:
text = '\n'.join(self.content)
print('# %%')
print(text)
print()
return []
@pytest.fixture(params=TUTORIALS, ids=[t.name for t in TUTORIALS])
def tutorial_code(request):
filename = request.param
with change_to_directory(TUT_DIR):
code = io.StringIO()
register_directive('jupyter-execute', CodeCell)
with open(filename, 'rt') as f:
original = sys.stdout
sys.stdout = code
publish_doctree(f.read(), settings_overrides={'report_level': 42})
sys.stdout = original
code.seek(0)
source_fn = Path(f'{str(filename).replace(".rst", "_rst")}_extracted.py')
with open(source_fn, 'wt') as source:
# filter line magics
source.write(''.join([line for line in code.readlines() if not line.startswith('%')]))
return request.param, source_fn
def test_tutorial(tutorial_code):
filename, source_module_path = tutorial_code
# make sure (picture) resources can be loaded as in sphinx-build
with change_to_directory(TUT_DIR):
def _run():
loader = importlib.machinery.SourceFileLoader(source_module_path.stem, str(source_module_path))
spec = importlib.util.spec_from_loader(loader.name, loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)
try:
# wrap module execution in hacks to auto-close Qt-Apps, etc.
_test_demo(_run)
except Exception as e:
print(f'Failed: {source_module_path}')
raise e
if __name__ == "__main__":
runmodule(filename=__file__)
|
the-stack_106_13764
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import ResultPayload, validate_fields
from fn_aws_guardduty.lib.aws_gd_client import AwsGdClient
import fn_aws_guardduty.util.config as config
PACKAGE_NAME = "fn_aws_guardduty"
REQUIRED_FIELDS = ["aws_gd_finding_id", "aws_gd_detector_id", "aws_gd_region"]
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'func_aws_guardduty_archive_finding''"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get(PACKAGE_NAME, {})
self.opts = opts
validate_fields(config.REQUIRED_CONFIG_SETTINGS, self.options)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get(PACKAGE_NAME, {})
self.opts = opts
validate_fields(config.REQUIRED_CONFIG_SETTINGS, self.options)
@function("func_aws_guardduty_archive_finding")
def _func_aws_guardduty_archive_finding_function(self, event, *args, **kwargs):
"""Function: Archive an AWS GuardDuty finding.
:param aws_gd_finding_id: An AWS GuardDuty finding ID.
:param aws_gd_detector_id: An AWS GuardDuty detector ID.
:param aws_gd_region: An AWS GuardDuty region ID.
"""
try:
# Get the wf_instance_id of the workflow this Function was called in
wf_instance_id = event.message["workflow_instance"]["workflow_instance_id"]
yield StatusMessage("Starting 'func_aws_guardduty_archive_finding' running in workflow '{0}'".format(wf_instance_id))
rp = ResultPayload(PACKAGE_NAME, **kwargs)
# Get the function parameters:
aws_gd_region = kwargs.get("aws_gd_region") # text
aws_gd_finding_id = kwargs.get("aws_gd_finding_id") # text
aws_gd_detector_id = kwargs.get("aws_gd_detector_id") # text
validate_fields(REQUIRED_FIELDS, kwargs)
log = logging.getLogger(__name__)
log.info("aws_gd_region: %s", aws_gd_region)
log.info("aws_gd_finding_id: %s", aws_gd_finding_id)
log.info("aws_gd_detector_id: %s", aws_gd_detector_id)
# Instantiate AWS GuardDuty client object.
aws_gd = AwsGdClient(self.opts, self.options, region=aws_gd_region)
result = aws_gd.post("archive_findings", DetectorId=aws_gd_detector_id, FindingIds=[aws_gd_finding_id])
results = rp.done(True, result)
yield StatusMessage("Finished 'func_aws_guardduty_archive_finding' that was running in workflow '{0}'".format(wf_instance_id))
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
yield FunctionError()
|
the-stack_106_13766
|
import pathlib
import pytest
from cryp_to_go import path_handler
def test_init():
inst = path_handler.SubPath('foo/bar')
assert str(inst.relative_path) == 'foo/bar'
inst = path_handler.SubPath(pathlib.Path('foo/bar'))
assert str(inst.relative_path) == 'foo/bar'
with pytest.raises(ValueError, match="only relative"):
path_handler.SubPath('/foo/bar')
with pytest.raises(ValueError, match="not allowed"):
path_handler.SubPath('foo/../bar')
@pytest.mark.parametrize("input_path,target", [
('foo/bar', 'foo/bar'),
(pathlib.Path('foo/bar'), 'foo/bar'),
('/foo/bar', '/foo/bar'),
(pathlib.Path('/foo/bar'), '/foo/bar'),
])
def test_to_path(input_path, target):
path = path_handler.SubPath.to_path(input_path)
assert isinstance(path, pathlib.Path)
assert str(path) == target
def test_str():
path = path_handler.SubPath('foo/bar')
assert str(path) == 'foo/bar'
@pytest.mark.parametrize("path_parent", ['/foo', pathlib.Path('/foo')])
def test_absolute_path(path_parent):
path_rel = path_handler.SubPath('bar/bar')
path_abs = path_rel.absolute_path(path_parent)
assert isinstance(path_abs, pathlib.Path)
assert str(path_abs) == '/foo/bar/bar'
@pytest.mark.parametrize("path", ['foo/bar', pathlib.Path('foo/bar')])
def test_from_any_path(path):
subpath = path_handler.SubPath(path)
assert isinstance(subpath, path_handler.SubPath)
assert str(subpath) == 'foo/bar'
def test_slashed_string():
subpath = path_handler.SubPath('foo')
assert subpath.slashed_string == 'foo'
# overwrite internal relative path with PurePath in different flavors
subpath.relative_path = pathlib.PurePosixPath('foo/bar')
assert subpath.slashed_string == 'foo/bar'
subpath.relative_path = pathlib.PureWindowsPath(r'foo\bar')
assert subpath.slashed_string == 'foo/bar'
|
the-stack_106_13767
|
# /usr/bin/env python3
# -*- coding:utf-8 -*-
#
# 02_led_blunk.py
#
import sys
import time
import pigpio
LED_PORT = 18
PWM_FREQUENCY = 500 # Hz
RANGE = 100
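# With the PWM range set to 100, the duty values passed to set_PWM_dutycycle
# below behave like percentages (100 = fully on).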
def led_blunk(hostname):
# Software PWM
pi = pigpio.pi(hostname)
pi.set_mode(LED_PORT, pigpio.OUTPUT)
pi.set_PWM_frequency(LED_PORT, PWM_FREQUENCY)
pi.set_PWM_range(LED_PORT, RANGE)
    # Blink the LED for two seconds at each duty cycle (on/off twice, 0.5 s each)
for duty in (5, 20, 50, 80, 100):
for i in range(2):
pi.set_PWM_dutycycle(LED_PORT, duty)
time.sleep(0.5)
pi.set_PWM_dutycycle(LED_PORT, 0)
time.sleep(0.5)
    # Switch the pin back to INPUT mode, otherwise the LED keeps glowing
pi.set_mode(LED_PORT, pigpio.INPUT)
pi.stop()
if __name__ == '__main__':
hostname = 'localhost'
if len(sys.argv) > 1:
hostname = sys.argv[1]
led_blunk(hostname)
|
the-stack_106_13768
|
_base_ = [
'../../_base_/models/tsm_r50.py', '../../_base_/schedules/sgd_tsm_50e.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(num_segments=16),
cls_head=dict(num_classes=174, num_segments=16))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(
type='MultiScaleCrop',
input_size=224,
scales=(1, 0.875, 0.75, 0.66),
random_crop=False,
max_wh_scale_gap=1,
num_fixed_crops=13),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=16,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=16,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=6,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(
lr=0.0075, # this lr is used for 8 gpus
weight_decay=0.0005)
# runtime settings
work_dir = './work_dirs/tsm_r50_1x1x16_50e_sthv2_rgb/'
|
the-stack_106_13769
|
"""
@reference:
Saving a Python dict in pickle format
https://blog.csdn.net/rosefun96/article/details/90633786
"""
import os
import pickle
import json
from logging import DEBUG
from src.modules.logger import MyLogger
class Cache(object):
def __init__(self, name, local_dir='./cache', logger=None):
self._name = name
self._local_dir = local_dir
self._logger = logger if logger else MyLogger('cache', DEBUG)
self._cache = dict()
def add(self, key, val):
if key in self._cache:
            self._logger.warning(f'key exists already, add failed.')
return False
else:
self._cache[key] = val
return True
def put(self, key, val):
self._cache[key] = val
def get(self, key, default):
return default if key not in self._cache else self._cache[key]
def __iter__(self):
for key in self._cache:
yield key
def __setitem__(self, key, val):
self._cache[key] = val
def __getitem__(self, key):
return self._cache[key]
def __repr__(self):
return json.dumps(self._cache, indent=4)
class LocalCache(Cache):
def __init__(self, name, local_dir='./cache', logger=None):
super(LocalCache, self).__init__(name, local_dir, logger)
self.local_path = ''
def store(self, local_path=''):
if not local_path:
if not os.path.exists(self._local_dir):
os.makedirs(self._local_dir, exist_ok=True)
self._logger.warning(f'{self._local_dir} did not exist, and has been created now.')
local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
with open(local_path, 'wb') as fw: # Pickling
pickle.dump(self._cache, fw, protocol=pickle.HIGHEST_PROTOCOL)
self.local_path = local_path
def load(self, local_path=''):
if not local_path:
local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
with open(local_path, 'rb') as fr:
self._cache = pickle.load(fr)
self.local_path = local_path
def smart_load(self, local_path=''):
"""
:param local_path:
:return:
            Don't raise an error if the cache file does not exist.
"""
if not local_path:
local_path = f'{os.path.join(self._local_dir, self._name)}.pkl'
if os.path.exists(local_path):
with open(local_path, 'rb') as fr:
cache = pickle.load(fr)
self._cache = cache
self.local_path = local_path
else:
self._logger.warning(f'load failed. "{local_path} does not exist."')
def clear(self):
"""
:return:
            clear both the in-memory cache and the local pickle file.
"""
self._cache = dict()
if self.local_path:
os.remove(self.local_path)
self.local_path = ''
@classmethod
def load_from(cls, local_path):
        if not os.path.exists(local_path):
            raise FileNotFoundError(local_path)
cache_name = os.path.basename(local_path).split('.')[0]
new = LocalCache(cache_name)
new.load(local_path)
        new.local_path = local_path  # record this local_path; it is needed when clear() removes the file
return new
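# A minimal usage sketch (not part of the original module); the key/value pairs
# and the default './cache' directory below are illustrative assumptions.
if __name__ == '__main__':
    cache = LocalCache('demo')
    cache.put('answer', 42)
    cache['question'] = 'life'
    cache.store()            # writes ./cache/demo.pkl and remembers the path
    restored = LocalCache('demo')
    restored.smart_load()    # loads the pickle; only warns if the file is missing
    print(restored.get('answer', None))
    restored.clear()         # clears memory and deletes the pickle file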
|
the-stack_106_13770
|
from apps.api.forms.catalogs import CatalogForm
from apps.core.models import Catalog, UserCatalog
class CatalogService:
def populate(self, catalog: Catalog, form: CatalogForm) -> Catalog:
form.populate(catalog)
catalog.save()
if 'users' in form.cleaned_data:
UserCatalog.objects.filter(catalog=catalog).delete()
for item in form.cleaned_data['users']:
user_catalog = UserCatalog.objects.create(
catalog=catalog,
user=item['user_id'],
mode=item['mode']
)
user_catalog.save()
return catalog
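# Hypothetical call site (a sketch, not part of the original service): how a bound
# CatalogForm could be wired into the service. The helper name is an assumption.
def save_catalog(form: CatalogForm, catalog: Catalog = None) -> Catalog:
    if not form.is_valid():
        raise ValueError(form.errors)
    return CatalogService().populate(catalog or Catalog(), form)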
|
the-stack_106_13771
|
# requests is a module that allows you to send HTTP requests
import requests
# json is for handling JSON files
import json
# simplekml is for generating KML files
import simplekml
# The geographic location of your virtual radar
Latitude = "52.0688114"
Longitude = "19.4709897"
# Range of your virtual radar in kilometers
# A range that is too high may cause performance issues.
Radius = "450"
# ADS-B Data URL
URL = "https://public-api.adsbexchange.com/VirtualRadar/AircraftList.json?lat="+Latitude+"&lng="+Longitude+"&fDstU="+Radius
# Unfortunately adsbexchange.com blocks requests that use the default Python User-Agent, so we have to spoof a browser User-Agent
Headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
# Download data from given URL
RawFlightData = requests.get(URL, headers=Headers)
# Parse downloaded JSON
FlightData = json.loads(RawFlightData.content)
# Now it's time to generate our map
kml = simplekml.Kml()
for airplane in FlightData["acList"]:
point = kml.newpoint()
point.coords = [(airplane["Long"],airplane["Lat"])]
point.name = str(airplane["Icao"])
description = ""
if("Reg" in airplane):
description += "Registration number: "+str(airplane["Reg"])+"\n<br>"
if("Alt" in airplane):
description += "Altitude: "+str(airplane["Alt"])+" ft.\n<br>"
if("Spd" in airplane):
description += "Ground speed: "+str(airplane["Spd"])+" knots\n<br>"
if("Trak" in airplane):
description += "Heading: "+str(airplane["Trak"])+"°\n<br>"
point.style.iconstyle.rotation = round(airplane["Trak"])
if("Op" in airplane):
description += "Operator: "+airplane["Op"]
point.description = description
# Save our KML file
kml.save("./radar.kml")
|
the-stack_106_13774
|
import argparse
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing import sequence
from keras import backend as K
from keras.models import Model
from keras.layers import recurrent, Embedding, Dropout, Dense, Bidirectional, Concatenate, Input
from keras.optimizers import SGD, Adam
from sklearn.metrics import classification_report, accuracy_score
from evidencedetection.vectorizer import TokenizingEmbeddingVectorizer
def parse_arguments():
    parser = argparse.ArgumentParser("Trains a simple BiLSTM to link hypotheses to evidence sentences across multiple topics.")
parser.add_argument("--embeddings", type=str, help="The path to the embedding folder.")
parser.add_argument("--data", type=str, help="The path to the folder containing the TSV files with the training data.")
return parser.parse_args()
def read_data(data_path):
data = pd.read_csv(data_path, sep=",", quotechar="'", header=0, index_col=0)
return data
def create_model(units, lr, hypothesis_max_length, sentence_max_length, embeddings, seed):
# set random seed to Keras and TensorFlow
tf.set_random_seed(seed)
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
# K.set_session(sess)
hyp_inpt = Input([hypothesis_max_length], name="hypothesis_input", dtype="int32")
hyp_emb = Dropout(0.5, seed=seed)(Embedding(embeddings.shape[0],
embeddings.shape[1],
weights=[embeddings],
mask_zero=True,
name="hyp_emb")(hyp_inpt))
hyp_bilstm = Bidirectional(recurrent.LSTM(units[0], return_sequences=False, name="hyp_lstm" ))(hyp_emb)
content_inpt = Input([sentence_max_length], name="evidence_input", dtype="int32")
content_emb = Dropout(0.5, seed=seed)(Embedding(embeddings.shape[0],
embeddings.shape[1],
weights=[embeddings],
mask_zero=True,
name="content_emb")(content_inpt))
evidence_bilstm = Bidirectional(recurrent.LSTM(units[0], return_sequences=False, name="evidence_lstm"))(content_emb)
hidden = Dropout(0.5, seed=seed)(Concatenate()([hyp_bilstm, evidence_bilstm]))
if len(units) > 1: # add dense layer if required
hidden = Dropout(0.5, seed=seed)(Dense(units=units[1], activation="relu", name="hidden_dense")(hidden))
classifier = Dense(units=2, activation="softmax", name="dense")(hidden)
model = Model(inputs=[hyp_inpt, content_inpt], outputs=classifier)
optimizer = Adam(lr=lr)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
print(model.summary())
return model
if "__main__"==__name__:
args = parse_arguments()
data = read_data(args.data)
motion_groups = data.groupby("motion")
test_motion = "This house believes atheism is the only way"
test_group = motion_groups.get_group(test_motion)
train_links = list()
train_labels = list()
for name, group in motion_groups:
if name == test_motion: continue
links_frame = group[["hypothesis", "evidence"]]
links = links_frame.values
train_links.extend(links.tolist())
train_labels.extend(group["label"].values.tolist())
train_links = np.array(train_links)
train_labels = np.array(train_labels) == "link"
label_array = np.array(train_labels)
two_d_train_labels = np.zeros((label_array.shape[0], 2))
two_d_train_labels[np.where(label_array==0), 0] = 1
two_d_train_labels[np.where(label_array==1), 1] = 1
hypotheses = train_links[:, 0]
sentences = train_links[:, 1]
vectorizer = TokenizingEmbeddingVectorizer(args.embeddings)
tokenized_hypotheses = vectorizer.tokenize_sentences(hypotheses)
tokenized_sentences = vectorizer.tokenize_sentences(sentences)
hypothesis_max_length = max(map(lambda s: len(s.split(" ")), hypotheses))
sentence_max_length = max(map(lambda s: len(s.split(" ")), sentences))
model = create_model([100], 0.002, hypothesis_max_length, sentence_max_length, vectorizer.embeddings, 0)
vectorized_hypotheses = vectorizer.sentences_to_padded_indices(hypotheses, hypothesis_max_length)
vectorized_sentences = vectorizer.sentences_to_padded_indices(sentences, sentence_max_length)
print("hypotheses.shape: ", vectorized_hypotheses.shape)
print("hypotheses: ", vectorized_hypotheses)
print("sentences.shape: ", vectorized_sentences.shape)
print("sentences: ", vectorized_sentences)
model.fit({"hypothesis_input":vectorized_hypotheses, "evidence_input": vectorized_sentences}, two_d_train_labels, epochs=30, verbose=True)
# model.fit({"hypothesis_input":vectorized_hypotheses[::10], "evidence_input": vectorized_sentences[::10]}, two_d_train_labels[::10], epochs=5, verbose=True)
test_links = test_group[["hypothesis", "evidence"]].values
test_labels = test_group["label"].values == "link"
test_hypotheses = test_links[:, 0]
test_sentences = test_links[:, 1]
test_vectorized_hypotheses = vectorizer.sentences_to_padded_indices(test_hypotheses, hypothesis_max_length)
test_vectorized_sentences = vectorizer.sentences_to_padded_indices(test_sentences, sentence_max_length)
predictions = model.predict({"hypothesis_input": test_vectorized_hypotheses, "evidence_input": test_vectorized_sentences})
preds = np.argmax(predictions, axis=1)
print(predictions)
print(classification_report(test_labels, preds, target_names=["no-link", "link"]))
print(accuracy_score(test_labels, preds))
model_json = model.to_json()
with open("../models/hypothesisevidencelinking-en.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save("../models/hypothesisevidencelinking-en.h5")
|
the-stack_106_13775
|
# qubit number=3
# total number=25
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
    # invert the last qubit (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC137.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_106_13776
|
import discord
from os import path
from discord.ext import commands
import requests
from datetime import datetime, timezone
import json
from time import time
class CTF(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.header = {'User-Agent': 'CyberBot'}
self.fp = path.dirname(__file__)
self.rp = 'data/teams.json'
self.file_path = path.join(self.fp, self.rp)
self.ctftime = 'https://ctftime.org/'
if not path.exists(self.file_path):
with open(self.file_path, 'w') as f:
f.write('{}')
@commands.command(name='ctf', aliases=['ctfs','upcoming'])
async def upcoming(self, ctx, limit=7):
'''!ctf (Optional: limit) : Returns 3-12 upcoming ctf events. Default = 7.'''
if limit < 3:
limit = 3
if limit > 12:
limit = 12
begin = int(datetime.now(timezone.utc).timestamp()) + 259200 # Converting current time in UTC to Epoch time
end = begin + 5184000 # Max end date is 2 months in the event max limit is not hit
url = f'https://ctftime.org/api/v1/events/?limit={limit}&start={begin}&finish={end}' # Skips first few days from present that are likely closed
request = requests.get(url, headers=self.header)
try:
results = request.json()
except json.decoder.JSONDecodeError:
print(f'[{datetime.now(timezone.utc)}] CTF Events were not properly fetched. HTTP Code: {request.status_code}')
return await ctx.send('Oh no, I can\'t fetch the events! Please try again later.')
embed = discord.Embed(
title = 'CTFTime Events',
description = f'Showing up to {limit} events',
color = discord.Color.red()
)
for event in results:
restrictions = event['restrictions']
if restrictions != 'Open':
continue
#String formatting done below to change time to UTC, may base time off of Discord server location later on
start = datetime.strptime(event['start'][:-6], "%Y-%m-%dT%H:%M:%S")
start_formatted = start.strftime("%a, %d %B %Y at %H:%M (%I:%M%p) UTC")
try:
title = event['title']
format = event['format']
event_url = event['url']
ctftime_url = event['ctftime_url']
except KeyError:
return await ctx.send('Sorry, there seems to be an issue retrieving events. Please try again!')
embed.add_field(name=f'[{format}] {title} ({event_url})', value=f'{start_formatted}\nMore Info: {ctftime_url}', inline=False)
embed.set_footer(text=f'Powered by the CTFTime API')
await ctx.send(embed=embed)
@commands.command(name='setteam', aliases=['teamset'])
@commands.has_permissions(administrator=True)
async def set_team(self, ctx, id):
'''!setteam (CTFTime Team ID) : Sets team to track for use of !myteam. Use ID in team page URL, not team name!
Note: This command can only be used by an administrator of the server.'''
if not path.exists(self.file_path):
with open(self.file_path, 'w') as f:
f.write('{}')
team_id = id
url = f'https://ctftime.org/api/v1/teams/{team_id}/'
request = requests.get(url, headers=self.header)
try:
results = request.json()
except json.decoder.JSONDecodeError:
team_id = None
print(f'[{datetime.now(timezone.utc)}] Set team request not properly fetched. HTTP Code: {request.status_code}')
return await ctx.send('I can\'t seem to fetch that team! Make sure it is the proper team id, otherwise try again later.')
with open(self.file_path, 'r') as f:
try:
team_db = json.load(f)
except json.decoder.JSONDecodeError:
print(f'[{datetime.now(timezone.utc)}] Local team DB not fetched properly.')
return await ctx.send('Sorry, there seems to be an issue retrieving the team database.')
team_db[str(ctx.guild.id)] = team_id
with open(self.file_path, 'w') as f:
json.dump(team_db, f)
team_name = results['name']
await ctx.send(f'Hi there, {team_name}! Team related commands will now be in regards to your team.')
@commands.command(name='myteam', aliases=['teamstats'])
async def team_stats(self, ctx):
'''!myteam : Displays CTFTime rating and point stats of the given team'''
if not path.exists(self.file_path):
with open(self.file_path, 'w') as f:
f.write('{}')
with open(self.file_path, 'r') as f:
try:
team_db = json.load(f)
except json.decoder.JSONDecodeError:
print(f'[{datetime.now(timezone.utc)}] Local team DB not fetched properly.')
return await ctx.send('Sorry, there seems to be an issue retrieving the team database.')
if (str(ctx.guild.id) not in team_db) or (team_db[str(ctx.guild.id)] == None):
return await ctx.send('You do not have a valid team set. Use !setteam [team_id] to set it, or !help setteam for more info.')
team_id = team_db[str(ctx.guild.id)]
url = f'https://ctftime.org/api/v1/teams/{team_id}/'
request = requests.get(url, headers=self.header)
try:
results = request.json()
except json.decoder.JSONDecodeError:
print(f'[{datetime.now(timezone.utc)}] Could not fetch team data. HTTP Code: {request.status_code}')
return await ctx.send('Team data can not be fetched, please try again later!')
team_name = results['name']
bot_message = f'Team Info for {team_name} (Team ID: {team_id}):\n\n'
embed = discord.Embed(
title = team_name,
url = f'https://ctftime.org/team/{team_id}',
description = f'Team ID: {team_id}',
color = discord.Color.red()
)
if not results['rating']: #Checking if the "ratings" list in JSON file is empty. AKA: The team has no score for any year
bot_message += 'Your team does not have any rating as of now.'
embed.add_field(name='No Ratings', value='Compete in more CTFs to add rating points!')
else:
rating_info = {}
for d in results['rating']:
rating_info.update(d)
for year in rating_info:
rate_pts = round(rating_info[year]['rating_points'])
rate_rank = rating_info[year]['rating_place']
embed.add_field(name=f'{year} Rating', value=f'Points: {rate_pts} - Rank: {rate_rank}')
embed.set_footer(text=f'Powered by the CTFTime API')
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(CTF(bot))
|
the-stack_106_13777
|
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
from setuptools.command.sdist import sdist as SdistCommand
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, "-m", "pip", "install", "setuptools-rust"])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
class CargoModifiedSdist(SdistCommand):
"""Modifies Cargo.toml to use an absolute rather than a relative path
The current implementation of PEP 517 in pip always does builds in an
isolated temporary directory. This causes problems with the build, because
Cargo.toml necessarily refers to the current version of pyo3 by a relative
path.
Since these sdists are never meant to be used for anything other than
tox / pip installs, at sdist build time, we will modify the Cargo.toml
in the sdist archive to include an *absolute* path to pyo3.
"""
def make_release_tree(self, base_dir, files):
"""Stages the files to be included in archives"""
super().make_release_tree(base_dir, files)
import toml
# Cargo.toml is now staged and ready to be modified
cargo_loc = os.path.join(base_dir, "Cargo.toml")
assert os.path.exists(cargo_loc)
with open(cargo_loc, "r") as f:
cargo_toml = toml.load(f)
rel_pyo3_path = cargo_toml["dependencies"]["pyo3"]["path"]
base_path = os.path.dirname(__file__)
abs_pyo3_path = os.path.abspath(os.path.join(base_path, rel_pyo3_path))
cargo_toml["dependencies"]["pyo3"]["path"] = abs_pyo3_path
with open(cargo_loc, "w") as f:
toml.dump(cargo_toml, f)
class PyTest(TestCommand):
user_options = []
def run(self):
self.run_command("test_rust")
import subprocess
subprocess.check_call(["pytest", "tests"])
setup_requires = ["setuptools-rust>=0.10.1", "wheel"]
install_requires = ["scikit-learn~=0.21.3"]
tests_require = install_requires + ["pytest", "pytest-benchmark"]
setup(
name="linfa-k-means",
version="0.1.0",
classifiers=[
"License :: OSI Approved :: MIT License",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Rust",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
],
packages=["linfa_k_means"],
rust_extensions=[RustExtension(
"linfa_k_means.linfa_k_means", "Cargo.toml", debug=False)],
install_requires=install_requires,
tests_require=tests_require,
setup_requires=setup_requires,
include_package_data=True,
zip_safe=False,
cmdclass={"test": PyTest, "sdist": CargoModifiedSdist},
)
|
the-stack_106_13778
|
import math
import time
from util import *
import torch
import torch.utils.data
from torch.utils.data import TensorDataset
from torchsummary import summary
import torch.nn as nn
import sys
from torch.optim import lr_scheduler
import re
import random
import torch.nn.functional as F
#from unet import UNet
from unet2 import UNet as unetmodel
import copy
import argparse
import csv
from DatasetLoader import *
from init import *
import pandas as pd
import json
def seed_torch(seed=2019):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def writetoCSV(path2file,data):
with open(path2file+'.csv','w') as out:
csv_out=csv.writer(out)
csv_out.writerow(['epoch','train loss','train dice'])
for row in data:
csv_out.writerow(row)
def adjust_lr(optimizer, epoch):
lr = 1e-4 * (0.1 ** (epoch // 4))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_lr_custom(optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 10
param_group['weight_decay'] = param_group['weight_decay'] * 10
def showImages(mydata):
i = 0
sample = mydata[i]
print(i, sample['image'].shape, sample['mask'].shape)
cv2.imshow('image',sample['image'])
print(sample['image'])
print(sample['mask'])
cv2.waitKey(0)
cv2.imshow('mask',sample['mask'])
cv2.waitKey(0)
cv2.destroyAllWindows()
def trainModel(path2Data,seed,iterationNum,GPU ,mode = 'train', nb_epoch = 20, batch_size = 16, img_width = 400,img_hight= 400, ch = 1,saveModelOption='last'):
df_max = pd.DataFrame()
task = iterationNum.split('_')[0]
fold = iterationNum.split('_')[1]
fold = 'Iteration_'+fold
nb_epoch = int(nb_epoch)
if path2Data.endswith(os.path.sep):
path2Data = path2Data[:-1]
expPath = os.path.dirname(path2Data)
if not os.path.exists(os.path.join(expPath,'pytorch_exp_May2021_saveEveryEpochModel_mixedPrecision')):
os.makedirs(os.path.join(expPath,'pytorch_exp_May2021_saveEveryEpochModel_mixedPrecision'))
taskPath = os.path.join(expPath,'pytorch_exp_May2021_saveEveryEpochModel_mixedPrecision')
if os.path.exists(os.path.join(taskPath,task,fold,'Models',fold+'_weights'+'.pt')) and mode == 'train':
        print('Model for iteration {} already exists, try another iteration for training'.format(iterationNum))
sys.exit()
mode = mode.lower()
try:
del model
del best_model
except:
print('Seems memory already cleared from previous models')
seed_torch(seed=int(seed))
device = torch.device("cuda:"+GPU)
#model = UNet(n_classes=2, padding=True, up_mode='upconv').to(device)
model = unetmodel(in_channels=1, out_channels=2, init_features=32,BatchNorm = False)
#print(model)
#model.apply(weights_init)
#model.double() # double precision
model = weights_initFromKeras2(model)
listOflayers,listOfWeightsMax, listOfWeightsMin = getMaximumWeights(model)
df_max['layers'] = listOflayers
df_max['init_max'] = listOfWeightsMax
df_max['init_min'] = listOfWeightsMin
#model = model.cuda()
#print(model)
#inputSize = (int(ch),int(img_width),int(img_hight))
#summary(model,(int(ch),int(img_width),int(img_hight)))
model = model.to(device)
if mode == 'train':
param_json = {} # json for all training parameters
param_json['seed'] = seed
param_json['GPU'] = GPU
param_json['total_epochs'] = nb_epoch
param_json['batch_size'] = batch_size
param_json['img_width'] = img_width
param_json['img_hight'] = img_hight
param_json['channels'] = ch
param_json['normType'] = 'divideBY255'
#trainImgs,trainMsks = create_train_data(trainingImages,trainingMasks,'train',img_width,img_hight)
if not os.path.exists(os.path.join(taskPath,task,fold,'Models')):
os.makedirs(os.path.join(taskPath,task,fold,'Models'))
if not os.path.exists(os.path.join(taskPath,task,fold,'Summary')):
os.makedirs(os.path.join(taskPath,task,fold,'Summary'))
#optim = torch.optim.SGD(model.parameters(),lr=1e-4)
optim = torch.optim.Adam(model.parameters(),lr=1e-5,eps=1e-7,amsgrad=False, weight_decay=1e-3)
#lossFunc = nn.BCELoss(reduction='mean')
lossFunc = torch.nn.BCEWithLogitsLoss(reduction='mean')
lossFunc = lossFunc.to(device)
# lr scheduler
#exp_lr_scheduler = lr_scheduler.StepLR(optim, step_size=20, gamma=0.1)
#listOflayers,listOfWeightsMax, listOfWeightsMin = getMaximumWeights(model)
#df_max['layers'] = listOflayers
#df_max['init_max'] = listOfWeightsMax
#df_max['init_min'] = listOfWeightsMin
df_max.to_csv(os.path.join(taskPath,task,fold,'Summary','maxPerLayer.csv'))
'''
total_images = len(os.listdir(os.path.join(path2Data,'folds',fold,'train','masks','train')))
total_val_images = len(os.listdir(os.path.join(path2Data,'folds',fold,'val','masks','val')))
print('Total Number of Images in the dataset is {}'.format(total_images))
if os.path.exists(os.path.join(taskPath,task,fold,'Summary',fold+'_trainImgs.npy')):
trainImgs = np.load(os.path.join(taskPath,task,fold,'Summary',fold+'_trainImgs.npy'))
else:
trainImgs,trainMasks = create_train_data(os.path.join(path2Data,'folds',fold,'train','images'),os.path.join(path2Data,'folds',fold,'train','masks'),'train',img_width,img_hight)
np.save(os.path.join(taskPath,task,fold,'Summary',fold+'_trainImgs.npy'),trainImgs)
mean,std = getTrainStatistics(trainImgs)
train_data = Dataset(path2Data,os.path.join(path2Data,'folds',fold,'train','images','train'),os.path.join(path2Data,'folds',fold,'train','masks','train'),mean=mean,std=std,normalize=normType)
val_data = Dataset(path2Data,os.path.join(path2Data,'folds',fold,'val','images','val'),os.path.join(path2Data,'folds',fold,'val','masks','val'),mean=mean,std=std,normalize=normType)
'''
trainingImgs = np.load(os.path.join(path2Data,'trainImgs.npy'))
trainingMasks = np.load(os.path.join(path2Data,'trainingMasks_twoChannels.npy'))
total_images = trainingImgs.shape[0]
trainingImgs = trainingImgs / 255.0
train_loader = torch.utils.data.DataLoader(trainingImgs,batch_size = batch_size, shuffle= False, num_workers = 1)
trainMasks_loader = torch.utils.data.DataLoader(trainingMasks,batch_size = batch_size, shuffle=False,num_workers=1)
#data_loaders = {'train': train_loader, "val": val_loader}
#param_json['mean'] = str(mean)
#param_json['std'] = str(std)
# write paramaters to json file
with open(os.path.join(taskPath,task,fold,'Summary',fold+'_param.json'),'w') as fp:
json.dump(str(param_json),fp)
best_loss = math.inf
# training UNet
start_time = time.time()
epoch_loss_dice = []
scaler = torch.cuda.amp.GradScaler()
for epoch in range(1,nb_epoch+1):
accum_loss = 0.0
accum_dice = 0.0
accum_loss_val = 0.0
accum_dice_val = 0.0
totalImagesSeen = 0
totalImagesSeen_val = 0
for phase in ['train']:
for X,y in zip(train_loader,trainMasks_loader):
#X, y = batch['image'], batch['mask']
X = X.to(device,dtype=torch.float) # [N, 1, H, W]
y = y.to(device=device,dtype=torch.float) # [N, H, W] with class indices (0, 1)
optim.zero_grad()
with torch.cuda.amp.autocast():
prediction = model(X) # [N, 2, H, W
prediction = F.softmax(prediction,1)
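                        # NOTE: BCEWithLogitsLoss applies a sigmoid internally, so it receives these softmax probabilities rather than raw logits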
loss = lossFunc(prediction,y)
#dice = get_dice(prediction,y)
if phase == 'train':
#loss.backward()
scaler.scale(loss).backward()
#optim.step()
scaler.step(optim)
scaler.update()
accum_loss += loss.item() * prediction.shape[0]
#accum_dice += dice.item()
totalImagesSeen += X.shape[0]
print('%d/%d \t train loss %f \r'%(totalImagesSeen,total_images,loss.item()),end="")
if phase == 'val':
accum_loss_val += loss.item() * prediction.shape[0]
#accum_dice_val += dice.item()
totalImagesSeen_val += X.shape[0]
#print('%d/%d \t train loss %f \t val loss %f \r'%(totalImagesSeen,total_images,loss.item(),loss_val.item()),end="")
#print('dice %f \r'%dice.item()/float(prediction.shape[0]), end="")
#exp_lr_scheduler.step()
epoch_loss = accum_loss /float(totalImagesSeen)
#epoch_dice = accum_dice /float(totalImagesSeen)
#epoch_loss_val = accum_loss_val / float(totalImagesSeen_val)
#epoch_dice_val = accum_dice_val / float(totalImagesSeen_val)
#epoch_loss_dice.append((epoch,epoch_loss,epoch_dice))
print('epoch {}/{}, train loss {}'.format(epoch,nb_epoch,epoch_loss))
#if (epoch > 1 and abs(epoch_loss_val - best_loss) > 1):
#print('reducing the weight decay by *10')
#adjust_lr_custom(optim)
#This code is for saving the min and max of each layer at each epoch to test why the results are not reproducible.
#-----------------------------------------------------------------------
listOflayers,listOfWeightsMax, listOfWeightsMin = getMaximumWeights(model)
df_max['layers'] = listOflayers
df_max['init_max'] = listOfWeightsMax
df_max['init_min'] = listOfWeightsMin
df_max.to_csv(os.path.join(taskPath,task,fold,'Summary','Epoch'+str(epoch)+'_maxPerLayer.csv'))
epoch_model = copy.deepcopy(model)
epoch_model = epoch_model.cpu()
torch.save(epoch_model.state_dict(), os.path.join(taskPath,task,fold,'Models',fold+'_Epoch'+str(epoch)+'_weights.pt'))
#------------------------------------------------------------------------
if epoch_loss < best_loss:
best_model = copy.deepcopy(model)
best_model = best_model.cpu()
best_loss = epoch_loss
#best_dice = epoch_dice
try:
print('saving the model ...')
torch.save(best_model.state_dict(), os.path.join(taskPath,task,fold,'Models',fold+'_weights.pt'))
except:
print('Error saving the model')
if saveModelOption == 'Snapshot':
if epoch%5 == 0:
print('saving model Snapshot...')
model2save = copy.deepcopy(model)
model2save = model2save.cpu()
torch.save(model2save.state_dict(), os.path.join(taskPath,task,fold,'Models',fold+'_epoch_'+str(epoch)+'_weights.pt'))
writetoCSV(os.path.join(taskPath,task,fold,'Summary',fold+'_epoch_loss_dice'),epoch_loss_dice)
total_time = time.time() - start_time
print("total training time {} sec".format(total_time))
del model
elif mode == 'test':
if not os.path.exists(os.path.join(taskPath,task,fold,'Models',fold+'_weights'+'.pt')):
print('Model for {} does not exist'.format(task))
sys.exit()
#task = iterationNum.split('_')[0]
#fold = iterationNum.split('_')[1]
#fold = 'fold_'+fold
for phase in ['test']:
start_time = time.time()
path2Model = os.path.join(taskPath,task,fold,'Models',fold+'_weights'+'.pt')
pred_dir_test = os.path.join(taskPath,task,fold,'PredictedMasks',fold+'_'+phase+'_predMasks')
if not os.path.exists(pred_dir_test):
os.makedirs(pred_dir_test)
#testImages,testIds = create_test_data(os.path.join(path2Data,'folds',fold,phase,'images'),phase,img_width,img_hight)
testImages = np.load(os.path.join(path2Data,'testImgs.npy'))
testIds = np.load(os.path.join(path2Data,'testIds.npy'))
testImages = testImages / 255.0
print('Total number of test Images is {}'.format(testImages.shape[0]))
dataloader = torch.utils.data.DataLoader(testImages, batch_size=1,
shuffle=False)
model.load_state_dict(torch.load(path2Model))
model.eval()
print('Now predicting on '+phase+' set, please wait...')
for X,ids in tqdm(zip(dataloader,testIds)):
X = X.to(device,dtype=torch.float)
prediction = model(X)
#prediction = F.softmax(prediction,1)
SaveMsksToFile(prediction,ids,pred_dir_test)
print("Total test time {}".format(time.time() - start_time))
print('DONE')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-GPU', '--GPU_num', help ='GPU number (Based on nvidia-smi indexing) to use in training/testing model ', required= True)
parser.add_argument('-seed','--seed', help='seed value to seed numpy, tensorflow, random number generator', default= 2019)
parser.add_argument('-d', '--dataPath', help ='path to data, make sure that names of data is correct',required= True)
parser.add_argument('-m','--mode', help='mode: either train or test, default is train',default = 'train')
parser.add_argument('-iter','--iteration_number', help='Iteration number of training/testing', required = True)
parser.add_argument('-total_epochs','--total_epochs', help='Total number of epochs to run the model for, default is 20',default=20)
parser.add_argument('-batch_size', '--batch_size', help='batch size, default is 16',default = 16)
parser.add_argument('-img_width', help='image width', default = 400)
parser.add_argument('-img_hight', help='image hight', default = 400)
parser.add_argument('-ch','--channel', help='Number of channels of input data, gray images channels =1, RGB images channels = 3, default=1',default=1)
parser.add_argument('-saveOption','--saveOption',help='Save model every certain number of epochs or the last model', default='last')
parser.add_argument('-normType','--normType', help='Normalization type name, can be <center>,<center_normalize_batchwise>,<divideby255>,<center_normalize_train>, default <center>',default = 'center')
args = parser.parse_args()
#print(args.dataPath)
trainModel(args.dataPath,seed= int(args.seed),ch=args.channel,nb_epoch = args.total_epochs,batch_size = int(args.batch_size), mode =args.mode, iterationNum = args.iteration_number, GPU = args.GPU_num,saveModelOption = args.saveOption)
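# Example invocation (the script name and paths are illustrative assumptions;
# the flags match the argparse definitions above):
#   python train_unet.py -GPU 0 -d /path/to/data -iter task1_1 -m train -total_epochs 20 -batch_size 16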
|
the-stack_106_13779
|
# Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
#just need to fix the importing issues
from collections import Counter
from .stationdata import build_station_list
import operator
from .utils import sorted_by_key # noqa
from haversine import haversine, Unit
def stations_by_distance(stations, p):
pairlist = []
for station in stations:
d = haversine(station.coord, p)
coordpair = (station.name, station.town, d)
pairlist.append(coordpair)
return sorted(pairlist, key=lambda station: station[2])
def stations_within_radius(stations, centre, r):
    pairlist = stations_by_distance(stations, centre)
    stationlist = []
    for name, town, distance in pairlist:
        if distance < r:
            stationlist.append(name)
        else:
            # pairlist is sorted by distance, so no later entry can be within r;
            # iterating (instead of indexing in a while loop) also avoids an
            # IndexError when every station lies within the radius
            break
    return sorted(stationlist)
def rivers_with_stations(stations):
rivers = set([])
for station in stations:
rivers.add(station.river)
return sorted(list(rivers))
def stations_by_river(stations):
out = {}
for station in stations:
river = station.river
if river in out:
out[river].append(station.name)
else:
out[river] = [station.name]
for key in out:
out[key] = sorted(out[key])
return out
def generate_rivers(stations): #Generate a list of river names, one entry per station
Rivers_StationNumber = []
for i in stations:
Rivers_StationNumber.append(i.river)
return Rivers_StationNumber
def reverse_dictionary(d): #sort a dictionary by value in descending order
sorted_d = dict(sorted(d.items(), key=operator.itemgetter(1),reverse=True))
return sorted_d
def rivers_by_station_number(stations, N):
Rivers_StationNumber = generate_rivers(stations) #List of Rivers
RiverCount = Counter(Rivers_StationNumber) #Dictionary
Goodboy = reverse_dictionary(RiverCount) # Reverse dictionary to have descending value.
counter = 0
FinalList = []
for items in Goodboy.items():
counter = counter + 1
FinalList.append(items)
if counter > N-1:
break
return FinalList
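# A usage sketch (assumption: network access for build_station_list; the
# coordinates and limits below are illustrative). Call _demo() from package
# code or an interactive session rather than running this module directly.
def _demo():
    stations = build_station_list()
    cambridge = (52.2053, 0.1218)
    print(stations_by_distance(stations, cambridge)[:3])
    print(stations_within_radius(stations, cambridge, 10))
    print(rivers_by_station_number(stations, 5))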
|
the-stack_106_13780
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
NODE_VERSION_DEFAULT = "10.14"
NODE_VERSION_NEWER = "12-lts"
NODE_EXACT_VERSION_DEFAULT = "10.14.1"
NETCORE_VERSION_DEFAULT = "3.1"
ASPDOTNET_VERSION_DEFAULT = "4.8"
DOTNET_VERSION_DEFAULT = "5.0"
DOTNET_TARGET_FRAMEWORK_STRING = "net5.0"
PYTHON_VERSION_DEFAULT = "3.7"
NETCORE_RUNTIME_NAME = "dotnetcore"
ASPDOTNET_RUNTIME_NAME = "aspnet"
DOTNET_RUNTIME_NAME = "dotnet"
NODE_RUNTIME_NAME = "node"
PYTHON_RUNTIME_NAME = "python"
OS_DEFAULT = "Windows"
STATIC_RUNTIME_NAME = "static" # not an official supported runtime but used for CLI logic
NODE_VERSIONS = ['10.6', '10.14']
PYTHON_VERSIONS = ['3.9', '3.8', '3.7', '3.6']
NETCORE_VERSIONS = ['2.1', '3.1']
DOTNET_VERSIONS = ['3.5', '4.8']
LINUX_SKU_DEFAULT = "P1V2"
FUNCTIONS_VERSIONS = ['2', '3']
FUNCTIONS_STACKS_API_JSON_PATHS = {
'windows': os.path.abspath(os.path.join(os.path.abspath(__file__), '../resources/WindowsFunctionsStacks.json')),
'linux': os.path.abspath(os.path.join(os.path.abspath(__file__), '../resources/LinuxFunctionsStacks.json'))
}
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX = r"^.*\|(.*)$"
FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX = r"^~(.*)$"
FUNCTIONS_NO_V2_REGIONS = {
"USNat West",
"USNat East",
"USSec West",
"USSec East"
}
class FUNCTIONS_STACKS_API_KEYS():
# pylint:disable=too-few-public-methods,too-many-instance-attributes
def __init__(self):
self.NAME = 'name'
self.VALUE = 'value'
self.DISPLAY = 'display'
self.PROPERTIES = 'properties'
self.MAJOR_VERSIONS = 'majorVersions'
self.DISPLAY_VERSION = 'displayVersion'
self.RUNTIME_VERSION = 'runtimeVersion'
self.IS_HIDDEN = 'isHidden'
self.IS_PREVIEW = 'isPreview'
self.IS_DEPRECATED = 'isDeprecated'
self.IS_DEFAULT = 'isDefault'
self.SITE_CONFIG_DICT = 'siteConfigPropertiesDictionary'
self.APP_SETTINGS_DICT = 'appSettingsDictionary'
self.LINUX_FX_VERSION = 'linuxFxVersion'
self.APPLICATION_INSIGHTS = 'applicationInsights'
self.SUPPORTED_EXTENSION_VERSIONS = 'supportedFunctionsExtensionVersions'
self.USE_32_BIT_WORKER_PROC = 'use32BitWorkerProcess'
self.FUNCTIONS_WORKER_RUNTIME = 'FUNCTIONS_WORKER_RUNTIME'
RUNTIME_STACKS = os.path.abspath(os.path.join(os.path.abspath(__file__),
'../resources/WebappRuntimeStacks.json'))
GENERATE_RANDOM_APP_NAMES = os.path.abspath(os.path.join(os.path.abspath(__file__),
'../resources/GenerateRandomAppNames.json'))
|
the-stack_106_13781
|
import unittest
from filter import Filter
from differ import Differ
class TestFilterMethods(unittest.TestCase):
def test_it_exists(self):
self.assertNotEqual(Filter(), None)
def test_it_returns_a_best_guess(self):
guess = "choke"
# solution = "chess"
guess_diff = [Differ.MATCH, Differ.MATCH,
Differ.ABSENT, Differ.ABSENT, Differ.CLOSE]
corpus = ["choke", "chode", "chose", "chess", "clops", "ocean"]
self.assertEqual(Filter().get_best_next_guess(
guess, guess_diff, corpus, corpus), "chess")
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13786
|
from botstory.ast import story_context
from botstory.ast.story_context import get_message_attachment
from botstory.middlewares import any, location, option, sticker, text
from botstory.integrations.commonhttp import errors as http_errors
import emoji
import datetime
import logging
from nasabot.geo import animation, tiles
import os
from urllib.parse import urljoin
import uuid
logger = logging.getLogger(__name__)
dir_path = os.getcwd()
satellite_image_epsg3857 = 'https://gibs.earthdata.nasa.gov/wmts/epsg3857/best/MODIS_Terra_CorrectedReflectance_TrueColor/default/{date}/GoogleMapsCompatible_Level9/{z}/{y}/{x}.jpg'
satellite_image_epsg4326 = 'https://gibs.earthdata.nasa.gov/wmts/epsg4326/best/MODIS_Terra_CorrectedReflectance_TrueColor/default/{date}/250m/{z}/{y}/{x}.jpg'
def day_before():
return datetime.datetime.now() - datetime.timedelta(days=1)
class ContextException(Exception):
pass
class UserDialogContext:
"""
    store the context of the user dialog and reuse it to avoid asking similar questions again
"""
def __init__(self, ctx):
self.ctx = ctx
self.user_data = story_context.get_user_data(ctx)
def get_last_location(self):
"""
get last used coords
:return:
"""
# TODO: raise exception if we don't have coors
try:
return self.user_data['coors'][-1]
except KeyError:
raise ContextException()
def store_location(self, lat, long, zoom=None, name=None):
if 'coors' not in self.user_data:
self.user_data['coors'] = []
self.user_data['coors'].append({
'lat': lat,
'long': long,
'zoom': zoom,
'name': name,
})
def setup(story):
async def show_image(ctx, target_date, lat, long, level):
tile = tiles.wgs84_tile_by_coors(lat, long, level)
await story.send_image(
# satellite_image_epsg3857.format(
satellite_image_epsg4326.format(
**tile,
date=target_date.isoformat(),
z=level,
),
user=ctx['user'],
)
await story.ask(
emoji.emojize('There will come GIBS!',
use_aliases=True),
user=ctx['user'],
quick_replies=[{
'title': emoji.emojize(':earth_americas:', use_aliases=True),
'payload': 'SHOW_AMERICAS'
}, {
'title': emoji.emojize(':earth_africa:', use_aliases=True),
'payload': 'SHOW_AFRICA_N_EUROPE'
}, {
'title': emoji.emojize(':earth_asia:', use_aliases=True),
'payload': 'SHOW_ASIA'
}, ],
)
async def show_animation(ctx, target_date, lat, long, level):
tile = tiles.mercator_tile_by_coords(lat, long, level)
await story.say('Here is the last 2 weeks...',
user=ctx['user'])
await story.start_typing(user=ctx['user'])
gif_filename = 'animation-{}.gif'.format(uuid.uuid4())
gif_full_filename = os.path.join(os.environ.get('GENERATED_STATIC_DIR'), gif_filename)
gif_url = urljoin(os.environ.get('HOST_URL'),
os.path.join(os.environ.get('GENERATED_STATIC_PATH'), gif_filename))
logger.info('# tile')
logger.info(tile)
logger.info('# level')
logger.info(level)
await animation.pipeline(
source=animation.source.GIBSSource(
'https://gibs.earthdata.nasa.gov/wmts/{projection}/best/{layer}/default/{date}/{resolution}/{z}/{y}/{x}.jpg',
layer='MODIS_Terra_CorrectedReflectance_TrueColor',
resolution='GoogleMapsCompatible_Level9',
projection='epsg3857',
z=level,
**tile,
),
timeline=animation.timeline.Interval(
target_date - datetime.timedelta(weeks=2),
target_date,
),
target=animation.target.Gif(
gif_full_filename,
),
)
await story.say(
emoji.emojize('Processed. Now we are going to upload it :package:.'),
user=ctx['user'])
await story.start_typing(user=ctx['user'])
await story.send_image(gif_url,
user=ctx['user'])
await story.stop_typing(user=ctx['user'])
# show static image
#
# await story.send_image(
# satellite_image_epsg3857.format(
# # satellite_image_epsg4326.format(
# **tile,
# date=target_date.isoformat(),
# z=level,
# ),
# user=ctx['user'],
# )
await story.say('What is next?',
user=ctx['user'])
os.remove(gif_full_filename)
async def show_animation_or_ask_retry_on_fail(ctx, lat, long, zoom):
try:
await show_animation(ctx, day_before(), lat, long, zoom)
except http_errors.HttpRequestError as ex:
logger.warning('# got exception')
await story.ask(
emoji.emojize(':confused: Got error:\n\n{}\n\nPlease retry.'.format(ex.message),
use_aliases=True),
quick_replies=[{
'title': 'Retry {},{},{}'.format(lat, long, zoom),
'payload': 'RETRY_SHOW_EARTH_{},{},{}'.format(lat, long, zoom),
}],
user=ctx['user']
)
@story.on(text.EqualCaseIgnore('earth'))
def handle_random_location():
@story.part()
async def show_whole_earth(ctx):
# TODO: request target date
await show_image(ctx, day_before(), 0, 0, 0)
@story.on(emoji.emojize(':earth_americas:', use_aliases=True))
def handle_america_location():
@story.part()
async def show_america(ctx):
await show_image(ctx, day_before(), 5, -90, 2)
@story.on(emoji.emojize(':earth_africa:', use_aliases=True))
def handle_africa_location():
@story.part()
async def show_africa_n_europe_(ctx):
await show_image(ctx, day_before(), 15, 15, 2)
@story.on(emoji.emojize(':earth_asia:', use_aliases=True))
def handle_asia_location():
@story.part()
async def show_asia(ctx):
await show_image(ctx, day_before(), 0, 170, 2)
@story.on([text.EqualCaseIgnore('retry'),
option.Match('RETRY_(.+)')])
def handle_retry():
@story.part()
async def use_store_coors_to_show_earth(ctx):
logger.info('# use_store_coors_to_show_earth')
dlg = UserDialogContext(ctx)
try:
location = dlg.get_last_location()
await show_animation_or_ask_retry_on_fail(
ctx=ctx,
lat=location['lat'],
long=location['long'],
zoom=location['zoom'],
)
except ContextException:
logger.warning('# we do not have needed user context')
@story.on(text.Any())
def handle_list_of_coords():
@story.part()
async def use_passed_coords_to_show_earth(ctx):
logger.info('# use_passed_coords_to_show_earth')
raw_text = text.get_raw_text(ctx)
values = raw_text.split(',')
if len(values) < 2 or len(values) > 4:
                raise NotImplementedError('Parsing fewer than 2 or more than 4 comma-separated values is not implemented')
lat = float(values[0])
long = float(values[1])
if len(values) > 2:
zoom = int(values[2])
else:
zoom = 6
dlg = UserDialogContext(ctx)
dlg.store_location(lat=lat, long=long, zoom=zoom)
await show_animation_or_ask_retry_on_fail(
ctx=ctx,
lat=lat,
long=long,
zoom=zoom,
)
@story.on(location.Any())
def handle_location():
@story.part()
async def show_earth_of_location(ctx):
logger.debug('# show earth of passed location')
location = get_message_attachment(ctx, 'location')['payload']['coordinates']
# TODO: request zoom from User
# TODO: request target date
await show_image(ctx, day_before(), location['lat'], location['long'], 5)
|
the-stack_106_13787
|
import wx, gui
class CalcFrame(gui.MyFrame1):
#constructor
def __init__(self,parent):
#initialize parent class
gui.MyFrame1.__init__(self,parent)
#mandatory in wx: create an app; False stands for not redirecting stdin/stdout
#refer manual for details
app = wx.App(False)
#create an object of CalcFrame
frame = CalcFrame(None)
#show the frame
frame.Show(True)
#start the application
app.MainLoop()
|
the-stack_106_13788
|
"""Specialized tasks for handling Avro data in BigQuery from GCS.
"""
import logging
from luigi.contrib.bigquery import BigQueryLoadTask, SourceFormat
from luigi.contrib.gcs import GCSClient
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
try:
import avro
import avro.datafile
except ImportError:
logger.warning('bigquery_avro module imported, but avro is not installed. Any '
'BigQueryLoadAvro task will fail to propagate schema documentation')
class BigQueryLoadAvro(BigQueryLoadTask):
"""A helper for loading specifically Avro data into BigQuery from GCS.
Additional goodies - takes field documentation from the input data and propagates it
to BigQuery table description and field descriptions.
Suitable for use via subclassing: override requires() to return Task(s) that output
to GCS Targets; their paths are expected to be URIs of .avro files or URI prefixes
(GCS "directories") containing one or many .avro files.
Override output() to return a BigQueryTarget representing the destination table.
"""
source_format = SourceFormat.AVRO
def _avro_uri(self, target):
path_or_uri = target.uri if hasattr(target, 'uri') else target.path
return path_or_uri if path_or_uri.endswith('.avro') else path_or_uri.rstrip('/') + '/*.avro'
def source_uris(self):
return [self._avro_uri(x) for x in flatten(self.input())]
def _get_input_schema(self):
'''Arbitrarily picks an object in input and reads the Avro schema from it.'''
assert avro, 'avro module required'
input_target = flatten(self.input())[0]
input_fs = input_target.fs if hasattr(input_target, 'fs') else GCSClient()
input_uri = self.source_uris()[0]
if '*' in input_uri:
file_uris = list(input_fs.list_wildcard(input_uri))
if file_uris:
input_uri = file_uris[0]
else:
raise RuntimeError('No match for ' + input_uri)
schema = []
exception_reading_schema = []
def read_schema(fp):
# fp contains the file part downloaded thus far. We rely on that the DataFileReader
# initializes itself fine as soon as the file header with schema is downloaded, without
# requiring the remainder of the file...
try:
reader = avro.datafile.DataFileReader(fp, avro.io.DatumReader())
schema[:] = [reader.datum_reader.writers_schema]
except Exception as e:
# Save but assume benign unless schema reading ultimately fails. The benign
# exception in case of insufficiently big downloaded file part seems to be:
# TypeError('ord() expected a character, but string of length 0 found',).
exception_reading_schema[:] = [e]
return False
return True
input_fs.download(input_uri, 64 * 1024, read_schema).close()
if not schema:
raise exception_reading_schema[0]
return schema[0]
def _set_output_doc(self, avro_schema):
bq_client = self.output().client.client
table = self.output().table
current_bq_schema = bq_client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
def get_fields_with_description(bq_fields, avro_fields):
new_fields = []
for field in bq_fields:
avro_field = avro_fields[field[u'name']]
field[u'description'] = avro_field.doc
if field[u'type'] == u'RECORD' and hasattr(avro_field.type, 'fields_dict'):
field[u'fields'] = \
get_fields_with_description(field[u'fields'], avro_field.type.fields_dict)
new_fields.append(field)
return new_fields
field_descriptions = get_fields_with_description(current_bq_schema['schema']['fields'], avro_schema.fields_dict)
patch = {
'description': avro_schema.doc,
'schema': {'fields': field_descriptions, },
}
bq_client.tables().patch(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id,
body=patch).execute()
def run(self):
super(BigQueryLoadAvro, self).run()
# We propagate documentation in one fire-and-forget attempt; the output table is
# left to exist without documentation if this step raises an exception.
try:
self._set_output_doc(self._get_input_schema())
except Exception as e:
logger.info('Could not propagate Avro doc to BigQuery table field descriptions: %r', e)
|
the-stack_106_13789
|
"""
A module for getting TLS certificates in YARN containers, used for setting up Kafka inside Jobs/Notebooks on Hops.
"""
import string
import base64
import textwrap
from hops import constants
import os
from pathlib import Path
try:
import jks
except:
pass
def _get_key_store_path():
"""
Get keystore path
Returns:
keystore path
"""
k_certificate = Path(constants.SSL_CONFIG.K_CERTIFICATE_CONFIG)
if k_certificate.exists():
return k_certificate
else:
username = os.environ['HADOOP_USER_NAME']
material_directory = Path(os.environ['MATERIAL_DIRECTORY'])
return material_directory.joinpath(username + constants.SSL_CONFIG.KEYSTORE_SUFFIX)
def get_key_store():
return str(_get_key_store_path())
def _get_trust_store_path():
"""
Get truststore location
Returns:
truststore location
"""
t_certificate = Path(constants.SSL_CONFIG.T_CERTIFICATE_CONFIG)
if t_certificate.exists():
return str(t_certificate)
else:
username = os.environ['HADOOP_USER_NAME']
material_directory = Path(os.environ['MATERIAL_DIRECTORY'])
return str(material_directory.joinpath(username + constants.SSL_CONFIG.TRUSTSTORE_SUFFIX))
def get_trust_store():
return str(_get_trust_store_path())
def _get_cert_pw():
"""
Get keystore password from local container
Returns:
Certificate password
"""
pwd_path = Path(constants.SSL_CONFIG.CRYPTO_MATERIAL_PASSWORD)
if not pwd_path.exists():
username = os.environ['HADOOP_USER_NAME']
material_directory = Path(os.environ['MATERIAL_DIRECTORY'])
pwd_path = material_directory.joinpath(username + constants.SSL_CONFIG.PASSWORD_SUFFIX)
with pwd_path.open() as f:
return f.read()
def get_key_store_cert():
"""
Get keystore certificate from local container
Returns:
Certificate password
"""
cert_path = _get_key_store_path()
if not cert_path.exists():
raise AssertionError('k_certificate is not present in directory: {}'.format(str(cert_path)))
# read as bytes, don't try to use utf-8 encoding
with cert_path.open("rb") as f:
key_store_cert = f.read()
key_store_cert = base64.b64encode(key_store_cert)
return key_store_cert
def get_key_store_pwd():
"""
Get keystore password
Returns:
keystore password
"""
return _get_cert_pw()
def get_trust_store_pwd():
"""
Get truststore password
Returns:
truststore password
"""
return _get_cert_pw()
def _bytes_to_pem_str(der_bytes, pem_type):
"""
Utility function for creating PEM files
Args:
der_bytes: DER encoded bytes
pem_type: type of PEM, e.g Certificate, Private key, or RSA private key
Returns:
PEM String for a DER-encoded certificate or private key
"""
pem_str = ""
pem_str = pem_str + "-----BEGIN {}-----".format(pem_type) + "\n"
pem_str = pem_str + "\r\n".join(textwrap.wrap(base64.b64encode(der_bytes).decode('ascii'), 64)) + "\n"
pem_str = pem_str + "-----END {}-----".format(pem_type) + "\n"
return pem_str
def _convert_jks_to_pem(jks_path, keystore_pw):
"""
Converts a keystore JKS that contains client private key,
client certificate and CA certificate that was used to
sign the certificate, to three PEM-format strings.
Args:
:jks_path: path to the JKS file
:pw: password for decrypting the JKS file
Returns:
strings: (client_cert, client_key, ca_cert)
"""
# load the keystore and decrypt it with password
ks = jks.KeyStore.load(jks_path, keystore_pw, try_decrypt_keys=True)
private_keys_certs = ""
private_keys = ""
ca_certs = ""
# Convert private keys and their certificates into PEM format and append to string
for alias, pk in ks.private_keys.items():
if pk.algorithm_oid == jks.util.RSA_ENCRYPTION_OID:
private_keys = private_keys + _bytes_to_pem_str(pk.pkey, "RSA PRIVATE KEY")
else:
private_keys = private_keys + _bytes_to_pem_str(pk.pkey_pkcs8, "PRIVATE KEY")
for c in pk.cert_chain:
# c[0] contains type of cert, i.e X.509
private_keys_certs = private_keys_certs + _bytes_to_pem_str(c[1], "CERTIFICATE")
# Convert CA Certificates into PEM format and append to string
for alias, c in ks.certs.items():
ca_certs = ca_certs + _bytes_to_pem_str(c.cert, "CERTIFICATE")
return private_keys_certs, private_keys, ca_certs
def _write_pem(jks_key_store_path, jks_trust_store_path, keystore_pw, client_key_cert_path, client_key_path, ca_cert_path):
"""
    Converts the JKS keystore and JKS truststore into a
    client certificate, client key, and CA certificate chain in PEM format
Args:
:jks_key_store_path: path to the JKS keystore
:jks_trust_store_path: path to the JKS truststore
:keystore_pw: path to file with passphrase for the keystores
:client_key_cert_path: path to write the client's certificate for its private key in PEM format
:client_key_path: path to write the client's private key in PEM format
:ca_cert_path: path to write the chain of CA certificates required to validate certificates
"""
keystore_key_cert, keystore_key, keystore_ca_cert = _convert_jks_to_pem(jks_key_store_path, keystore_pw)
truststore_key_cert, truststore_key, truststore_ca_cert = _convert_jks_to_pem(jks_trust_store_path, keystore_pw)
with client_key_cert_path.open("w") as f:
f.write(keystore_key_cert)
with client_key_path.open("w") as f:
f.write(keystore_key)
with ca_cert_path.open("w") as f:
f.write(keystore_ca_cert + truststore_ca_cert)
def get_client_certificate_location():
"""
Get location of client certificate (PEM format) for the private key signed by trusted CA
used for 2-way TLS authentication, for example with Kafka cluster
Returns:
string path to client certificate in PEM format
"""
certificate_path = Path(constants.SSL_CONFIG.PEM_CLIENT_CERTIFICATE_CONFIG)
if not certificate_path.exists():
_write_pems()
return str(certificate_path)
def get_client_key_location():
"""
Get location of client private key (PEM format)
used for for 2-way TLS authentication, for example with Kafka cluster
Returns:
string path to client private key in PEM format
"""
# Convert JKS to PEMs if they don't exists already
key_path = Path(constants.SSL_CONFIG.PEM_CLIENT_KEY_CONFIG)
if not key_path.exists():
_write_pems()
return str(key_path)
def get_ca_chain_location():
"""
Get location of chain of CA certificates (PEM format) that are required to validate the
private key certificate of the client
used for 2-way TLS authentication, for example with Kafka cluster
Returns:
string path to ca chain of certificate
"""
ca_chain_path = Path(constants.SSL_CONFIG.PEM_CA_CHAIN_CERTIFICATE_CONFIG)
if not ca_chain_path.exists():
_write_pems()
return str(ca_chain_path)
def _write_pems():
"""
Converts JKS keystore file into PEM to be compatible with Python libraries
"""
t_jks_path = get_trust_store()
k_jks_path = get_key_store()
client_certificate_path = Path(constants.SSL_CONFIG.PEM_CLIENT_CERTIFICATE_CONFIG)
client_key_path = Path(constants.SSL_CONFIG.PEM_CLIENT_KEY_CONFIG)
ca_chain_path = Path(constants.SSL_CONFIG.PEM_CA_CHAIN_CERTIFICATE_CONFIG)
_write_pem(k_jks_path, t_jks_path, get_key_store_pwd(), client_certificate_path, client_key_path, ca_chain_path)
def _prepare_rest_appservice_json_request():
"""
Prepares a REST JSON Request to Hopsworks APP-service
Returns:
a dict with keystore cert bytes and password string
"""
key_store_pwd = get_key_store_pwd()
key_store_cert = get_key_store_cert()
json_contents = {}
json_contents[constants.REST_CONFIG.JSON_KEYSTOREPWD] = key_store_pwd
    json_contents[constants.REST_CONFIG.JSON_KEYSTORE] = key_store_cert.decode("latin-1")  # raw bytes are not JSON-serializable
return json_contents
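# Hedged usage sketch: wiring the PEM locations above into a 2-way TLS client.
# This assumes the third-party kafka-python package; the function name, topic and
# broker address below are illustrative placeholders, not part of this module.
def _example_kafka_consumer(bootstrap_server, topic):
    from kafka import KafkaConsumer  # external dependency, assumed to be installed
    return KafkaConsumer(
        topic,
        bootstrap_servers=bootstrap_server,
        security_protocol="SSL",
        ssl_cafile=get_ca_chain_location(),              # CA chain (PEM)
        ssl_certfile=get_client_certificate_location(),  # client certificate (PEM)
        ssl_keyfile=get_client_key_location(),           # client private key (PEM)
    )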
|
the-stack_106_13790
|
def _merge_rangelist(unsorted_ranges):
if len(unsorted_ranges) == 0:
return []
# First sort the ranges
sorted_ranges = sorted(unsorted_ranges)
merged_ranges = []
# Start our state
(merged_start, merged_end) = sorted_ranges.pop(0)
for (range_start, range_end) in sorted_ranges:
if range_start <= (merged_end + 1):
            # Merge this with the existing range (keep the larger end when ranges overlap)
            merged_end = max(merged_end, range_end)
else:
# We have a full range - append it
merged_ranges.append((merged_start, merged_end))
# Start a new merged range
merged_start = range_start
merged_end = range_end
    # Append the last range
merged_ranges.append((merged_start, merged_end))
return merged_ranges
def gen_rangelist(base_name, rangelist):
rangelist = _merge_rangelist(rangelist)
output = ""
output += "const UnicodeRange " + base_name + "Ranges[] = {\n"
entry_index = None
for (index, (range_start, range_end)) in enumerate(rangelist):
        # Split the top of the binary tree into ASCII and non-ASCII halves
        # This is to speed up ASCII searches
if range_end <= 127:
entry_index = index
output += "\t{" + hex(range_start) + ", " + hex(range_end) + "},\n"
output += "};\n\n"
output += "const UnicodeRangeSet " + base_name + "RangeSet = {\n"
output += "\t.ranges = &" + base_name + "Ranges[0],\n"
output += "\t.rangeCount = " + str(len(rangelist)) + ",\n"
output += "\t.entryRange = " + str(entry_index) + "\n"
output += "};\n\n"
return output
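# Hedged example: the ranges below are illustrative only (ASCII letters plus a
# small non-ASCII block), not taken from any real Unicode property table.
if __name__ == "__main__":
    example_ranges = [(0x61, 0x7A), (0x41, 0x5A), (0x5B, 0x60), (0xC0, 0xD6)]
    # _merge_rangelist sorts and coalesces adjacent/overlapping ranges, so the
    # first three entries collapse into a single 0x41-0x7A range.
    print(gen_rangelist("exampleLetter", example_ranges))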
|
the-stack_106_13791
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import migrations, models
import datetime
import avatar.models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Avatar',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('primary', models.BooleanField(default=False)),
('avatar', models.ImageField(
max_length=1024,
upload_to=avatar.models.avatar_file_path, blank=True)),
('date_uploaded', models.DateTimeField(
default=datetime.datetime.now)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)),
],
),
]
|
the-stack_106_13792
|
# coding=utf-8
# Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization classes for Splinter."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_splinter import SplinterTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"tau/splinter-base": "https://huggingface.co/tau/splinter-base/resolve/main/vocab.txt",
"tau/splinter-base-qass": "https://huggingface.co/tau/splinter-base-qass/resolve/main/vocab.txt",
"tau/splinter-large": "https://huggingface.co/tau/splinter-large/resolve/main/vocab.txt",
"tau/splinter-large-qass": "https://huggingface.co/tau/splinter-large-qass/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"tau/splinter-base": 512,
"tau/splinter-base-qass": 512,
"tau/splinter-large": 512,
"tau/splinter-large-qass": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"tau/splinter-base": {"do_lower_case": False},
"tau/splinter-base-qass": {"do_lower_case": False},
"tau/splinter-large": {"do_lower_case": False},
"tau/splinter-large-qass": {"do_lower_case": False},
}
class SplinterTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" Splinter tokenizer (backed by HuggingFace's `tokenizers` library). Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
question_token (:obj:`str`, `optional`, defaults to :obj:`"[QUESTION]"`):
The token used for constructing question representations.
clean_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see `this
issue <https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original BERT).
wordpieces_prefix: (:obj:`str`, `optional`, defaults to :obj:`"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = SplinterTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
question_token="[QUESTION]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
additional_special_tokens=(question_token,),
**kwargs,
)
pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
):
pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
pre_tok_state["lowercase"] = do_lower_case
pre_tok_state["strip_accents"] = strip_accents
self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
self.do_lower_case = do_lower_case
@property
def question_token_id(self):
"""
:obj:`Optional[int]`: Id of the question token in the vocabulary, used to condition the answer on a question
representation.
"""
return self.convert_tokens_to_ids(self.question_token)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a pair of sequence for question answering tasks by concatenating and adding special
tokens. A Splinter sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences for question answering: ``[CLS] question_tokens [QUESTION] . [SEP] context_tokens [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
The question token IDs if pad_on_right, else context tokens IDs
token_ids_1 (:obj:`List[int]`, `optional`):
The context token IDs if pad_on_right, else question token IDs
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
if self.padding_side == "right":
# Input is question-then-context
return cls + token_ids_0 + question_suffix + sep + token_ids_1 + sep
else:
# Input is context-then-question
return cls + token_ids_0 + sep + token_ids_1 + question_suffix + sep
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create the token type IDs corresponding to the sequences passed. `What are token type IDs?
<../glossary.html#token-type-ids>`__
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (:obj:`List[int]`): The first tokenized sequence.
token_ids_1 (:obj:`List[int]`, `optional`): The second tokenized sequence.
Returns:
:obj:`List[int]`: The token type ids.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
question_suffix = [self.question_token_id] + [self.convert_tokens_to_ids(".")]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
if self.padding_side == "right":
# Input is question-then-context
return len(cls + token_ids_0 + question_suffix + sep) * [0] + len(token_ids_1 + sep) * [1]
else:
# Input is context-then-question
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + question_suffix + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
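# Hedged usage sketch (comments only, since loading a checkpoint needs network access):
#
#   tokenizer = SplinterTokenizerFast.from_pretrained("tau/splinter-base-qass")
#   question_ids = tokenizer("Who wrote the paper?", add_special_tokens=False)["input_ids"]
#   context_ids = tokenizer("The paper was written at Tel Aviv University.",
#                           add_special_tokens=False)["input_ids"]
#   input_ids = tokenizer.build_inputs_with_special_tokens(question_ids, context_ids)
#   token_type_ids = tokenizer.create_token_type_ids_from_sequences(question_ids, context_ids)
#
# The example strings are placeholders; only the checkpoint names listed above are real.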
|
the-stack_106_13794
|
import argparse
import json
import math
import os
import random
import re
import traceback
from argparse import ArgumentParser
from copy import deepcopy
from multiprocessing import Pool, Queue
from time import sleep
import numpy as np
from .hyper_opt_utils import strategies
from gettext import gettext as _
def optimize_parallel_gpu_private(args):
trial_params, train_function = args[0], args[1]
# get set of gpu ids
gpu_id_set = g_gpu_id_q.get(block=True)
try:
# enable the proper gpus
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id_set
# run training fx on the specific gpus
results = train_function(trial_params)
return [trial_params, results]
except Exception as e:
print('Caught exception in worker thread', e)
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
return [trial_params, None]
finally:
g_gpu_id_q.put(gpu_id_set)
def optimize_parallel_cpu_private(args):
trial_params, train_function = args[0], args[1]
sleep(random.randint(0, 4))
# run training fx on the specific gpus
results = train_function(trial_params)
# True = completed
return [trial_params, results]
class HyperOptArgumentParser(ArgumentParser):
"""
Subclass of argparse ArgumentParser which adds optional calls to sample from lists or ranges
Also enables running optimizations across parallel processes
"""
# these are commands injected by test tube from cluster operations
TRIGGER_CMD = 'test_tube_from_cluster_hopt'
SLURM_CMD_PATH = 'test_tube_slurm_cmd_path'
SLURM_EXP_CMD = 'hpc_exp_number'
SLURM_LOAD_CMD = 'test_tube_do_checkpoint_load'
CMD_MAP = {
TRIGGER_CMD: bool,
SLURM_CMD_PATH: str,
SLURM_EXP_CMD: int,
SLURM_LOAD_CMD: bool
}
def __init__(self, strategy='grid_search', **kwargs):
"""
:param strategy: 'grid_search', 'random_search'
:param enabled:
:param experiment:
:param kwargs:
"""
ArgumentParser.__init__(self, **kwargs)
self.strategy = strategy
self.trials = []
self.parsed_args = None
self.opt_args = {}
self.json_config_arg_name = None
self.pool = None
def add_argument(self, *args, **kwargs):
super(HyperOptArgumentParser, self).add_argument(*args, **kwargs)
def opt_list(self, *args, **kwargs):
options = kwargs.pop("options", None)
tunable = kwargs.pop("tunable", False)
self.add_argument(*args, **kwargs)
for i in range(len(args)):
arg_name = args[i]
self.opt_args[arg_name] = OptArg(obj_id=arg_name, opt_values=options, tunable=tunable)
def opt_range(
self,
*args,
**kwargs
):
low = kwargs.pop("low", None)
high = kwargs.pop("high", None)
arg_type = kwargs["type"]
nb_samples = kwargs.pop("nb_samples", 10)
tunable = kwargs.pop("tunable", False)
log_base = kwargs.pop("log_base", None)
self.add_argument(*args, **kwargs)
arg_name = args[-1]
self.opt_args[arg_name] = OptArg(
obj_id=arg_name,
opt_values=[low, high],
arg_type=arg_type,
nb_samples=nb_samples,
tunable=tunable,
log_base=log_base,
)
def json_config(self, *args, **kwargs):
self.add_argument(*args, **kwargs)
self.json_config_arg_name = re.sub('-', '', args[-1])
def __parse_args(self, args=None, namespace=None):
# allow bypassing certain missing params which other parts of test tube may introduce
args, argv = self.parse_known_args(args, namespace)
args, argv = self.__whitelist_cluster_commands(args, argv)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def __whitelist_cluster_commands(self, args, argv):
parsed = {}
# build a dict where key = arg, value = value of the arg or None if just a flag
for i, arg_candidate in enumerate(argv):
arg = None
value = None
# only look at --keys
if '--' not in arg_candidate:
continue
# skip items not on the white list
if arg_candidate[2:] not in HyperOptArgumentParser.CMD_MAP:
continue
arg = arg_candidate[2:]
# pull out the value of the argument if given
if i + 1 <= len(argv) - 1:
if '--' not in argv[i + 1]:
value = argv[i + 1]
if arg is not None:
parsed[arg] = value
else:
if arg is not None:
parsed[arg] = value
# add the whitelist cmds to the args
all_values = set()
for k, v in args.__dict__.items():
all_values.add(k)
all_values.add(v)
for arg, v in parsed.items():
v_parsed = self.__parse_primitive_arg_val(v)
all_values.add(v)
all_values.add(arg)
args.__setattr__(arg, v_parsed)
# make list with only the unknown args
unk_args = []
for arg in argv:
arg_candidate = re.sub('--', '', arg)
is_bool = arg_candidate == 'True' or arg_candidate == 'False'
if is_bool: continue
if arg_candidate not in all_values:
unk_args.append(arg)
# when no bad args are left, return none to be consistent with super api
if len(unk_args) == 0:
unk_args = None
# add hpc_exp_number if not passed in so we can never get None
if HyperOptArgumentParser.SLURM_EXP_CMD not in args:
args.__setattr__(HyperOptArgumentParser.SLURM_EXP_CMD, None)
return args, unk_args
def __parse_primitive_arg_val(self, val):
if val is None:
return True
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return val
def parse_args(self, args=None, namespace=None):
# call superclass arg first
results = self.__parse_args(args, namespace)
# extract vals
old_args = vars(results)
# override with json args if given
if self.json_config_arg_name and old_args[self.json_config_arg_name]:
for arg, v in self.__read_json_config(old_args[self.json_config_arg_name]).items():
old_args[arg] = v
# track args
self.parsed_args = deepcopy(old_args)
# attach optimization fx
old_args['trials'] = self.opt_trials
old_args['optimize_parallel'] = self.optimize_parallel
old_args['optimize_parallel_gpu'] = self.optimize_parallel_gpu
old_args['optimize_parallel_cpu'] = self.optimize_parallel_cpu
old_args['generate_trials'] = self.generate_trials
old_args['optimize_trials_parallel_gpu'] = self.optimize_trials_parallel_gpu
return TTNamespace(**old_args)
def __read_json_config(self, file_path):
with open(file_path) as json_data:
json_args = json.load(json_data)
return json_args
def opt_trials(self, num):
self.trials = strategies.generate_trials(
strategy=self.strategy,
flat_params=self.__flatten_params(self.opt_args),
nb_trials=num,
)
for trial in self.trials:
ns = self.__namespace_from_trial(trial)
yield ns
def generate_trials(self, nb_trials):
trials = strategies.generate_trials(
strategy=self.strategy,
flat_params=self.__flatten_params(self.opt_args),
nb_trials=nb_trials,
)
trials = [self.__namespace_from_trial(x) for x in trials]
return trials
def optimize_parallel_gpu(
self,
train_function,
nb_trials,
gpu_ids,
nb_workers=4,
):
"""
Runs optimization across gpus with cuda drivers
:param train_function:
:param nb_trials:
:param gpu_ids: List of strings like: ['0', '1, 3']
:param nb_workers:
:return:
"""
self.trials = strategies.generate_trials(
strategy=self.strategy,
flat_params=self.__flatten_params(self.opt_args),
nb_trials=nb_trials,
)
self.trials = [(self.__namespace_from_trial(x), train_function) for x in self.trials]
# build q of gpu ids so we can use them in each process
# this is thread safe so each process can pull out a gpu id, run its task and put it back when done
if self.pool is None:
gpu_q = Queue()
for gpu_id in gpu_ids:
gpu_q.put(gpu_id)
# called by the Pool when a process starts
def init(local_gpu_q):
global g_gpu_id_q
g_gpu_id_q = local_gpu_q
# init a pool with the nb of worker threads we want
self.pool = Pool(processes=nb_workers, initializer=init, initargs=(gpu_q,))
# apply parallelization
results = self.pool.map(optimize_parallel_gpu_private, self.trials)
return results
def optimize_trials_parallel_gpu(
self,
train_function,
nb_trials,
trials,
gpu_ids,
nb_workers=4,
):
"""
Runs optimization across gpus with cuda drivers
:param train_function:
:param nb_trials:
:param gpu_ids: List of strings like: ['0', '1, 3']
:param nb_workers:
:return:
"""
self.trials = trials
self.trials = [(x, train_function) for x in self.trials]
# build q of gpu ids so we can use them in each process
# this is thread safe so each process can pull out a gpu id, run its task and put it back when done
if self.pool is None:
gpu_q = Queue()
for gpu_id in gpu_ids:
gpu_q.put(gpu_id)
# called by the Pool when a process starts
def init(local_gpu_q):
global g_gpu_id_q
g_gpu_id_q = local_gpu_q
# init a pool with the nb of worker threads we want
self.pool = Pool(processes=nb_workers, initializer=init, initargs=(gpu_q,))
# apply parallelization
results = self.pool.map(optimize_parallel_gpu_private, self.trials)
return results
def optimize_parallel_cpu(
self,
train_function,
nb_trials,
nb_workers=4,
):
"""
Runs optimization across n cpus
:param train_function:
:param nb_trials:
:param nb_workers:
:return:
"""
self.trials = strategies.generate_trials(
strategy=self.strategy,
flat_params=self.__flatten_params(self.opt_args),
nb_trials=nb_trials
)
self.trials = [(self.__namespace_from_trial(x), train_function) for x in self.trials]
# init a pool with the nb of worker threads we want
if self.pool is None:
self.pool = Pool(processes=nb_workers)
# apply parallelization
results = self.pool.map(optimize_parallel_cpu_private, self.trials)
return results
def optimize_parallel(
self,
train_function,
nb_trials,
nb_parallel=4,
):
self.trials = strategies.generate_trials(
strategy=self.strategy,
flat_params=self.__flatten_params(self.opt_args),
nb_trials=nb_trials
)
# nb of runs through all parallel systems
fork_batches = [
self.trials[i:i + nb_parallel] for i in range(0, len(self.trials), nb_parallel)
]
for fork_batch in fork_batches:
children = []
# run n parallel forks
for parallel_nb, trial in enumerate(fork_batch):
# q up the trial and convert to a namespace
ns = self.__namespace_from_trial(trial)
# split new fork
pid = os.fork()
# when the process is a parent
if pid:
children.append(pid)
# when process is a child
else:
                    # slight delay to make sure we don't overwrite test tube log versions
sleep(parallel_nb * 0.5)
train_function(ns, parallel_nb)
os._exit(0)
for i, child in enumerate(children):
os.waitpid(child, 0)
def __namespace_from_trial(self, trial):
trial_dict = {d['name']: d['val'] for d in trial}
for k, v in self.parsed_args.items():
if k not in trial_dict:
trial_dict[k] = v
return TTNamespace(**trial_dict)
def __flatten_params(self, params):
"""
Turns a list of parameters with values into a flat tuple list of lists
so we can permute
:param params:
:return:
"""
flat_params = []
for i, (opt_name, opt_arg) in enumerate(params.items()):
if opt_arg.tunable:
clean_name = opt_name.strip('-')
clean_name = re.sub('-', '_', clean_name)
param_groups = []
for val in opt_arg.opt_values:
param_groups.append({'idx': i, 'val': val, 'name': clean_name})
flat_params.append(param_groups)
return flat_params
class TTNamespace(argparse.Namespace):
def __str__(self):
result = '-' * 100 + '\nHyperparameters:\n'
for k, v in self.__dict__.items():
result += '{0:20}: {1}\n'.format(k, v)
return result
def __getstate__(self):
# capture what is normally pickled
state = self.__dict__.copy()
# remove all functions from the namespace
clean_state = {}
for k, v in state.items():
if not hasattr(v, '__call__'):
clean_state[k] = v
# what we return here will be stored in the pickle
return clean_state
def __setstate__(self, newstate):
# re-instate our __dict__ state from the pickled state
self.__dict__.update(newstate)
class OptArg(object):
def __init__(
self,
obj_id,
opt_values,
arg_type=None,
nb_samples=None,
tunable=False,
log_base=None,
):
self.opt_values = opt_values
self.obj_id = obj_id
self.tunable = tunable
# convert range to list of values
if nb_samples:
low, high = opt_values
if log_base is None:
# random search on uniform scale
if arg_type is int:
self.opt_values = np.random.choice(np.arange(low, high), nb_samples, replace=False)
elif arg_type is float:
self.opt_values = np.random.uniform(low, high, nb_samples)
else:
# random search on log scale with specified base
assert high >= low > 0, "`opt_values` must be positive to do log-scale search."
log_low, log_high = math.log(low, log_base), math.log(high, log_base)
self.opt_values = log_base ** np.random.uniform(log_low, log_high, nb_samples)
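# Hedged usage sketch: a minimal parser with one tunable list and one tunable range.
# The argument names, defaults and sample counts are illustrative, not part of this module.
if __name__ == "__main__":
    parser = HyperOptArgumentParser(strategy="random_search")
    parser.add_argument("--data_dir", default="./data", type=str)
    parser.opt_list("--batch_size", default=32, type=int,
                    options=[16, 32, 64], tunable=True)
    parser.opt_range("--learning_rate", default=0.001, type=float,
                     low=1e-4, high=1e-1, nb_samples=8, log_base=10, tunable=True)
    hparams = parser.parse_args()
    # generate_trials is attached to the returned namespace by parse_args
    for trial in hparams.generate_trials(4):
        print(trial)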
|
the-stack_106_13798
|
# -*- coding: utf-8 -*-
"""
Installation of Ruby modules packaged as gems
=============================================
A state module to manage rubygems. Gems can be set up to be installed
or removed. This module will use RVM or rbenv if they are installed. In that case,
you can specify what ruby version and gemset to target.
.. code-block:: yaml
addressable:
gem.installed:
- user: rvm
- ruby: jruby@jgemset
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import re
import salt.utils.versions
log = logging.getLogger(__name__)
def __virtual__():
"""
Only load if gem module is available in __salt__
"""
if "gem.list" in __salt__:
return True
return (False, "gem module could not be loaded")
def installed(
name, # pylint: disable=C0103
ruby=None,
gem_bin=None,
user=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None,
): # pylint: disable=C0103
"""
Make sure that a gem is installed.
name
The name of the gem to install
ruby: None
Only for RVM or rbenv installations: the ruby version and gemset to
target.
gem_bin: None
Custom ``gem`` command to run instead of the default.
Use this to install gems to a non-default ruby install. If you are
using rvm or rbenv use the ruby argument instead.
user: None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
version : None
Specify the version to install for the gem.
        Specifying a version does not work when installing multiple gems at once.
rdoc : False
Generate RDoc documentation for the gem(s).
ri : False
Generate RI documentation for the gem(s).
pre_releases : False
Install pre-release version of gem(s) if available.
proxy : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
"""
ret = {"name": name, "result": None, "comment": "", "changes": {}}
if ruby is not None and not (
__salt__["rvm.is_installed"](runas=user)
or __salt__["rbenv.is_installed"](runas=user)
):
log.warning("Use of argument ruby found, but neither rvm or rbenv is installed")
gems = __salt__["gem.list"](name, ruby, gem_bin=gem_bin, runas=user)
if name in gems and version is not None:
        versions = [x.replace('default: ', '') for x in gems[name]]
match = re.match(r'(>=|>|<|<=)', version)
if match:
# Grab the comparison
cmpr = match.group()
# Clear out 'default:' and any whitespace
installed_version = re.sub("default: ", "", gems[name][0]).strip()
# Clear out comparison from version and whitespace
desired_version = re.sub(cmpr, "", version).strip()
if salt.utils.versions.compare(installed_version,
cmpr,
desired_version):
ret['result'] = True
ret['comment'] = 'Installed Gem meets version requirements.'
return ret
elif str(version) in versions:
ret['result'] = True
ret['comment'] = 'Gem is already installed.'
return ret
else:
if str(version) in gems[name]:
ret["result"] = True
ret["comment"] = "Gem is already installed."
return ret
elif name in gems and version is None:
ret["result"] = True
ret["comment"] = "Gem is already installed."
return ret
if __opts__["test"]:
ret["comment"] = "The gem {0} would have been installed".format(name)
return ret
if __salt__["gem.install"](
name,
ruby=ruby,
gem_bin=gem_bin,
runas=user,
version=version,
rdoc=rdoc,
ri=ri,
pre_releases=pre_releases,
proxy=proxy,
source=source,
):
ret["result"] = True
ret["changes"][name] = "Installed"
ret["comment"] = "Gem was successfully installed"
else:
ret["result"] = False
ret["comment"] = "Could not install gem."
return ret
def removed(name, ruby=None, user=None, gem_bin=None):
"""
Make sure that a gem is not installed.
name
The name of the gem to uninstall
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
user: None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
"""
ret = {"name": name, "result": None, "comment": "", "changes": {}}
if name not in __salt__["gem.list"](name, ruby, gem_bin=gem_bin, runas=user):
ret["result"] = True
ret["comment"] = "Gem is not installed."
return ret
if __opts__["test"]:
ret["comment"] = "The gem {0} would have been removed".format(name)
return ret
if __salt__["gem.uninstall"](name, ruby, gem_bin=gem_bin, runas=user):
ret["result"] = True
ret["changes"][name] = "Removed"
ret["comment"] = "Gem was successfully removed."
else:
ret["result"] = False
ret["comment"] = "Could not remove gem."
return ret
def sources_add(name, ruby=None, user=None):
"""
Make sure that a gem source is added.
name
The URL of the gem source to be added
ruby: None
For RVM or rbenv installations: the ruby version and gemset to target.
user: None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
"""
ret = {"name": name, "result": None, "comment": "", "changes": {}}
if name in __salt__["gem.sources_list"](ruby, runas=user):
ret["result"] = True
ret["comment"] = "Gem source is already added."
return ret
if __opts__["test"]:
ret["comment"] = "The gem source {0} would have been added.".format(name)
return ret
if __salt__["gem.sources_add"](source_uri=name, ruby=ruby, runas=user):
ret["result"] = True
ret["changes"][name] = "Installed"
ret["comment"] = "Gem source was successfully added."
else:
ret["result"] = False
ret["comment"] = "Could not add gem source."
return ret
def sources_remove(name, ruby=None, user=None):
"""
Make sure that a gem source is removed.
name
The URL of the gem source to be removed
ruby: None
For RVM or rbenv installations: the ruby version and gemset to target.
user: None
The user under which to run the ``gem`` command
.. versionadded:: 0.17.0
"""
ret = {"name": name, "result": None, "comment": "", "changes": {}}
if name not in __salt__["gem.sources_list"](ruby, runas=user):
ret["result"] = True
ret["comment"] = "Gem source is already removed."
return ret
if __opts__["test"]:
ret["comment"] = "The gem source would have been removed."
return ret
if __salt__["gem.sources_remove"](source_uri=name, ruby=ruby, runas=user):
ret["result"] = True
ret["changes"][name] = "Removed"
ret["comment"] = "Gem source was successfully removed."
else:
ret["result"] = False
ret["comment"] = "Could not remove gem source."
return ret
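# Hedged state example (illustrative SLS; the gem name, version constraint and user
# are placeholders):
#
#   sass:
#     gem.installed:
#       - version: '>= 3.4'
#       - user: deploy
#
# The comparison branch in installed() accepts constraints such as '>= 3.4' and treats
# an already-satisfied install as a no-op.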
|
the-stack_106_13799
|
import pytest
import time
import datetime
from dateutil import tz
from freezegun import freeze_time
from helheimr_heating.scheduling import scheduler, PeriodicHeatingJob, NonHeatingJob, non_heating_jobs, ScheduleValueError
from helheimr_heating.utils import time_utils
from attributedict.collections import AttributeDict
def test_scheduler(): #capsys):
# Test start up
scheduler.start()
with pytest.raises(RuntimeError):
scheduler.start()
# Test loading preconfigured jobs
assert isinstance(scheduler.idle_time, int) and scheduler.idle_time > 0
si1 = scheduler.idle_time
jl = scheduler.job_list('En')
assert len(jl.heating_jobs) == 0
assert len(jl.non_heating_jobs) == 0
# Test some non-heating jobs
# First, sanity checks for invalid inputs:
cfg = AttributeDict({
'type': None,
'interval': None,
'description': None
})
with pytest.raises(ScheduleValueError):
test_job = NonHeatingJob.from_attrdict(cfg)
cfg['type'] = 'non-existing-fx'
with pytest.raises(ScheduleValueError):
test_job = NonHeatingJob.from_attrdict(cfg)
cfg['type'] = 'dummy_task'
with pytest.raises(ScheduleValueError):
test_job = NonHeatingJob.from_attrdict(cfg)
cfg['description'] = 'This is only a test.'
cfg['interval'] = -1
with pytest.raises(ScheduleValueError):
test_job = NonHeatingJob.from_attrdict(cfg)
cfg['interval'] = 1
cfg['unit'] = 'second'
test_job = NonHeatingJob.from_attrdict(cfg)
print(f'Initialized test job: {test_job}')
d = test_job.summary()
for k in ['unique_id', 'description', 'interval_string', 'at_string', 'next_run']:
assert k in d
# Schedule this task and ensure that it is actually added to the job queue
scheduler.schedule_job(test_job)
assert scheduler.idle_time <= test_job.interval
jl = scheduler.job_list('En')
assert len(jl.heating_jobs) == 0
assert len(jl.non_heating_jobs) == 1
added_job = jl.non_heating_jobs[0]
assert added_job.unique_id == test_job.unique_id
assert added_job.description == test_job.job_description
assert added_job.next_run == time_utils.format(test_job.next_run, fmt='%Y-%m-%d %H:%M:%S')
# Remove this task again:
res = scheduler.remove_job(test_job.unique_id)
assert res == test_job
# Test dumping the task to a serializable dictionary
dr = res.to_dict()
expected_fields = ['type', 'description', 'interval', 'unit', 'at']
for k in dr:
assert k in expected_fields
if k != 'unit':
assert cfg[k] == dr[k]
else:
if len(dr[k]) < len(cfg[k]):
assert cfg[k].startswith(dr[k])
else:
assert dr[k].startswith(cfg[k])
# assert len(scheduler.list_heating_jobs()) == 0 FIXME
# new_job_list = scheduler.list_non_heating_jobs()
# assert len(new_job_list) == 0
# Test deserializing jobs from disk:
scheduler.load_configured_jobs()
jl = scheduler.job_list('de')
assert len(jl.non_heating_jobs) > 0
scheduler.shutdown()
cfg['unit'] = 'hours'
cfg['at'] = '15:00'
test_job = NonHeatingJob.from_attrdict(cfg)
assert test_job.to_dict()['at'] == '15:00'
def test_cest_switch():
# Test summertime switch
cfg = AttributeDict({
'type': 'dummy_task',
'interval': 1,
'unit': 'hours'
})
# CET --> CEST
dt = datetime.datetime(year=2021, month=3, day=28,
hour=1, minute=55, second=3,
tzinfo=tz.tzlocal())
with freeze_time(dt) as frozen_datetime:
job = NonHeatingJob.from_attrdict(cfg)
assert job.next_run == (dt + datetime.timedelta(hours=2))
# CEST --> CET
dt = datetime.datetime(year=2020, month=10, day=25,
hour=3, minute=55, second=3,
tzinfo=tz.tzlocal())
with freeze_time(dt) as frozen_datetime:
job = NonHeatingJob.from_attrdict(cfg)
assert job.next_run == datetime.datetime(year=2020, month=10, day=25,
hour=4, minute=55, second=3,
tzinfo=tz.tzlocal())
# initial_datetime = datetime.datetime(year=2021, month=3, day=28,
# hour=1, minute=55, second=3)
# cfg = AttributeDict({
# 'type': 'dummy_task',
# 'interval': 1,
# 'unit': 'hours'
# })
# with freeze_time(initial_datetime) as frozen_datetime:
# now = time_utils.dt_now_utc()
# job = NonHeatingJob.from_attrdict(cfg)
# print('FOO',now, type(now))
# print('foo',job.next_run,type(job.next_run))
# assert job.next_run == (now + datetime.timedelta(hours=2))
# scheduler.start()
# rsp = scheduler.schedule_job(job)
# print('FOOOOOOOO', job)
# assert rsp.success
# jlist = scheduler.job_list()
# print(jlist.non_heating_jobs)
# assert False
|
the-stack_106_13800
|
from pathlib import Path
from setuptools import setup, find_packages
from pySpectralPDE import version
__version__ = version.__version__
BASE_PATH = Path(__file__).resolve().parent
# read the version from the particular file
with open(BASE_PATH / "pySpectralPDE" / "version.py", "r") as f:
exec(f.read())
DOWNLOAD_URL = f"https://github.com/alanmatzumiya/pySpectralPDE/archive/v{__version__}.tar.gz"
# read the long description from the README
with open(BASE_PATH / "README.md", "r") as fh:
long_description = fh.read()
setup(
name="pySpectralPDE",
package_data={"pySpectralPDE": ["py.typed"]},
packages=find_packages(),
zip_safe=False, # this is required for mypy to find the py.typed file
version=__version__,
license="MIT",
description="Python package for solving PDEs' using spectral methods",
long_description=long_description,
long_description_content_type="text/markdown",
author="Alan Matzumiya",
author_email="[email protected]",
url="https://github.com/alanmatzumiya/pySpectralPDE",
download_url=DOWNLOAD_URL,
keywords=["burgers", "partial-differential-equations", "spectral-methods"],
python_requires=">=3.7",
install_requires=["matplotlib", "numpy", "numba", "scipy"],
classifiers=[
"Development Status :: null",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
the-stack_106_13801
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "Little Boxes"
copyright = "2018, Thomas Sileo"
author = "Thomas Sileo"
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
"**": [
"about.html",
"sidebar_badges.html",
"navigation.html",
"searchbox.html",
"sidebar_end.html",
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "LittleBoxesdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"LittleBoxes.tex",
"Little Boxes Documentation",
"Thomas Sileo",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "littleboxes", "Little Boxes Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"LittleBoxes",
"Little Boxes Documentation",
author,
"LittleBoxes",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
|
the-stack_106_13802
|
import csv
# to read player information from the csv file
def read_playerinfo():
# open the csv file and read it to create a list of all player details
with open('soccer_players.csv', newline='') as csvfile:
socc_players = csv.DictReader(csvfile, delimiter=',')
players = list(socc_players)
return players
# for separating the players into experienced and non-experienced groups
def player_grouping(players):
# create a list for each group and return them together as a tuple
inexperienced = [player for player in players
if player['Soccer Experience'] != "YES"]
experienced = [player for player in players
if player['Soccer Experience'] == "YES"]
return experienced, inexperienced
# to assign players for the 3 teams, Sharks, Dragons and Raptors
def player_assigning(players, team_list):
index = 0
sorted_players = player_grouping(players)
player_list = []
teams = [key for key, values in team_list.items()]
# looping through experienced and inexperienced group
for group in sorted_players:
range = len(teams) - 1
# looping through each player in group and assigning them to each team
for player in group:
# assigning teams for the players
player['Team'] = teams[index]
player_list.append(player)
if index < range:
index += 1
else:
index = 0
return player_list
# to create three teams with the assigned players and to create a team list
def team_creating(team_list, player_list):
for player in player_list:
team = player['Team']
if team in team_list:
team_list[team].append(player)
return team_list
# to create a 'teams.txt' file with teams and team's player details
def writing_txtfile(team_list):
file = open('teams.txt', 'w')
for team, players in team_list.items():
file.write(team + "\n")
for player in players:
file.write("{}, {}, {}\n".format(player['Name'],
player['Soccer Experience'],
player['Guardian Name(s)']))
file.write("\n")
file.close()
# to create a welcome letters to all the player's guardians
def welcome_letter(team_list):
for team, players in team_list.items():
for player in players:
lowercase_name = player['Name'].lower()
name = lowercase_name.split(' ')
first_name = name[0]
last_name = name[1]
file = open('{}_{}.txt'.format(first_name, last_name), 'w')
content = " Dear {}, \n \
Congratulations!! Your kid {} has been officially selected for \
the team {} in the Soccer League. \n \
            The first practice begins on June 4th, 2019, and starts \
at 9 AM. \n \
Thanks, \n \
Aishwarya Ravichandran,\n \
Coordinator. \n".format(player['Guardian Name(s)'], player['Name'], team)
file.write(content)
file.close()
if __name__ == '__main__':
team_list = {'Sharks': [],
'Dragons': [],
'Raptors': []}
available_players = read_playerinfo()
players_assigned = player_assigning(available_players, team_list)
teams = team_creating(team_list, players_assigned)
writing_txtfile(team_list)
welcome_letter(team_list)
|
the-stack_106_13804
|
from __future__ import annotations
import logging
import time
from dataclasses import replace
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Set
from blspy import AugSchemeMPL, G2Element
from peas.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from peas.full_node.bundle_tools import simple_solution_generator
from peas.full_node.mempool_check_conditions import get_name_puzzle_conditions
from peas.protocols.wallet_protocol import PuzzleSolutionResponse
from peas.types.blockchain_format.coin import Coin
from peas.types.blockchain_format.program import Program
from peas.types.blockchain_format.sized_bytes import bytes32
from peas.types.coin_spend import CoinSpend
from peas.types.generator_types import BlockGenerator
from peas.types.spend_bundle import SpendBundle
from peas.util.byte_types import hexstr_to_bytes
from peas.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
from peas.util.ints import uint8, uint32, uint64, uint128
from peas.util.json_util import dict_to_json_str
from peas.wallet.block_record import HeaderBlockRecord
from peas.wallet.cc_wallet.cc_info import CCInfo
from peas.wallet.cc_wallet.cc_utils import (
CC_MOD,
SpendableCC,
cc_puzzle_for_inner_puzzle,
cc_puzzle_hash_for_inner_puzzle_hash,
get_lineage_proof_from_coin_and_puz,
spend_bundle_for_spendable_ccs,
uncurry_cc,
)
from peas.wallet.derivation_record import DerivationRecord
from peas.wallet.puzzles.genesis_by_coin_id_with_0 import (
create_genesis_or_zero_coin_checker,
genesis_coin_id_for_genesis_coin_checker,
lineage_proof_for_genesis,
)
from peas.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
)
from peas.wallet.transaction_record import TransactionRecord
from peas.wallet.util.transaction_type import TransactionType
from peas.wallet.util.wallet_types import WalletType
from peas.wallet.wallet import Wallet
from peas.wallet.wallet_coin_record import WalletCoinRecord
from peas.wallet.wallet_info import WalletInfo
class CCWallet:
wallet_state_manager: Any
log: logging.Logger
wallet_info: WalletInfo
cc_coin_record: WalletCoinRecord
cc_info: CCInfo
standard_wallet: Wallet
base_puzzle_program: Optional[bytes]
base_inner_puzzle_hash: Optional[bytes32]
cost_of_single_tx: Optional[int]
@staticmethod
async def create_new_cc(
wallet_state_manager: Any,
wallet: Wallet,
amount: uint64,
):
self = CCWallet()
self.cost_of_single_tx = None
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
self.standard_wallet = wallet
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.cc_info = CCInfo(None, [])
info_as_string = bytes(self.cc_info).hex()
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"CC Wallet", WalletType.COLOURED_COIN, info_as_string
)
if self.wallet_info is None:
raise ValueError("Internal Error")
try:
spend_bundle = await self.generate_new_coloured_coin(amount)
except Exception:
await wallet_state_manager.user_store.delete_wallet(self.id())
raise
await self.wallet_state_manager.add_new_wallet(self, self.id())
# Change and actual coloured coin
non_ephemeral_spends: List[Coin] = spend_bundle.not_ephemeral_additions()
cc_coin = None
puzzle_store = self.wallet_state_manager.puzzle_store
for c in non_ephemeral_spends:
info = await puzzle_store.wallet_info_for_puzzle_hash(c.puzzle_hash)
if info is None:
raise ValueError("Internal Error")
id, wallet_type = info
if id == self.id():
cc_coin = c
if cc_coin is None:
raise ValueError("Internal Error, unable to generate new coloured coin")
regular_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_coin.puzzle_hash,
amount=uint64(cc_coin.amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.wallet_state_manager.main_wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=token_bytes(),
)
cc_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_coin.puzzle_hash,
amount=uint64(cc_coin.amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=None,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=token_bytes(),
)
await self.standard_wallet.push_transaction(regular_record)
await self.standard_wallet.push_transaction(cc_record)
return self
@staticmethod
async def create_wallet_for_cc(
wallet_state_manager: Any,
wallet: Wallet,
genesis_checker_hex: str,
) -> CCWallet:
self = CCWallet()
self.cost_of_single_tx = None
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
self.standard_wallet = wallet
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.cc_info = CCInfo(Program.from_bytes(bytes.fromhex(genesis_checker_hex)), [])
info_as_string = bytes(self.cc_info).hex()
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"CC Wallet", WalletType.COLOURED_COIN, info_as_string
)
if self.wallet_info is None:
raise Exception("wallet_info is None")
await self.wallet_state_manager.add_new_wallet(self, self.id())
return self
@staticmethod
async def create(
wallet_state_manager: Any,
wallet: Wallet,
wallet_info: WalletInfo,
) -> CCWallet:
self = CCWallet()
self.log = logging.getLogger(__name__)
self.cost_of_single_tx = None
self.wallet_state_manager = wallet_state_manager
self.wallet_info = wallet_info
self.standard_wallet = wallet
self.cc_info = CCInfo.from_bytes(hexstr_to_bytes(self.wallet_info.data))
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
return self
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.COLOURED_COIN)
def id(self) -> uint32:
return self.wallet_info.id
async def get_confirmed_balance(self, record_list: Optional[Set[WalletCoinRecord]] = None) -> uint64:
if record_list is None:
record_list = await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.id())
amount: uint64 = uint64(0)
for record in record_list:
lineage = await self.get_lineage_proof_for_coin(record.coin)
if lineage is not None:
amount = uint64(amount + record.coin.amount)
self.log.info(f"Confirmed balance for cc wallet {self.id()} is {amount}")
return uint64(amount)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
confirmed = await self.get_confirmed_balance(unspent_records)
unconfirmed_tx: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
self.id()
)
addition_amount = 0
removal_amount = 0
for record in unconfirmed_tx:
if TransactionType(record.type) is TransactionType.INCOMING_TX:
addition_amount += record.amount
else:
removal_amount += record.amount
result = confirmed - removal_amount + addition_amount
self.log.info(f"Unconfirmed balance for cc wallet {self.id()} is {result}")
return uint128(result)
async def get_max_send_amount(self, records=None):
spendable: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records)
)
if len(spendable) == 0:
return 0
spendable.sort(reverse=True, key=lambda record: record.coin.amount)
if self.cost_of_single_tx is None:
coin = spendable[0].coin
tx = await self.generate_signed_transaction(
[coin.amount], [coin.puzzle_hash], coins={coin}, ignore_max_send_amount=True
)
program: BlockGenerator = simple_solution_generator(tx.spend_bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(
program,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.wallet_state_manager.constants.COST_PER_BYTE,
safe_mode=True,
)
cost_result: uint64 = calculate_cost_of_program(
program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE
)
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 2 # avoid full block TXs
current_cost = 0
total_amount = 0
total_coin_count = 0
for record in spendable:
current_cost += self.cost_of_single_tx
total_amount += record.coin.amount
total_coin_count += 1
if current_cost + self.cost_of_single_tx > max_cost:
break
return total_amount
async def get_name(self):
return self.wallet_info.name
async def set_name(self, new_name: str):
new_info = replace(self.wallet_info, name=new_name)
self.wallet_info = new_info
await self.wallet_state_manager.user_store.update_wallet(self.wallet_info, False)
def get_colour(self) -> str:
assert self.cc_info.my_genesis_checker is not None
return bytes(self.cc_info.my_genesis_checker).hex()
async def coin_added(self, coin: Coin, height: uint32):
"""Notification from wallet state manager that wallet has been received."""
self.log.info(f"CC wallet has been notified that {coin} was added")
search_for_parent: bool = True
inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
lineage_proof = Program.to((1, [coin.parent_coin_info, inner_puzzle.get_tree_hash(), coin.amount]))
await self.add_lineage(coin.name(), lineage_proof, True)
for name, lineage_proofs in self.cc_info.lineage_proofs:
if coin.parent_coin_info == name:
search_for_parent = False
break
if search_for_parent:
data: Dict[str, Any] = {
"data": {
"action_data": {
"api_name": "request_puzzle_solution",
"height": height,
"coin_name": coin.parent_coin_info,
"received_coin": coin.name(),
}
}
}
data_str = dict_to_json_str(data)
await self.wallet_state_manager.create_action(
name="request_puzzle_solution",
wallet_id=self.id(),
wallet_type=self.type(),
callback="puzzle_solution_received",
done=False,
data=data_str,
in_transaction=True,
)
async def puzzle_solution_received(self, response: PuzzleSolutionResponse, action_id: int):
coin_name = response.coin_name
height = response.height
puzzle: Program = response.puzzle
r = uncurry_cc(puzzle)
header_hash = self.wallet_state_manager.blockchain.height_to_hash(height)
block: Optional[
HeaderBlockRecord
] = await self.wallet_state_manager.blockchain.block_store.get_header_block_record(header_hash)
if block is None:
return None
removals = block.removals
if r is not None:
mod_hash, genesis_coin_checker, inner_puzzle = r
self.log.info(f"parent: {coin_name} inner_puzzle for parent is {inner_puzzle}")
parent_coin = None
for coin in removals:
if coin.name() == coin_name:
parent_coin = coin
if parent_coin is None:
raise ValueError("Error in finding parent")
lineage_proof = get_lineage_proof_from_coin_and_puz(parent_coin, puzzle)
await self.add_lineage(coin_name, lineage_proof)
await self.wallet_state_manager.action_store.action_done(action_id)
async def get_new_inner_hash(self) -> bytes32:
return await self.standard_wallet.get_new_puzzlehash()
async def get_new_inner_puzzle(self) -> Program:
return await self.standard_wallet.get_new_puzzle()
async def get_puzzle_hash(self, new: bool):
return await self.standard_wallet.get_puzzle_hash(new)
async def get_new_puzzlehash(self) -> bytes32:
return await self.standard_wallet.get_new_puzzlehash()
def puzzle_for_pk(self, pubkey) -> Program:
inner_puzzle = self.standard_wallet.puzzle_for_pk(bytes(pubkey))
cc_puzzle: Program = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, inner_puzzle)
self.base_puzzle_program = bytes(cc_puzzle)
self.base_inner_puzzle_hash = inner_puzzle.get_tree_hash()
return cc_puzzle
async def get_new_cc_puzzle_hash(self):
return (await self.wallet_state_manager.get_unused_derivation_record(self.id())).puzzle_hash
# Create a new coin of value 0 with a given colour
async def generate_zero_val_coin(self, send=True, exclude: List[Coin] = None) -> SpendBundle:
if self.cc_info.my_genesis_checker is None:
raise ValueError("My genesis checker is None")
if exclude is None:
exclude = []
coins = await self.standard_wallet.select_coins(0, exclude)
assert coins != set()
origin = coins.copy().pop()
origin_id = origin.name()
cc_inner = await self.get_new_inner_hash()
cc_puzzle_hash: Program = cc_puzzle_hash_for_inner_puzzle_hash(
CC_MOD, self.cc_info.my_genesis_checker, cc_inner
)
tx: TransactionRecord = await self.standard_wallet.generate_signed_transaction(
uint64(0), cc_puzzle_hash, uint64(0), origin_id, coins
)
assert tx.spend_bundle is not None
full_spend: SpendBundle = tx.spend_bundle
self.log.info(f"Generate zero val coin: cc_puzzle_hash is {cc_puzzle_hash}")
        # generate the eve coin so we can add future lineage proofs even if we never spend the eve coin
eve_coin = Coin(origin_id, cc_puzzle_hash, uint64(0))
await self.add_lineage(
eve_coin.name(),
Program.to(
(
1,
[eve_coin.parent_coin_info, cc_inner, eve_coin.amount],
)
),
)
await self.add_lineage(eve_coin.parent_coin_info, Program.to((0, [origin.as_list(), 1])))
if send:
regular_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_puzzle_hash,
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=full_spend,
additions=full_spend.additions(),
removals=full_spend.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=token_bytes(),
)
cc_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_puzzle_hash,
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=full_spend,
additions=full_spend.additions(),
removals=full_spend.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=full_spend.name(),
)
await self.wallet_state_manager.add_transaction(regular_record)
await self.wallet_state_manager.add_pending_transaction(cc_record)
return full_spend
async def get_spendable_balance(self, records=None) -> uint64:
coins = await self.get_cc_spendable_coins(records)
amount = 0
for record in coins:
amount += record.coin.amount
return uint64(amount)
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
continue
our_spend = False
for coin in record.removals:
# Don't count eve spend as change
if coin.parent_coin_info.hex() == self.get_colour():
continue
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
async def get_cc_spendable_coins(self, records=None) -> List[WalletCoinRecord]:
result: List[WalletCoinRecord] = []
record_list: Set[WalletCoinRecord] = await self.wallet_state_manager.get_spendable_coins_for_wallet(
self.id(), records
)
for record in record_list:
lineage = await self.get_lineage_proof_for_coin(record.coin)
if lineage is not None:
result.append(record)
return result
async def select_coins(self, amount: uint64) -> Set[Coin]:
"""
Returns a set of coins that can be used for generating a new transaction.
Note: Must be called under wallet state manager lock
"""
spendable_am = await self.get_confirmed_balance()
if amount > spendable_am:
error_msg = f"Can't select amount higher than our spendable balance {amount}, spendable {spendable_am}"
self.log.warning(error_msg)
raise ValueError(error_msg)
self.log.info(f"About to select coins for amount {amount}")
spendable: List[WalletCoinRecord] = await self.get_cc_spendable_coins()
sum = 0
used_coins: Set = set()
# Use older coins first
spendable.sort(key=lambda r: r.confirmed_block_height)
# Try to use coins from the store, if there isn't enough of "unused"
# coins use change coins that are not confirmed yet
unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet(
self.id()
)
for coinrecord in spendable:
if sum >= amount and len(used_coins) > 0:
break
if coinrecord.coin.name() in unconfirmed_removals:
continue
sum += coinrecord.coin.amount
used_coins.add(coinrecord.coin)
self.log.info(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!")
# This happens when we couldn't use one of the coins because it's already used
# but unconfirmed, and we are waiting for the change. (unconfirmed_additions)
if sum < amount:
raise ValueError(
"Can't make this transaction at the moment. Waiting for the change from the previous transaction."
)
self.log.info(f"Successfully selected coins: {used_coins}")
return used_coins
async def get_sigs(self, innerpuz: Program, innersol: Program, coin_name: bytes32) -> List[G2Element]:
puzzle_hash = innerpuz.get_tree_hash()
pubkey, private = await self.wallet_state_manager.get_keys(puzzle_hash)
synthetic_secret_key = calculate_synthetic_secret_key(private, DEFAULT_HIDDEN_PUZZLE_HASH)
sigs: List[G2Element] = []
error, conditions, cost = conditions_dict_for_solution(
innerpuz, innersol, self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM
)
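        # Sign every AGG_SIG_ME message produced by the inner solution's conditions with the synthetic key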
if conditions is not None:
for _, msg in pkm_pairs_for_conditions_dict(
conditions, coin_name, self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA
):
signature = AugSchemeMPL.sign(synthetic_secret_key, msg)
sigs.append(signature)
return sigs
async def inner_puzzle_for_cc_puzhash(self, cc_hash: bytes32) -> Program:
record: DerivationRecord = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash(
cc_hash.hex()
)
inner_puzzle: Program = self.standard_wallet.puzzle_for_pk(bytes(record.pubkey))
return inner_puzzle
async def get_lineage_proof_for_coin(self, coin) -> Optional[Program]:
for name, proof in self.cc_info.lineage_proofs:
if name == coin.parent_coin_info:
return proof
return None
async def generate_signed_transaction(
self,
amounts: List[uint64],
puzzle_hashes: List[bytes32],
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
ignore_max_send_amount: bool = False,
) -> TransactionRecord:
# Get coins and calculate amount of change required
outgoing_amount = uint64(sum(amounts))
total_outgoing = outgoing_amount + fee
if not ignore_max_send_amount:
max_send = await self.get_max_send_amount()
if total_outgoing > max_send:
raise ValueError(f"Can't send more than {max_send} in a single transaction")
if coins is None:
selected_coins: Set[Coin] = await self.select_coins(uint64(total_outgoing))
else:
selected_coins = coins
total_amount = sum([x.amount for x in selected_coins])
change = total_amount - total_outgoing
primaries = []
for amount, puzzle_hash in zip(amounts, puzzle_hashes):
primaries.append({"puzzlehash": puzzle_hash, "amount": amount})
if change > 0:
changepuzzlehash = await self.get_new_inner_hash()
primaries.append({"puzzlehash": changepuzzlehash, "amount": change})
coin = list(selected_coins)[0]
inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
if self.cc_info.my_genesis_checker is None:
raise ValueError("My genesis checker is None")
genesis_id = genesis_coin_id_for_genesis_coin_checker(self.cc_info.my_genesis_checker)
spendable_cc_list = []
innersol_list = []
sigs: List[G2Element] = []
first = True
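        # Only the first spent coin carries the payment (and optional fee) conditions;
        # the remaining coins get empty inner solutions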
for coin in selected_coins:
coin_inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
if first:
first = False
if fee > 0:
innersol = self.standard_wallet.make_solution(primaries=primaries, fee=fee)
else:
innersol = self.standard_wallet.make_solution(primaries=primaries)
else:
innersol = self.standard_wallet.make_solution()
innersol_list.append(innersol)
lineage_proof = await self.get_lineage_proof_for_coin(coin)
assert lineage_proof is not None
spendable_cc_list.append(SpendableCC(coin, genesis_id, inner_puzzle, lineage_proof))
sigs = sigs + await self.get_sigs(coin_inner_puzzle, innersol, coin.name())
spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
self.cc_info.my_genesis_checker,
spendable_cc_list,
innersol_list,
sigs,
)
# TODO add support for array in stored records
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=puzzle_hashes[0],
amount=uint64(outgoing_amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def add_lineage(self, name: bytes32, lineage: Optional[Program], in_transaction=False):
self.log.info(f"Adding parent {name}: {lineage}")
current_list = self.cc_info.lineage_proofs.copy()
current_list.append((name, lineage))
cc_info: CCInfo = CCInfo(self.cc_info.my_genesis_checker, current_list)
await self.save_info(cc_info, in_transaction)
async def save_info(self, cc_info: CCInfo, in_transaction):
self.cc_info = cc_info
current_info = self.wallet_info
data_str = bytes(cc_info).hex()
wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str)
self.wallet_info = wallet_info
await self.wallet_state_manager.user_store.update_wallet(wallet_info, in_transaction)
async def generate_new_coloured_coin(self, amount: uint64) -> SpendBundle:
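        # The origin coin's id seeds the genesis checker (i.e. it determines the new colour),
        # and the full amount is sent to the CC puzzle wrapping one of our inner puzzles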
coins = await self.standard_wallet.select_coins(amount)
origin = coins.copy().pop()
origin_id = origin.name()
cc_inner_hash = await self.get_new_inner_hash()
await self.add_lineage(origin_id, Program.to((0, [origin.as_list(), 0])))
genesis_coin_checker = create_genesis_or_zero_coin_checker(origin_id)
minted_cc_puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, genesis_coin_checker, cc_inner_hash)
tx_record: TransactionRecord = await self.standard_wallet.generate_signed_transaction(
amount, minted_cc_puzzle_hash, uint64(0), origin_id, coins
)
assert tx_record.spend_bundle is not None
lineage_proof: Optional[Program] = lineage_proof_for_genesis(origin)
lineage_proofs = [(origin_id, lineage_proof)]
cc_info: CCInfo = CCInfo(genesis_coin_checker, lineage_proofs)
await self.save_info(cc_info, False)
return tx_record.spend_bundle
async def create_spend_bundle_relative_amount(self, cc_amount, zero_coin: Coin = None) -> Optional[SpendBundle]:
# If we're losing value then get coloured coins with at least that much value
# If we're gaining value then our amount doesn't matter
if cc_amount < 0:
cc_spends = await self.select_coins(abs(cc_amount))
else:
if zero_coin is None:
return None
cc_spends = set()
cc_spends.add(zero_coin)
if cc_spends is None:
return None
# Calculate output amount given relative difference and sum of actual values
spend_value = sum([coin.amount for coin in cc_spends])
cc_amount = spend_value + cc_amount
# Loop through coins and create solution for innerpuzzle
list_of_solutions = []
output_created = None
sigs: List[G2Element] = []
for coin in cc_spends:
if output_created is None:
newinnerpuzhash = await self.get_new_inner_hash()
innersol = self.standard_wallet.make_solution(
primaries=[{"puzzlehash": newinnerpuzhash, "amount": cc_amount}]
)
output_created = coin
else:
innersol = self.standard_wallet.make_solution(consumed=[output_created.name()])
innerpuz: Program = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
sigs = sigs + await self.get_sigs(innerpuz, innersol, coin.name())
lineage_proof = await self.get_lineage_proof_for_coin(coin)
puzzle_reveal = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, innerpuz)
# Use coin info to create solution and add coin and solution to list of CoinSpends
solution = [
innersol,
coin.as_list(),
lineage_proof,
None,
None,
None,
None,
None,
]
list_of_solutions.append(CoinSpend(coin, puzzle_reveal, Program.to(solution)))
aggsig = AugSchemeMPL.aggregate(sigs)
return SpendBundle(list_of_solutions, aggsig)
|
the-stack_106_13805
|
import re
class Github:
"""Checks for issue mentions in the message and responds to them.
Made for the Modmail server."""
async def on_message(self, message):
match = re.match(r'modmail#(\d+)', message.content)
if match:
issue_num = match.group(1)
await message.channel.send(f'https://github.com/kyb3r/modmail/issues/{issue_num}')
def setup(bot):
bot.add_cog(Github())
|
the-stack_106_13806
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
import unittest
import pandas as pd
from cuffnote.mortgages import Mortgage, ExtraMonthlyPrincipal
class TestExtraMonthlyPrincipal(unittest.TestCase):
def setUp(self):
# base mortgage attributes
        self.purchase_price = 200000
        self.down_payment_percent = 0.2
        self.down_payment = self.purchase_price * self.down_payment_percent
        self.loan_amount = self.purchase_price - self.down_payment
self.interest_rate = 0.03375
self.start_date = '2021-1-1'
self.years = 30
self.num_yearly_pmts = 12
# instantiate base mortgage
self.loan = Mortgage(
            self.purchase_price,
self.down_payment_percent,
self.interest_rate,
self.start_date,
self.years,
num_yearly_payments=self.num_yearly_pmts
)
# extra principal attributes
self.xtra_principal = 500
# instantiate mortgage with extra principal
self.loan_xtra_prncpl = ExtraMonthlyPrincipal(
self.loan,
self.xtra_principal
)
def test_00_get_payment_inheritance(self):
self.assertEqual(
self.loan.get_payment(),
self.loan_xtra_prncpl.get_payment()
)
def test_01_get_extra_principal(self):
self.assertEqual(
self.xtra_principal,
self.loan_xtra_prncpl.get_extra_principal()
)
def test_02_set_extra_principal(self):
self.loan_xtra_prncpl.set_extra_principal(400)
self.assertEqual(
400,
self.loan_xtra_prncpl.get_extra_principal()
)
def test_03_get_amortization_table_df_instance(self):
self.assertIsInstance(
self.loan_xtra_prncpl.get_amortization_table(),
pd.DataFrame
)
def test_04_get_amortization_table_extra_principal_col(self):
self.assertIn(
'Extra Principal',
self.loan_xtra_prncpl.get_amortization_table().columns
)
def test_05_get_amortization_table_equal_extra_principal(self):
self.assertEqual(
self.loan_xtra_prncpl.get_extra_principal(),
self.loan_xtra_prncpl.get_amortization_table().loc[1, 'Extra Principal']
)
def test_06_set_extra_principal_start_date(self):
new_start_date = self.loan_xtra_prncpl.get_payment_range()[12].strftime('%Y-%m-%d')
#self.loan_xtra_prncpl.extra_principal_start_date = new_start_date
self.loan_xtra_prncpl.set_extra_principal_start_date(new_start_date)
atable_extra_prncpl_shifted = self.loan_xtra_prncpl.get_amortization_table()
self.assertEqual(
0,
atable_extra_prncpl_shifted.loc[12, 'Extra Principal']
)
self.assertEqual(
self.loan_xtra_prncpl.get_extra_principal(),
atable_extra_prncpl_shifted.loc[13, 'Extra Principal']
)
def test_07_get_payoff_date(self):
# source: https://www.mortgagecalculator.org/calculators/what-if-i-pay-more-calculator.php#top
# using same inputs, number of periods with monthly extra principal is 167
num_periods = 167
self.assertEqual(
self.loan.get_payment_range()[num_periods-1].strftime('%m-%d-%Y'),
self.loan_xtra_prncpl.get_payoff_date()
)
def test_08_get_periods_saved(self):
periods_saved = 360 - 167
self.assertEqual(
periods_saved,
self.loan_xtra_prncpl.get_periods_saved()
)
def test_09_get_time_saved(self):
periods_saved = 360 - 167
time_saved = f"{periods_saved // self.num_yearly_pmts} years, {periods_saved % self.num_yearly_pmts} months"
self.assertEqual(
time_saved,
self.loan_xtra_prncpl.get_time_saved()
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13811
|
# -*- coding: utf-8 -*-
#
# Testing for pl
#
# ------------------------------------------------
# imports
# -------
from .fixtures import add, sleep
# tests
# -----
class TestTaskManagers:
def test_task(self, celery):
# check if task is registered
data = celery.inspect.registered()
worker = list(data.keys())[0]
assert 'tests.fixtures.registered' in data[worker]
# run registered task
assert celery.task.registered()
# run registered task with celery api
task = celery.task.registered.delay()
task.wait()
assert task.result
return
def test_schedule(self, celery):
# assert configuration
assert 'scheduled-task' in celery.controller.conf['CELERYBEAT_SCHEDULE']
schedule = celery.controller.conf['CELERYBEAT_SCHEDULE']['scheduled-task']
assert schedule['task'] == 'tests.fixtures.scheduled'
assert 'crontab' in str(type(schedule['schedule']))
# run scheduled task
assert celery.schedule.scheduled()
# run registered task with celery api
task = celery.schedule.scheduled.delay()
task.wait()
assert task.result
return
class TestCommandManagers:
def test_inspect(self, celery):
celery.submit(sleep).cancel(wait=True)
celery.map(add, [1, 1], [1, 1], [1, 1])
# revoked
workers = celery.inspect.revoked()
revoked = workers[list(workers.keys())[0]]
assert len(revoked) > 0
future = celery.get(revoked[0])
assert revoked[0] == future.id
# stats
stats = celery.inspect.stats()
assert len(stats) > 0
key = list(stats.keys())[0]
stat = stats[key]
assert 'broker' in stat
return
def test_control(self, celery):
workers = celery.control.heartbeat()
beat = workers[list(workers.keys())[0]]
assert beat is None
return
|
the-stack_106_13812
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# The MIT License
#
# Copyright (c) 2016 Grigory Chernyshev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import json
from distutils.version import LooseVersion
from yagocd.resources import Base, BaseManager
from yagocd.util import since
@since('16.10.0')
class TemplateManager(BaseManager):
"""
The template config API allows users with administrator role to manage template config.
`Official documentation. <https://api.go.cd/current/#template-config>`_
:versionadded: 16.10.0.
"""
RESOURCE_PATH = '{base_api}/admin/templates'
ACCEPT_HEADER = 'application/vnd.go.cd.v3+json'
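    # Older GoCD servers only understand earlier versions of the Accept header; see the mapping below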
VERSION_TO_ACCEPT_HEADER = {
'16.10.0': 'application/vnd.go.cd.v1+json',
'16.11.0': 'application/vnd.go.cd.v2+json',
}
def __iter__(self):
"""
        Add the iterator protocol to the manager.
:rtype: list of yagocd.resources.template.TemplateConfig
"""
return iter(self.list())
def __getitem__(self, name):
"""
        Allow getting a template by name using dictionary-like syntax.
:param name: template name.
:rtype: yagocd.resources.template.TemplateConfig
"""
return self.get(name=name)
def list(self):
"""
Lists all available templates with the associated pipelines’ names.
:rtype: list of yagocd.resources.template.TemplateConfig
"""
response = self._session.get(
path=self.RESOURCE_PATH.format(base_api=self.base_api),
headers={'Accept': self._accept_header()}
)
result = list()
data_source = response.json()
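        # From server version 16.11.0 the template list is wrapped in an '_embedded' envelope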
if LooseVersion(self._session.server_version) >= LooseVersion('16.11.0'):
data_source = data_source.get('_embedded', {})
etag = response.headers['ETag']
for data in data_source.get('templates', {}):
result.append(TemplateConfig(session=self._session, data=data, etag=etag))
return result
def get(self, name):
"""
Gets template config for specified template name.
:param name: name of the template.
:rtype: yagocd.resources.template.TemplateConfig
"""
response = self._session.get(
path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),
headers={'Accept': self._accept_header()}
)
etag = response.headers['ETag']
return TemplateConfig(session=self._session, data=response.json(), etag=etag)
def create(self, config):
"""
Creates a template config object.
:param config: new template configuration.
:rtype: yagocd.resources.template.TemplateConfig
"""
response = self._session.post(
path=self.RESOURCE_PATH.format(base_api=self.base_api),
headers={
'Accept': self._accept_header(),
'Content-Type': 'application/json',
},
data=json.dumps(config),
)
etag = response.headers['ETag']
return TemplateConfig(session=self._session, data=response.json(), etag=etag)
def update(self, name, config, etag):
"""
Update template config for specified template name.
:param name: name of the template to update.
:param config: updated template configuration.
:param etag: etag value from current template object.
:rtype: yagocd.resources.template.TemplateConfig
"""
response = self._session.put(
path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),
headers={
'Accept': self._accept_header(),
'Content-Type': 'application/json',
'If-Match': etag,
},
data=json.dumps(config),
)
etag = response.headers['ETag']
return TemplateConfig(session=self._session, data=response.json(), etag=etag)
def delete(self, name):
"""
Deletes a template from the config XML if it is not associated with any pipeline.
:param name: name of template to delete
:return: A message confirmation if the template was deleted.
:rtype: str
"""
response = self._session.delete(
path=self._session.urljoin(self.RESOURCE_PATH, name).format(base_api=self.base_api),
headers={
'Accept': self._accept_header(),
},
)
return response.json().get('message')
class TemplateConfig(Base):
pass
|
the-stack_106_13814
|
# coding=utf-8
'Transforms for dataset augmentation on .example.json objects.'
import random
from metagrok.pkmn.engine.navigation import extract_players
def scramble(item):
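  # Shuffle every pokemon's move order and both benches; slot 0 (the active pokemon) stays in place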
player_moves = [_random_permutation(4) for _ in range(6)]
player_pkmns = [0] + _shuffle(list(range(1, 6)))
opponent_moves = [_random_permutation(4) for _ in range(6)]
opponent_pkmns = [0] + _shuffle(list(range(1, 6)))
reorder(item, player_moves, player_pkmns, opponent_moves, opponent_pkmns)
return item
def reorder(item,
player_moves,
player_pkmns,
opponent_moves,
opponent_pkmns):
'''Shuffles move orders and pokemon orders.
- item: the struct to reorder.
- player_moves, opponent_moves: a list of 6 permutations of the values range(4)
- player_pkmns, opponent_pkmns: a permutation of the values range(6) with v[0] == 0 '''
assert len(player_moves) == 6
for i, perm in enumerate(player_moves):
assert is_perm(perm, 4), 'player_moves[%s] is not a permutation: %s' % (i, perm)
assert is_perm(player_pkmns, 6), 'player_pkmns is not a permutation: %s' % player_pkmns
assert player_pkmns[0] == 0
# TODO: for doubles and triples, player_pkmns[:2] or player_pkmns[:3] need to be fixed
assert len(opponent_moves) == 6
for i, perm in enumerate(opponent_moves):
assert is_perm(perm, 4), 'opponent_moves[%s] is not a permutation: %s' % (i, perm)
assert is_perm(opponent_pkmns, 6), 'opponent_pkmns is not a permutation: %s' % opponent_pkmns
assert opponent_pkmns[0] == 0
state = item['state']
candidates = item['candidates']
probs = item['probs']
player, opponent = extract_players(state)
# Shuffle opponent
for perm, pkmn in zip(opponent_moves, opponent['pokemon']):
pkmn['moveTrack'] = _apply_perm(pkmn['moveTrack'], perm)
opponent['pokemon'] = _apply_perm(opponent['pokemon'], opponent_pkmns)
# Shuffle self
for perm, pkmn in zip(player_moves, player['pokemon']):
pkmn['moveTrack'] = _apply_perm(pkmn['moveTrack'], perm)
player['pokemon'] = _apply_perm(player['pokemon'], player_pkmns)
# check requests
if 'request' in item:
req = item['request']
if 'active' in req and len(req['active']) > 1:
active = req['active'][0]
active['moves'] = _apply_perm(active['moves'], player_moves[0])
if 'side' in req and 'pokemon' in req['side']:
side = req['side']
side['pokemon'] = _apply_perm(side['pokemon'], player_pkmns)
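  # The first 4 candidates are the active pokemon's moves and the next 6 are switches;
  # any remaining groups of 4 are permuted the same way as the active moves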
candidate_perm = player_moves[0] + [4 + pp for pp in player_pkmns]
while len(candidate_perm) < len(candidates):
last = len(candidate_perm)
candidate_perm.extend([last + mp for mp in player_moves[0]])
candidates = [candidates[i] for i in candidate_perm]
for i in range(4, 10):
if candidates[i]:
candidates[i] = 'switch %d' % (i - 3)
item['candidates'] = candidates
item['probs'] = probs[candidate_perm]
item['action'] = candidate_perm.index(item['action'])
def _apply_perm(old, perm):
new = []
for i in perm:
if len(old) > i:
new.append(old[i])
return new
def _random_permutation(n):
return _shuffle(list(range(n)))
def _shuffle(vs):
rv = list(vs)
random.shuffle(rv)
return rv
def is_perm(vs, length = None):
if length is None:
length = len(vs)
seen = [False for _ in range(length)]
for v in vs:
if v >= length:
return False
seen[v] = True
return all(seen)
def main():
import argparse
from metagrok import np_json as json
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type = argparse.FileType('r'))
args = parser.parse_args()
data = json.load(args.input_file)
player_moves = [list(range(4)) for _ in range(6)]
player_pkmns = list(range(6))
opponent_moves = [list(range(4)) for _ in range(6)]
opponent_pkmns = list(range(6))
player_pkmns = [0, 1, 2, 3, 5, 4]
# opponent_moves[1] = [2, 0, 3, 1]
reorder(data, player_moves, player_pkmns, opponent_moves, opponent_pkmns)
print(json.dumps(data, indent = 2))
if __name__ == '__main__':
main()
|
the-stack_106_13817
|
import os
import pygame
import numpy as np
from utils.core import *
from misc.game.utils import *
graphics_dir = 'misc/game/graphics'
_image_library = {}
def get_image(path):
global _image_library
image = _image_library.get(path)
    if image is None:
canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
image = pygame.image.load(canonicalized_path)
_image_library[path] = image
return image
class Game:
def __init__(self, world, sim_agents, play=False):
self._running = True
self.world = world
self.sim_agents = sim_agents
self.current_agent = self.sim_agents[0]
self.play = play
# Visual parameters
self.scale = 80 # num pixels per tile
self.holding_scale = 0.5
self.container_scale = 0.7
self.width = self.scale * self.world.width
self.height = self.scale * self.world.height
self.tile_size = (self.scale, self.scale)
self.holding_size = tuple((self.holding_scale * np.asarray(self.tile_size)).astype(int))
self.container_size = tuple((self.container_scale * np.asarray(self.tile_size)).astype(int))
self.holding_container_size = tuple((self.container_scale * np.asarray(self.holding_size)).astype(int))
#self.font = pygame.font.SysFont('arialttf', 10)
def on_init(self):
pygame.init()
if self.play:
self.screen = pygame.display.set_mode((self.width, self.height))
else:
# Create a hidden surface
self.screen = pygame.Surface((self.width, self.height))
self._running = True
def on_event(self, event):
if event.type == pygame.QUIT:
self._running = False
def on_render(self):
self.screen.fill(Color.FLOOR)
objs = []
# Draw gridsquares
for o_list in self.world.objects.values():
for o in o_list:
if isinstance(o, GridSquare):
self.draw_gridsquare(o)
elif o.is_held == False:
objs.append(o)
# Draw objects not held by agents
for o in objs:
self.draw_object(o)
# Draw agents and their holdings
for agent in self.sim_agents:
self.draw_agent(agent)
if self.play:
pygame.display.flip()
pygame.display.update()
def draw_gridsquare(self, gs):
sl = self.scaled_location(gs.location)
fill = pygame.Rect(sl[0], sl[1], self.scale, self.scale)
if isinstance(gs, Counter):
pygame.draw.rect(self.screen, Color.COUNTER, fill)
pygame.draw.rect(self.screen, Color.COUNTER_BORDER, fill, 1)
elif isinstance(gs, Delivery):
pygame.draw.rect(self.screen, Color.DELIVERY, fill)
self.draw('delivery', self.tile_size, sl)
elif isinstance(gs, Cutboard):
pygame.draw.rect(self.screen, Color.COUNTER, fill)
pygame.draw.rect(self.screen, Color.COUNTER_BORDER, fill, 1)
self.draw('cutboard', self.tile_size, sl)
return
def draw(self, path, size, location):
image_path = '{}/{}.png'.format(graphics_dir, path)
image = pygame.transform.scale(get_image(image_path), size)
self.screen.blit(image, location)
def draw_agent(self, agent):
self.draw('agent-{}'.format(agent.color),
self.tile_size, self.scaled_location(agent.location))
self.draw_agent_object(agent.holding)
def draw_agent_object(self, obj):
# Holding shows up in bottom right corner.
if obj is None: return
if any([isinstance(c, Plate) for c in obj.contents]):
self.draw('Plate', self.holding_size, self.holding_location(obj.location))
if len(obj.contents) > 1:
plate = obj.unmerge('Plate')
self.draw(obj.full_name, self.holding_container_size, self.holding_container_location(obj.location))
obj.merge(plate)
else:
self.draw(obj.full_name, self.holding_size, self.holding_location(obj.location))
def draw_object(self, obj):
if obj is None: return
if any([isinstance(c, Plate) for c in obj.contents]):
self.draw('Plate', self.tile_size, self.scaled_location(obj.location))
if len(obj.contents) > 1:
plate = obj.unmerge('Plate')
self.draw(obj.full_name, self.container_size, self.container_location(obj.location))
obj.merge(plate)
else:
self.draw(obj.full_name, self.tile_size, self.scaled_location(obj.location))
def scaled_location(self, loc):
"""Return top-left corner of scaled location given coordinates loc, e.g. (3, 4)"""
return tuple(self.scale * np.asarray(loc))
def holding_location(self, loc):
"""Return top-left corner of location where agent holding will be drawn (bottom right corner) given coordinates loc, e.g. (3, 4)"""
scaled_loc = self.scaled_location(loc)
return tuple((np.asarray(scaled_loc) + self.scale*(1-self.holding_scale)).astype(int))
def container_location(self, loc):
"""Return top-left corner of location where contained (i.e. plated) object will be drawn, given coordinates loc, e.g. (3, 4)"""
scaled_loc = self.scaled_location(loc)
return tuple((np.asarray(scaled_loc) + self.scale*(1-self.container_scale)/2).astype(int))
def holding_container_location(self, loc):
"""Return top-left corner of location where contained, held object will be drawn given coordinates loc, e.g. (3, 4)"""
scaled_loc = self.scaled_location(loc)
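        # Offset into the bottom-right holding area, then center the smaller container sprite within it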
factor = (1-self.holding_scale) + (1-self.container_scale)/2*self.holding_scale
return tuple((np.asarray(scaled_loc) + self.scale*factor).astype(int))
def on_cleanup(self):
# pygame.display.quit()
pygame.quit()
|
the-stack_106_13820
|
"""Manage config entries in Home Assistant."""
import asyncio
import functools
import logging
from types import MappingProxyType
from typing import Any, Callable, Dict, List, Optional, Set, Union, cast
import weakref
import attr
from homeassistant import data_entry_flow, loader
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.event import Event
from homeassistant.setup import async_process_deps_reqs, async_setup_component
from homeassistant.util.decorator import Registry
import homeassistant.util.uuid as uuid_util
_LOGGER = logging.getLogger(__name__)
_UNDEF: dict = {}
SOURCE_DISCOVERY = "discovery"
SOURCE_HASSIO = "hassio"
SOURCE_HOMEKIT = "homekit"
SOURCE_IMPORT = "import"
SOURCE_INTEGRATION_DISCOVERY = "integration_discovery"
SOURCE_SSDP = "ssdp"
SOURCE_USER = "user"
SOURCE_ZEROCONF = "zeroconf"
# If a user wants to hide a discovery from the UI they can "Ignore" it. The config_entries/ignore_flow
# websocket command creates a config entry with this source and while it exists normal discoveries
# with the same unique id are ignored.
SOURCE_IGNORE = "ignore"
# This is used when a user uses the "Stop Ignoring" button in the UI (the
# config_entries/ignore_flow websocket command). It's triggered after the "ignore" config entry has
# been removed and unloaded.
SOURCE_UNIGNORE = "unignore"
HANDLERS = Registry()
STORAGE_KEY = "core.config_entries"
STORAGE_VERSION = 1
# Deprecated since 0.73
PATH_CONFIG = ".config_entries.json"
SAVE_DELAY = 1
# The config entry has been set up successfully
ENTRY_STATE_LOADED = "loaded"
# There was an error while trying to set up this config entry
ENTRY_STATE_SETUP_ERROR = "setup_error"
# There was an error while trying to migrate the config entry to a new version
ENTRY_STATE_MIGRATION_ERROR = "migration_error"
# The config entry was not ready to be set up yet, but might be later
ENTRY_STATE_SETUP_RETRY = "setup_retry"
# The config entry has not been loaded
ENTRY_STATE_NOT_LOADED = "not_loaded"
# An error occurred when trying to unload the entry
ENTRY_STATE_FAILED_UNLOAD = "failed_unload"
UNRECOVERABLE_STATES = (ENTRY_STATE_MIGRATION_ERROR, ENTRY_STATE_FAILED_UNLOAD)
DEFAULT_DISCOVERY_UNIQUE_ID = "default_discovery_unique_id"
DISCOVERY_NOTIFICATION_ID = "config_entry_discovery"
DISCOVERY_SOURCES = (
SOURCE_SSDP,
SOURCE_ZEROCONF,
SOURCE_DISCOVERY,
SOURCE_IMPORT,
SOURCE_UNIGNORE,
)
EVENT_FLOW_DISCOVERED = "config_entry_discovered"
CONN_CLASS_CLOUD_PUSH = "cloud_push"
CONN_CLASS_CLOUD_POLL = "cloud_poll"
CONN_CLASS_LOCAL_PUSH = "local_push"
CONN_CLASS_LOCAL_POLL = "local_poll"
CONN_CLASS_ASSUMED = "assumed"
CONN_CLASS_UNKNOWN = "unknown"
class ConfigError(HomeAssistantError):
"""Error while configuring an account."""
class UnknownEntry(ConfigError):
"""Unknown entry specified."""
class OperationNotAllowed(ConfigError):
"""Raised when a config entry operation is not allowed."""
UpdateListenerType = Callable[[HomeAssistant, "ConfigEntry"], Any]
class ConfigEntry:
"""Hold a configuration entry."""
__slots__ = (
"entry_id",
"version",
"domain",
"title",
"data",
"options",
"unique_id",
"system_options",
"source",
"connection_class",
"state",
"_setup_lock",
"update_listeners",
"_async_cancel_retry_setup",
)
def __init__(
self,
version: int,
domain: str,
title: str,
data: dict,
source: str,
connection_class: str,
system_options: dict,
options: Optional[dict] = None,
unique_id: Optional[str] = None,
entry_id: Optional[str] = None,
state: str = ENTRY_STATE_NOT_LOADED,
) -> None:
"""Initialize a config entry."""
# Unique id of the config entry
self.entry_id = entry_id or uuid_util.uuid_v1mc_hex()
# Version of the configuration.
self.version = version
# Domain the configuration belongs to
self.domain = domain
# Title of the configuration
self.title = title
# Config data
self.data = MappingProxyType(data)
# Entry options
self.options = MappingProxyType(options or {})
# Entry system options
self.system_options = SystemOptions(**system_options)
# Source of the configuration (user, discovery, cloud)
self.source = source
# Connection class
self.connection_class = connection_class
# State of the entry (LOADED, NOT_LOADED)
self.state = state
# Unique ID of this entry.
self.unique_id = unique_id
# Listeners to call on update
self.update_listeners: List[weakref.ReferenceType[UpdateListenerType]] = []
# Function to cancel a scheduled retry
self._async_cancel_retry_setup: Optional[Callable[[], Any]] = None
async def async_setup(
self,
hass: HomeAssistant,
*,
integration: Optional[loader.Integration] = None,
tries: int = 0,
) -> None:
"""Set up an entry."""
if self.source == SOURCE_IGNORE:
return
if integration is None:
integration = await loader.async_get_integration(hass, self.domain)
try:
component = integration.get_component()
except ImportError as err:
_LOGGER.error(
"Error importing integration %s to set up %s configuration entry: %s",
integration.domain,
self.domain,
err,
)
if self.domain == integration.domain:
self.state = ENTRY_STATE_SETUP_ERROR
return
if self.domain == integration.domain:
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error importing platform config_flow from integration %s to set up %s configuration entry: %s",
integration.domain,
self.domain,
err,
)
self.state = ENTRY_STATE_SETUP_ERROR
return
# Perform migration
if not await self.async_migrate(hass):
self.state = ENTRY_STATE_MIGRATION_ERROR
return
try:
result = await component.async_setup_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_setup_entry did not return boolean", integration.domain
)
result = False
except ConfigEntryNotReady:
self.state = ENTRY_STATE_SETUP_RETRY
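            # Exponential backoff: 5, 10, 20, 40 then 80 seconds between retries (capped at 2**4 * 5)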
wait_time = 2 ** min(tries, 4) * 5
tries += 1
_LOGGER.warning(
"Config entry for %s not ready yet. Retrying in %d seconds",
self.domain,
wait_time,
)
async def setup_again(now: Any) -> None:
"""Run setup again."""
self._async_cancel_retry_setup = None
await self.async_setup(hass, integration=integration, tries=tries)
self._async_cancel_retry_setup = hass.helpers.event.async_call_later(
wait_time, setup_again
)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error setting up entry %s for %s", self.title, integration.domain
)
result = False
# Only store setup result as state if it was not forwarded.
if self.domain != integration.domain:
return
if result:
self.state = ENTRY_STATE_LOADED
else:
self.state = ENTRY_STATE_SETUP_ERROR
async def async_unload(
self, hass: HomeAssistant, *, integration: Optional[loader.Integration] = None
) -> bool:
"""Unload an entry.
Returns if unload is possible and was successful.
"""
if self.source == SOURCE_IGNORE:
self.state = ENTRY_STATE_NOT_LOADED
return True
if integration is None:
try:
integration = await loader.async_get_integration(hass, self.domain)
except loader.IntegrationNotFound:
# The integration was likely a custom_component
# that was uninstalled, or an integration
# that has been renamed without removing the config
# entry.
self.state = ENTRY_STATE_NOT_LOADED
return True
component = integration.get_component()
if integration.domain == self.domain:
if self.state in UNRECOVERABLE_STATES:
return False
if self.state != ENTRY_STATE_LOADED:
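                # The entry never finished loading; cancel any scheduled retry and mark it as not loaded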
if self._async_cancel_retry_setup is not None:
self._async_cancel_retry_setup()
self._async_cancel_retry_setup = None
self.state = ENTRY_STATE_NOT_LOADED
return True
supports_unload = hasattr(component, "async_unload_entry")
if not supports_unload:
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
try:
result = await component.async_unload_entry( # type: ignore
hass, self
)
assert isinstance(result, bool)
# Only adjust state if we unloaded the component
if result and integration.domain == self.domain:
self.state = ENTRY_STATE_NOT_LOADED
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error unloading entry %s for %s", self.title, integration.domain
)
if integration.domain == self.domain:
self.state = ENTRY_STATE_FAILED_UNLOAD
return False
async def async_remove(self, hass: HomeAssistant) -> None:
"""Invoke remove callback on component."""
if self.source == SOURCE_IGNORE:
return
try:
integration = await loader.async_get_integration(hass, self.domain)
except loader.IntegrationNotFound:
# The integration was likely a custom_component
# that was uninstalled, or an integration
# that has been renamed without removing the config
# entry.
return
component = integration.get_component()
if not hasattr(component, "async_remove_entry"):
return
try:
await component.async_remove_entry( # type: ignore
hass, self
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error calling entry remove callback %s for %s",
self.title,
integration.domain,
)
async def async_migrate(self, hass: HomeAssistant) -> bool:
"""Migrate an entry.
Returns True if config entry is up-to-date or has been migrated.
"""
handler = HANDLERS.get(self.domain)
if handler is None:
_LOGGER.error(
"Flow handler not found for entry %s for %s", self.title, self.domain
)
return False
# Handler may be a partial
while isinstance(handler, functools.partial):
handler = handler.func
if self.version == handler.VERSION:
return True
integration = await loader.async_get_integration(hass, self.domain)
component = integration.get_component()
supports_migrate = hasattr(component, "async_migrate_entry")
if not supports_migrate:
_LOGGER.error(
"Migration handler not found for entry %s for %s",
self.title,
self.domain,
)
return False
try:
result = await component.async_migrate_entry( # type: ignore
hass, self
)
if not isinstance(result, bool):
_LOGGER.error(
"%s.async_migrate_entry did not return boolean", self.domain
)
return False
if result:
# pylint: disable=protected-access
hass.config_entries._async_schedule_save()
return result
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Error migrating entry %s for %s", self.title, self.domain
)
return False
def add_update_listener(self, listener: UpdateListenerType) -> CALLBACK_TYPE:
"""Listen for when entry is updated.
Returns function to unlisten.
"""
weak_listener = weakref.ref(listener)
self.update_listeners.append(weak_listener)
return lambda: self.update_listeners.remove(weak_listener)
def as_dict(self) -> Dict[str, Any]:
"""Return dictionary version of this entry."""
return {
"entry_id": self.entry_id,
"version": self.version,
"domain": self.domain,
"title": self.title,
"data": dict(self.data),
"options": dict(self.options),
"system_options": self.system_options.as_dict(),
"source": self.source,
"connection_class": self.connection_class,
"unique_id": self.unique_id,
}
class ConfigEntriesFlowManager(data_entry_flow.FlowManager):
"""Manage all the config entry flows that are in progress."""
def __init__(
self, hass: HomeAssistant, config_entries: "ConfigEntries", hass_config: dict
):
"""Initialize the config entry flow manager."""
super().__init__(hass)
self.config_entries = config_entries
self._hass_config = hass_config
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
) -> Dict[str, Any]:
"""Finish a config flow and add an entry."""
flow = cast(ConfigFlow, flow)
# Remove notification if no other discovery config entries in progress
if not any(
ent["context"]["source"] in DISCOVERY_SOURCES
for ent in self.hass.config_entries.flow.async_progress()
if ent["flow_id"] != flow.flow_id
):
self.hass.components.persistent_notification.async_dismiss(
DISCOVERY_NOTIFICATION_ID
)
if result["type"] != data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
return result
# Check if config entry exists with unique ID. Unload it.
existing_entry = None
if flow.unique_id is not None:
# Abort all flows in progress with same unique ID.
for progress_flow in self.async_progress():
if (
progress_flow["handler"] == flow.handler
and progress_flow["flow_id"] != flow.flow_id
and progress_flow["context"].get("unique_id") == flow.unique_id
):
self.async_abort(progress_flow["flow_id"])
# Reset unique ID when the default discovery ID has been used
if flow.unique_id == DEFAULT_DISCOVERY_UNIQUE_ID:
await flow.async_set_unique_id(None)
# Find existing entry.
for check_entry in self.config_entries.async_entries(result["handler"]):
if check_entry.unique_id == flow.unique_id:
existing_entry = check_entry
break
# Unload the entry before setting up the new one.
# We will remove it only after the other one is set up,
# so that device customizations are not getting lost.
if (
existing_entry is not None
and existing_entry.state not in UNRECOVERABLE_STATES
):
await self.config_entries.async_unload(existing_entry.entry_id)
entry = ConfigEntry(
version=result["version"],
domain=result["handler"],
title=result["title"],
data=result["data"],
options={},
system_options={},
source=flow.context["source"],
connection_class=flow.CONNECTION_CLASS,
unique_id=flow.unique_id,
)
await self.config_entries.async_add(entry)
if existing_entry is not None:
await self.config_entries.async_remove(existing_entry.entry_id)
result["result"] = entry
return result
async def async_create_flow(
self, handler_key: Any, *, context: Optional[Dict] = None, data: Any = None
) -> "ConfigFlow":
"""Create a flow for specified handler.
Handler key is the domain of the component that we want to set up.
"""
try:
integration = await loader.async_get_integration(self.hass, handler_key)
except loader.IntegrationNotFound:
_LOGGER.error("Cannot find integration %s", handler_key)
raise data_entry_flow.UnknownHandler
# Make sure requirements and dependencies of component are resolved
await async_process_deps_reqs(self.hass, self._hass_config, integration)
try:
integration.get_platform("config_flow")
except ImportError as err:
_LOGGER.error(
"Error occurred loading configuration flow for integration %s: %s",
handler_key,
err,
)
raise data_entry_flow.UnknownHandler
handler = HANDLERS.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
if not context or "source" not in context:
raise KeyError("Context not set or doesn't have a source set")
flow = cast(ConfigFlow, handler())
flow.init_step = context["source"]
return flow
async def async_post_init(
self, flow: data_entry_flow.FlowHandler, result: dict
) -> None:
"""After a flow is initialised trigger new flow notifications."""
source = flow.context["source"]
# Create notification.
if source in DISCOVERY_SOURCES:
self.hass.bus.async_fire(EVENT_FLOW_DISCOVERED)
self.hass.components.persistent_notification.async_create(
title="New devices discovered",
message=(
"We have discovered new devices on your network. "
"[Check it out](/config/integrations)"
),
notification_id=DISCOVERY_NOTIFICATION_ID,
)
class ConfigEntries:
"""Manage the configuration entries.
An instance of this object is available via `hass.config_entries`.
"""
def __init__(self, hass: HomeAssistant, hass_config: dict) -> None:
"""Initialize the entry manager."""
self.hass = hass
self.flow = ConfigEntriesFlowManager(hass, self, hass_config)
self.options = OptionsFlowManager(hass)
self._hass_config = hass_config
self._entries: List[ConfigEntry] = []
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
EntityRegistryDisabledHandler(hass).async_setup()
@callback
def async_domains(self) -> List[str]:
"""Return domains for which we have entries."""
seen: Set[str] = set()
result = []
for entry in self._entries:
if entry.domain not in seen:
seen.add(entry.domain)
result.append(entry.domain)
return result
@callback
def async_get_entry(self, entry_id: str) -> Optional[ConfigEntry]:
"""Return entry with matching entry_id."""
for entry in self._entries:
if entry_id == entry.entry_id:
return entry
return None
@callback
def async_entries(self, domain: Optional[str] = None) -> List[ConfigEntry]:
"""Return all entries or entries for a specific domain."""
if domain is None:
return list(self._entries)
return [entry for entry in self._entries if entry.domain == domain]
async def async_add(self, entry: ConfigEntry) -> None:
"""Add and setup an entry."""
self._entries.append(entry)
await self.async_setup(entry.entry_id)
self._async_schedule_save()
async def async_remove(self, entry_id: str) -> Dict[str, Any]:
"""Remove an entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
unload_success = entry.state != ENTRY_STATE_FAILED_UNLOAD
else:
unload_success = await self.async_unload(entry_id)
await entry.async_remove(self.hass)
self._entries.remove(entry)
self._async_schedule_save()
dev_reg, ent_reg = await asyncio.gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
)
dev_reg.async_clear_config_entry(entry_id)
ent_reg.async_clear_config_entry(entry_id)
# After we have fully removed an "ignore" config entry we can try and rediscover it so that a
# user is able to immediately start configuring it. We do this by starting a new flow with
# the 'unignore' step. If the integration doesn't implement async_step_unignore then
# this will be a no-op.
if entry.source == SOURCE_IGNORE:
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
entry.domain,
context={"source": SOURCE_UNIGNORE},
data={"unique_id": entry.unique_id},
)
)
return {"require_restart": not unload_success}
async def async_initialize(self) -> None:
"""Initialize config entry config."""
# Migrating for config entries stored before 0.73
config = await self.hass.helpers.storage.async_migrator(
self.hass.config.path(PATH_CONFIG),
self._store,
old_conf_migrate_func=_old_conf_migrator,
)
if config is None:
self._entries = []
return
self._entries = [
ConfigEntry(
version=entry["version"],
domain=entry["domain"],
entry_id=entry["entry_id"],
data=entry["data"],
source=entry["source"],
title=entry["title"],
# New in 0.79
connection_class=entry.get("connection_class", CONN_CLASS_UNKNOWN),
# New in 0.89
options=entry.get("options"),
# New in 0.98
system_options=entry.get("system_options", {}),
# New in 0.104
unique_id=entry.get("unique_id"),
)
for entry in config["entries"]
]
async def async_setup(self, entry_id: str) -> bool:
"""Set up a config entry.
Return True if entry has been successfully loaded.
"""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state != ENTRY_STATE_NOT_LOADED:
raise OperationNotAllowed
# Setup Component if not set up yet
if entry.domain in self.hass.config.components:
await entry.async_setup(self.hass)
else:
# Setting up the component will set up all its config entries
result = await async_setup_component(
self.hass, entry.domain, self._hass_config
)
if not result:
return result
return entry.state == ENTRY_STATE_LOADED
async def async_unload(self, entry_id: str) -> bool:
"""Unload a config entry."""
entry = self.async_get_entry(entry_id)
if entry is None:
raise UnknownEntry
if entry.state in UNRECOVERABLE_STATES:
raise OperationNotAllowed
return await entry.async_unload(self.hass)
async def async_reload(self, entry_id: str) -> bool:
"""Reload an entry.
If an entry was not loaded, will just load.
"""
unload_result = await self.async_unload(entry_id)
if not unload_result:
return unload_result
return await self.async_setup(entry_id)
@callback
def async_update_entry(
self,
entry: ConfigEntry,
*,
unique_id: Union[str, dict, None] = _UNDEF,
title: Union[str, dict] = _UNDEF,
data: dict = _UNDEF,
options: dict = _UNDEF,
system_options: dict = _UNDEF,
) -> bool:
"""Update a config entry.
If the entry was changed, the update_listeners are
fired and this function returns True
If the entry was not changed, the update_listeners are
not fired and this function returns False
"""
changed = False
if unique_id is not _UNDEF and entry.unique_id != unique_id:
changed = True
entry.unique_id = cast(Optional[str], unique_id)
if title is not _UNDEF and entry.title != title:
changed = True
entry.title = cast(str, title)
if data is not _UNDEF and entry.data != data: # type: ignore
changed = True
entry.data = MappingProxyType(data)
if options is not _UNDEF and entry.options != options: # type: ignore
changed = True
entry.options = MappingProxyType(options)
if (
system_options is not _UNDEF
and entry.system_options.as_dict() != system_options
):
changed = True
entry.system_options.update(**system_options)
if not changed:
return False
for listener_ref in entry.update_listeners:
listener = listener_ref()
if listener is not None:
self.hass.async_create_task(listener(self.hass, entry))
self._async_schedule_save()
return True
async def async_forward_entry_setup(self, entry: ConfigEntry, domain: str) -> bool:
"""Forward the setup of an entry to a different component.
By default an entry is setup with the component it belongs to. If that
component also has related platforms, the component will have to
forward the entry to be setup by that component.
You don't want to await this coroutine if it is called as part of the
setup of a component, because it can cause a deadlock.
"""
# Setup Component if not set up yet
if domain not in self.hass.config.components:
result = await async_setup_component(self.hass, domain, self._hass_config)
if not result:
return False
integration = await loader.async_get_integration(self.hass, domain)
await entry.async_setup(self.hass, integration=integration)
return True
async def async_forward_entry_unload(self, entry: ConfigEntry, domain: str) -> bool:
"""Forward the unloading of an entry to a different component."""
# It was never loaded.
if domain not in self.hass.config.components:
return True
integration = await loader.async_get_integration(self.hass, domain)
return await entry.async_unload(self.hass, integration=integration)
@callback
def _async_schedule_save(self) -> None:
"""Save the entity registry to a file."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> Dict[str, List[Dict[str, Any]]]:
"""Return data to save."""
return {"entries": [entry.as_dict() for entry in self._entries]}
async def _old_conf_migrator(old_config: Dict[str, Any]) -> Dict[str, Any]:
"""Migrate the pre-0.73 config format to the latest version."""
return {"entries": old_config}
class ConfigFlow(data_entry_flow.FlowHandler):
"""Base class for config flows with some helpers."""
def __init_subclass__(cls, domain: Optional[str] = None, **kwargs: Any) -> None:
"""Initialize a subclass, register if possible."""
super().__init_subclass__(**kwargs) # type: ignore
if domain is not None:
HANDLERS.register(domain)(cls)
CONNECTION_CLASS = CONN_CLASS_UNKNOWN
@property
def unique_id(self) -> Optional[str]:
"""Return unique ID if available."""
# pylint: disable=no-member
if not self.context:
return None
return cast(Optional[str], self.context.get("unique_id"))
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> "OptionsFlow":
"""Get the options flow for this handler."""
raise data_entry_flow.UnknownHandler
@callback
def _abort_if_unique_id_configured(
self, updates: Optional[Dict[Any, Any]] = None, reload_on_update: bool = True,
) -> None:
"""Abort if the unique ID is already configured."""
assert self.hass
if self.unique_id is None:
return
for entry in self._async_current_entries():
if entry.unique_id == self.unique_id:
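                # Optionally merge the new data into the existing entry (reloading it if requested) before aborting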
if updates is not None:
changed = self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
if (
changed
and reload_on_update
and entry.state in (ENTRY_STATE_LOADED, ENTRY_STATE_SETUP_RETRY)
):
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
raise data_entry_flow.AbortFlow("already_configured")
async def async_set_unique_id(
self, unique_id: Optional[str] = None, *, raise_on_progress: bool = True
) -> Optional[ConfigEntry]:
"""Set a unique ID for the config flow.
Returns optionally existing config entry with same ID.
"""
if unique_id is None:
self.context["unique_id"] = None # pylint: disable=no-member
return None
if raise_on_progress:
for progress in self._async_in_progress():
if progress["context"].get("unique_id") == unique_id:
raise data_entry_flow.AbortFlow("already_in_progress")
self.context["unique_id"] = unique_id # pylint: disable=no-member
# Abort discoveries done using the default discovery unique id
assert self.hass is not None
if unique_id != DEFAULT_DISCOVERY_UNIQUE_ID:
for progress in self._async_in_progress():
if progress["context"].get("unique_id") == DEFAULT_DISCOVERY_UNIQUE_ID:
self.hass.config_entries.flow.async_abort(progress["flow_id"])
for entry in self._async_current_entries():
if entry.unique_id == unique_id:
return entry
return None
@callback
def _async_current_entries(self) -> List[ConfigEntry]:
"""Return current entries."""
assert self.hass is not None
return self.hass.config_entries.async_entries(self.handler)
@callback
def _async_current_ids(self, include_ignore: bool = True) -> Set[Optional[str]]:
"""Return current unique IDs."""
assert self.hass is not None
return {
entry.unique_id
for entry in self.hass.config_entries.async_entries(self.handler)
if include_ignore or entry.source != SOURCE_IGNORE
}
@callback
def _async_in_progress(self) -> List[Dict]:
"""Return other in progress flows for current domain."""
assert self.hass is not None
return [
flw
for flw in self.hass.config_entries.flow.async_progress()
if flw["handler"] == self.handler and flw["flow_id"] != self.flow_id
]
async def async_step_ignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
"""Ignore this config flow."""
await self.async_set_unique_id(user_input["unique_id"], raise_on_progress=False)
return self.async_create_entry(title="Ignored", data={})
async def async_step_unignore(self, user_input: Dict[str, Any]) -> Dict[str, Any]:
"""Rediscover a config entry by it's unique_id."""
return self.async_abort(reason="not_implemented")
async def async_step_user(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by the user."""
return self.async_abort(reason="not_implemented")
async def _async_handle_discovery_without_unique_id(self) -> None:
"""Mark this flow discovered, without a unique identifier.
        If a flow initiated by discovery doesn't have a unique ID, this can
be used alternatively. It will ensure only 1 flow is started and only
when the handler has no existing config entries.
It ensures that the discovery can be ignored by the user.
"""
if self.unique_id is not None:
return
# Abort if the handler has config entries already
if self._async_current_entries():
raise data_entry_flow.AbortFlow("already_configured")
        # Use a special unique id to differentiate
await self.async_set_unique_id(DEFAULT_DISCOVERY_UNIQUE_ID)
self._abort_if_unique_id_configured()
# Abort if any other flow for this handler is already in progress
assert self.hass is not None
if self._async_in_progress():
raise data_entry_flow.AbortFlow("already_in_progress")
async def async_step_discovery(
self, discovery_info: Dict[str, Any]
) -> Dict[str, Any]:
"""Handle a flow initialized by discovery."""
await self._async_handle_discovery_without_unique_id()
return await self.async_step_user()
async_step_hassio = async_step_discovery
async_step_homekit = async_step_discovery
async_step_ssdp = async_step_discovery
async_step_zeroconf = async_step_discovery
class OptionsFlowManager(data_entry_flow.FlowManager):
"""Flow to set options for a configuration entry."""
async def async_create_flow(
self,
handler_key: Any,
*,
context: Optional[Dict[str, Any]] = None,
data: Optional[Dict[str, Any]] = None,
) -> "OptionsFlow":
"""Create an options flow for a config entry.
Entry_id and flow.handler are the same value, used to map an entry to its flow.
"""
entry = self.hass.config_entries.async_get_entry(handler_key)
if entry is None:
raise UnknownEntry(handler_key)
if entry.domain not in HANDLERS:
raise data_entry_flow.UnknownHandler
return cast(OptionsFlow, HANDLERS[entry.domain].async_get_options_flow(entry))
async def async_finish_flow(
self, flow: data_entry_flow.FlowHandler, result: Dict[str, Any]
) -> Dict[str, Any]:
"""Finish an options flow and update options for configuration entry.
Flow.handler and entry_id are the same value, used to map a flow to its entry.
"""
flow = cast(OptionsFlow, flow)
entry = self.hass.config_entries.async_get_entry(flow.handler)
if entry is None:
raise UnknownEntry(flow.handler)
if result["data"] is not None:
self.hass.config_entries.async_update_entry(entry, options=result["data"])
result["result"] = True
return result
class OptionsFlow(data_entry_flow.FlowHandler):
"""Base class for config option flows."""
handler: str
@attr.s(slots=True)
class SystemOptions:
"""Config entry system options."""
disable_new_entities: bool = attr.ib(default=False)
def update(self, *, disable_new_entities: bool) -> None:
"""Update properties."""
self.disable_new_entities = disable_new_entities
def as_dict(self) -> Dict[str, Any]:
"""Return dictionary version of this config entries system options."""
return {"disable_new_entities": self.disable_new_entities}
class EntityRegistryDisabledHandler:
"""Handler to handle when entities related to config entries updating disabled_by."""
RELOAD_AFTER_UPDATE_DELAY = 30
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize the handler."""
self.hass = hass
self.registry: Optional[entity_registry.EntityRegistry] = None
self.changed: Set[str] = set()
self._remove_call_later: Optional[Callable[[], None]] = None
@callback
def async_setup(self) -> None:
"""Set up the disable handler."""
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED, self._handle_entry_updated
)
async def _handle_entry_updated(self, event: Event) -> None:
"""Handle entity registry entry update."""
if (
event.data["action"] != "update"
or "disabled_by" not in event.data["changes"]
):
return
if self.registry is None:
self.registry = await entity_registry.async_get_registry(self.hass)
entity_entry = self.registry.async_get(event.data["entity_id"])
if (
# Stop if no entry found
entity_entry is None
# Stop if entry not connected to config entry
or entity_entry.config_entry_id is None
# Stop if the entry got disabled. In that case the entity handles it
# itself.
or entity_entry.disabled_by
):
return
config_entry = self.hass.config_entries.async_get_entry(
entity_entry.config_entry_id
)
assert config_entry is not None
if config_entry.entry_id not in self.changed and await support_entry_unload(
self.hass, config_entry.domain
):
self.changed.add(config_entry.entry_id)
if not self.changed:
return
# We are going to delay reloading on *every* entity registry change so that
# if a user is happily clicking along, it will only reload at the end.
if self._remove_call_later:
self._remove_call_later()
self._remove_call_later = self.hass.helpers.event.async_call_later(
self.RELOAD_AFTER_UPDATE_DELAY, self._handle_reload
)
async def _handle_reload(self, _now: Any) -> None:
"""Handle a reload."""
self._remove_call_later = None
to_reload = self.changed
self.changed = set()
_LOGGER.info(
"Reloading configuration entries because disabled_by changed in entity registry: %s",
", ".join(self.changed),
)
await asyncio.gather(
*[self.hass.config_entries.async_reload(entry_id) for entry_id in to_reload]
)
async def support_entry_unload(hass: HomeAssistant, domain: str) -> bool:
"""Test if a domain supports entry unloading."""
integration = await loader.async_get_integration(hass, domain)
component = integration.get_component()
return hasattr(component, "async_unload_entry")
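# Illustrative sketch (not part of the original module): a hypothetical
# integration's discovery step typically pairs async_set_unique_id() with
# _abort_if_unique_id_configured() so that rediscovering a known device
# updates the existing entry instead of creating a duplicate. The flow class,
# domain and discovery_info keys below are assumptions for illustration only.
#
#     class ExampleDeviceFlow(ConfigFlow, domain="example_device"):
#         async def async_step_zeroconf(self, discovery_info):
#             await self.async_set_unique_id(discovery_info["serial"])
#             self._abort_if_unique_id_configured(
#                 updates={"host": discovery_info["host"]}
#             )
#             return await self.async_step_user()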
|
the-stack_106_13822
|
# =============================================================================
# SOURCE CODE
# """"
# p = subprocess.Popen(["python", "index.py"])
# returncode = p.wait()
# print ("Process ID of subprocess %s" % p.pid)
#
# # Send SIGTER (on Linux)
# p.terminate()
# # Wait for process to terminate
# returncode = p.wait()
# print ("Returncode of subprocess: %s" % returncode)
#
# subprocess.call(['python', 'father.py'])
# =============================================================================
import subprocess
import time
import psutil
import pandas as pd
import autoswitches
master = pd.read_csv(r'dbs\master.csv')
orgs = list(master['names'])
machs = list(master['machines'])
def kill(proc_pid):
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
def setup():
files=["father.py","master_init.py","mother_adp.py","mother_agents.py", "mother_map.py","mother_xmls.py"]
for file in files:
p = subprocess.Popen(["python",file])
if (file=='index.py'):
#print(f"{file} done")
#print("starting view....")
break
returncode = p.wait()
#print(f"{file} done")
setup()
autoswitches.startAgent()
##print("starting agents...")
time.sleep(3+len(orgs)/2)
autoswitches.startAdps()
#print("starting adapters...")
#def killall():
# for i in pidList:
# kill(i)
#
#while True:
# cmd = input("What?")
# if (cmd=='kill'):
# p.terminate()
# returncode = p.wait()
# autoswitches.stopAgent()
# print ("stopping view")
# break
# else:
# print("invalid cmd")
|
the-stack_106_13823
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from shared_initialization import *
class BeforeStartHook(Hook):
def hook(self, env):
import params
self.run_custom_hook('before-ANY')
self.run_custom_hook('after-INSTALL')
env.set_params(params)
setup_hadoop()
setup_database()
setup_configs()
create_javahome_symlink()
if __name__ == "__main__":
BeforeStartHook().execute()
|
the-stack_106_13824
|
import click, json, os, sys
import cw.server
@click.command(short_help="get metadata of a workflow")
@click.argument("workflow-id", required=True, nargs=1)
def metadata_cmd(workflow_id):
"""
Get Workflow Metadata
"""
server = cw.server.server_factory()
url = f"{server.url()}/api/workflows/v1/{workflow_id}/metadata?excludeKey=submittedFiles&expandSubWorkflows=true"
response = server.query(url)
if not response or not response.ok:
sys.stderr.write(f"Failed to get response from server at {url}\n")
return 3
sys.stdout.write(f"{json.dumps(json.loads(response.content.decode()), indent=4)}")
|
the-stack_106_13825
|
import os
import socket
import ssl
import sys
import threading
from collections import namedtuple
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from urllib.request import _parse_proxy
from .modifier import RequestModifier
from .storage import RequestStorage
class BoundedThreadingMixin(ThreadingMixIn):
"""Mix-in class that allows for a maximum number of threads to handle requests."""
def __init__(self, max_threads, *args, **kwargs):
self.sema = threading.BoundedSemaphore(value=max_threads)
super().__init__(*args, **kwargs)
def process_request_thread(self, request, client_address):
super().process_request_thread(request, client_address)
self.sema.release()
def process_request(self, request, client_address):
t = threading.Thread(target=self.process_request_thread,
args=(request, client_address))
t.daemon = self.daemon_threads
if not t.daemon and self._block_on_close:
if self._threads is None:
self._threads = []
self._threads.append(t)
self.sema.acquire()
t.start()
class ProxyHTTPServer(BoundedThreadingMixin, HTTPServer):
address_family = socket.AF_INET
daemon_threads = True
def __init__(self, *args, proxy_config=None, options=None, **kwargs):
# The server's upstream proxy configuration (if any)
self.proxy_config = self._sanitise_proxy_config(
self._merge_with_env(proxy_config or {}))
# Additional configuration
self.options = options or {}
# Used to store captured requests
self.storage = RequestStorage(
base_dir=self.options.pop('request_storage_base_dir', None)
)
# Used to modify requests/responses passing through the server
self.modifier = RequestModifier()
# The scope of requests we're interested in capturing.
self.scopes = []
super().__init__(self.options.get('max_threads', 9999), *args, **kwargs)
def _merge_with_env(self, proxy_config):
"""Merge upstream proxy configuration with configuration loaded
from the environment.
"""
http_proxy = os.environ.get('HTTP_PROXY')
https_proxy = os.environ.get('HTTPS_PROXY')
no_proxy = os.environ.get('NO_PROXY')
merged = {}
if http_proxy:
merged['http'] = http_proxy
if https_proxy:
merged['https'] = https_proxy
if no_proxy:
merged['no_proxy'] = no_proxy
merged.update(proxy_config)
return merged
def _sanitise_proxy_config(self, proxy_config):
"""Parse the proxy configuration into something more usable."""
conf = namedtuple('ProxyConf', 'scheme username password hostport')
for proxy_type in ('http', 'https'):
# Parse the upstream proxy URL into (scheme, username, password, hostport)
# for ease of access.
if proxy_config.get(proxy_type) is not None:
proxy_config[proxy_type] = conf(*_parse_proxy(proxy_config[proxy_type]))
return proxy_config
def shutdown(self):
super().shutdown()
self.storage.cleanup()
def handle_error(self, request, client_address):
# Suppress socket/ssl related errors
cls, e = sys.exc_info()[:2]
if issubclass(cls, socket.error) or issubclass(cls, ssl.SSLError):
pass
else:
return HTTPServer.handle_error(self, request, client_address)
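# Illustrative sketch (not part of the original module): the semaphore-based
# cap used by BoundedThreadingMixin, shown standalone with plain threads.
# Each worker must acquire a slot before it starts and releases it when done,
# so at most `max_threads` handlers run concurrently.
def _bounded_workers_demo(max_threads=2, jobs=5):
    sema = threading.BoundedSemaphore(value=max_threads)

    def worker(i):
        try:
            pass  # handle one "request" here
        finally:
            sema.release()  # free the slot, as in process_request_thread()

    threads = []
    for i in range(jobs):
        sema.acquire()  # block until a slot is free, as in process_request()
        t = threading.Thread(target=worker, args=(i,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()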
|
the-stack_106_13827
|
import datetime
import hashlib
import itertools
import logging
import os
import time
from collections import defaultdict
from dataclasses import asdict, dataclass, field
from operator import itemgetter
from typing import (
AbstractSet,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import orjson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import override as override_language
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_delivery_email_cache_key,
)
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import check_emoji_request, emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import (
InvitationError,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
ZephyrMessageAlreadySentException,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MessageRenderingResult, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.mention import MentionData
from zerver.lib.message import (
MessageDict,
SendMessageRequest,
access_message,
get_last_message_id,
normalize_body,
render_markdown,
truncate_topic,
update_first_visible_message_id,
wildcard_mention_allowed,
)
from zerver.lib.notification_data import UserMessageNotificationsData, get_user_group_mentions_data
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
SubInfo,
bulk_get_private_peers,
bulk_get_subscriber_peer_info,
get_active_subscriptions_for_stream_id,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
get_subscriptions_for_send_message,
get_user_ids_for_streams,
num_subscribers_for_stream_id,
subscriber_ids_with_stream_history_access,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_by_id,
access_stream_for_send_message,
can_access_stream_user_ids,
check_stream_access_based_on_stream_post_policy,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.timezone import canonicalize_timezone
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_edit_history,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_mutes import add_user_mute, get_muting_users, get_user_mutes
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions, is_widget_message
from zerver.models import (
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
MutedUser,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_realm_playgrounds,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
linkifiers_for_realm,
query_for_ids,
realm_filters_for_realm,
validate_attachment_request,
)
from zerver.tornado.django_api import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
downgrade_now_without_creating_additional_invoices,
update_license_ledger_if_needed,
)
@dataclass
class SubscriptionInfo:
subscriptions: List[Dict[str, Any]]
unsubscribed: List[Dict[str, Any]]
never_subscribed: List[Dict[str, Any]]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90",
"#fae589",
"#a6c7e5",
"#e79ab5",
"#bfd56f",
"#f4ae55",
"#b0a5fd",
"#addfe5",
"#f5ce6e",
"#c2726a",
"#94c849",
"#bd86e5",
"#ee7e4a",
"#a6dcbf",
"#95a5fd",
"#53a063",
"#9987e1",
"#e4523d",
"#c2c2c2",
"#4f8de4",
"#c6a8ad",
"#e7cc4d",
"#c8bebf",
"#a47462",
]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {"id": user_id, "flags": ["read"]}
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream
and user_profile.default_sending_stream.invite_only
or user_profile.default_events_register_stream
and user_profile.default_events_register_stream.invite_only
)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {
str(UserProfile.ROLE_REALM_ADMINISTRATOR): 0,
str(UserProfile.ROLE_REALM_OWNER): 0,
str(UserProfile.ROLE_MODERATOR): 0,
str(UserProfile.ROLE_MEMBER): 0,
str(UserProfile.ROLE_GUEST): 0,
}
for value_dict in list(
UserProfile.objects.filter(realm=realm, is_bot=False, is_active=True)
.values("role")
.annotate(Count("role"))
):
human_counts[str(value_dict["role"])] = value_dict["role__count"]
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
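# Illustrative note (not part of the original module): the
# .values("role").annotate(Count("role")) query above yields one dict per
# distinct role, roughly [{"role": UserProfile.ROLE_MEMBER, "role__count": 3}, ...],
# which is then folded into human_counts keyed by str(role).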
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def send_message_to_signup_notification_stream(
sender: UserProfile, realm: Realm, message: str, topic_name: str = _("signups")
) -> None:
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream is None:
return
with override_language(realm.default_language):
internal_send_stream_message(sender, signup_notifications_stream, topic_name, message)
def notify_new_user(user_profile: UserProfile) -> None:
user_count = realm_user_count(user_profile.realm)
sender = get_system_bot(settings.NOTIFICATION_BOT)
is_first_user = user_count == 1
if not is_first_user:
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"@_**{user_profile.full_name}|{user_profile.id}**", user_count=user_count
)
if settings.BILLING_ENABLED:
from corporate.lib.registration import generate_licenses_low_warning_message_if_required
licenses_low_warning_message = generate_licenses_low_warning_message_if_required(
user_profile.realm
)
if licenses_low_warning_message is not None:
message += "\n"
message += licenses_low_warning_message
send_message_to_signup_notification_stream(sender, user_profile.realm, message)
# We also send a notification to the Zulip administrative realm
admin_realm = sender.realm
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
# We intentionally use the same strings as above to avoid translation burden.
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>", user_count=user_count
)
internal_send_stream_message(
sender, signups_stream, user_profile.realm.display_subdomain, message
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(
recipient_id__in=recipient_ids, date_sent__gt=one_week_ago
).order_by("-id")
message_ids_to_use = list(
reversed(recent_messages.values_list("id", flat=True)[0:ONBOARDING_TOTAL_MESSAGES])
)
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(
UserMessage.objects.filter(
message_id__in=message_ids_to_use, user_profile=user_profile
).values_list("message_id", flat=True)
)
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
def process_new_human_user(
user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
realm_creation: bool = False,
) -> None:
realm = user_profile.realm
mit_beta_user = realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams = prereg_user.streams.all()
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(
realm,
streams,
[user_profile],
from_user_creation=True,
acting_user=acting_user,
)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>"
),
)
revoke_preregistration_users(user_profile, prereg_user, realm_creation)
if not realm_creation and prereg_user is not None and prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
def revoke_preregistration_users(
created_user_profile: UserProfile,
used_preregistration_user: Optional[PreregistrationUser],
realm_creation: bool,
) -> None:
if used_preregistration_user is None:
assert not realm_creation, "realm_creation should only happen with a PreregistrationUser"
if used_preregistration_user is not None:
used_preregistration_user.status = confirmation_settings.STATUS_ACTIVE
used_preregistration_user.save(update_fields=["status"])
# In the special case of realm creation, there can be no additional PreregistrationUser
# for us to want to modify - because other realm_creation PreregistrationUsers should be
# left usable for creating different realms.
if realm_creation:
return
# Mark any other PreregistrationUsers in the realm for this email
# address as revoked, so that we keep track of which
# PreregistrationUser was actually used, for analytics.
if used_preregistration_user is not None:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).exclude(id=used_preregistration_user.id).update(
status=confirmation_settings.STATUS_REVOKED
)
else:
PreregistrationUser.objects.filter(
email__iexact=created_user_profile.delivery_email, realm=created_user_profile.realm
).update(status=confirmation_settings.STATUS_REVOKED)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(
user_profile.realm,
user_profile,
user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar and
# user_avatar_url_field_optional = False :(
client_gravatar=False,
user_avatar_url_field_optional=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={},
)
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(
email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services=get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot["owner_id"] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(
realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None
) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(
email: str,
password: Optional[str],
realm: Realm,
full_name: str,
bot_type: Optional[int] = None,
role: Optional[int] = None,
bot_owner: Optional[UserProfile] = None,
tos_version: Optional[str] = None,
timezone: str = "",
avatar_source: str = UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream] = None,
default_events_register_stream: Optional[Stream] = None,
default_all_public_streams: Optional[bool] = None,
prereg_user: Optional[PreregistrationUser] = None,
default_stream_groups: Sequence[DefaultStreamGroup] = [],
source_profile: Optional[UserProfile] = None,
realm_creation: bool = False,
*,
acting_user: Optional[UserProfile],
) -> UserProfile:
user_profile = create_user(
email=email,
password=password,
realm=realm,
full_name=full_name,
role=role,
bot_type=bot_type,
bot_owner=bot_owner,
tos_version=tos_version,
timezone=timezone,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile,
)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
if realm_creation:
# If this user just created a realm, make sure they are
# properly tagged as the creator of the realm.
realm_creation_audit_log = (
RealmAuditLog.objects.filter(event_type=RealmAuditLog.REALM_CREATED, realm=realm)
.order_by("id")
.last()
)
realm_creation_audit_log.acting_user = user_profile
realm_creation_audit_log.save(update_fields=["acting_user"])
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(
user_profile,
prereg_user=prereg_user,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation,
)
return user_profile
def do_activate_user(user_profile: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
with transaction.atomic():
change_user_is_active(user_profile, True)
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(
update_fields=["date_joined", "password", "is_mirror_dummy", "tos_version"]
)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
with transaction.atomic():
change_user_is_active(user_profile, True)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(
realm: Realm, name: str, value: Any, *, acting_user: Optional[UserProfile]
) -> None:
"""Takes in a realm object, the name of an attribute to update, the
value to update, and the user who initiated the update.
"""
property_type = Realm.property_types[name]
assert isinstance(
value, property_type
), f"Cannot update {name}: {value} is not an instance of {property_type}"
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type="realm",
op="update",
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
# We use real email addresses on UserProfile.email only if
# EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so a change
# between the other visibility values never requires updating
# that field; we can save the work and return here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile)
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
UserProfile.objects.bulk_update(user_profiles, ["email"])
for user_profile in user_profiles:
flush_user_profile(sender=UserProfile, instance=user_profile)
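# Illustrative sketch (not part of the original module): a typical call
# updates one realm attribute and lets this function handle the audit log
# entry and the client event, e.g.
#
#     do_set_realm_property(realm, "invite_required", True, acting_user=None)
#
# The property name must appear in Realm.property_types and the value must
# match its declared type, or the assertion above fails.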
def do_set_realm_authentication_methods(
realm: Realm, authentication_methods: Dict[str, bool], *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.authentication_methods_dict()
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=["authentication_methods"])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: updated_value,
"property": "authentication_methods",
}
).decode(),
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(
realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
edit_topic_policy: int,
*,
acting_user: Optional[UserProfile],
) -> None:
old_values = dict(
allow_message_editing=realm.allow_message_editing,
message_content_edit_limit_seconds=realm.message_content_edit_limit_seconds,
edit_topic_policy=realm.edit_topic_policy,
)
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.edit_topic_policy = edit_topic_policy
event_time = timezone_now()
updated_properties = dict(
allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
edit_topic_policy=edit_topic_policy,
)
for updated_property, updated_value in updated_properties.items():
if updated_value == old_values[updated_property]:
continue
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_values[updated_property],
RealmAuditLog.NEW_VALUE: updated_value,
"property": updated_property,
}
).decode(),
)
realm.save(update_fields=list(updated_properties.keys()))
event = dict(
type="realm",
op="update_dict",
property="default",
data=updated_properties,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.notifications_stream_id
realm.notifications_stream = stream
realm.save(update_fields=["notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(
realm: Realm, stream: Optional[Stream], stream_id: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.signup_notifications_stream_id
realm.signup_notifications_stream = stream
realm.save(update_fields=["signup_notifications_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_PROPERTY_CHANGED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: stream_id,
"property": "signup_notifications_stream",
}
).decode(),
)
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_DEACTIVATED,
event_time=event_time,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
# This event will only ever be received by clients with an active
# longpoll connection, because by this point clients will be
# unable to authenticate again to their event queue (triggering an
# immediate reload into the page explaining the realm was
# deactivated). So the purpose of sending this is to flush all
# active longpoll connections for the realm.
event = dict(type="realm", op="deactivated", realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_REACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}
).decode(),
)
def do_change_realm_subdomain(
realm: Realm, new_subdomain: str, *, acting_user: Optional[UserProfile]
) -> None:
old_subdomain = realm.subdomain
old_uri = realm.uri
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_SUBDOMAIN_CHANGED,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_subdomain": old_subdomain, "new_subdomain": new_subdomain},
)
# If a realm is being renamed multiple times, we should find all the placeholder
# realms and reset their deactivated_redirect field to point to the new realm uri.
placeholder_realms = Realm.objects.filter(deactivated_redirect=old_uri, deactivated=True)
for placeholder_realm in placeholder_realms:
do_add_deactivated_redirect(placeholder_realm, realm.uri)
# When we change a realm's subdomain, the realm with the old subdomain is effectively
# deactivated. We create a deactivated placeholder realm using the old subdomain and set
# its deactivated_redirect to the new realm's URI so that we can tell users that
# the realm has been moved to a new subdomain.
placeholder_realm = do_create_realm(old_subdomain, "placeholder-realm")
do_deactivate_realm(placeholder_realm, acting_user=None)
do_add_deactivated_redirect(placeholder_realm, realm.uri)
def do_add_deactivated_redirect(realm: Realm, redirect_url: str) -> None:
realm.deactivated_redirect = redirect_url
realm.save(update_fields=["deactivated_redirect"])
def do_scrub_realm(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
if settings.BILLING_ENABLED:
downgrade_now_without_creating_additional_invoices(realm)
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED,
)
def do_delete_user(user_profile: UserProfile) -> None:
if user_profile.realm.is_zephyr_mirror_realm:
raise AssertionError("Deleting zephyr mirror users is not supported")
do_deactivate_user(user_profile, acting_user=None)
subscribed_huddle_recipient_ids = set(
Subscription.objects.filter(
user_profile=user_profile, recipient__type=Recipient.HUDDLE
).values_list("recipient_id", flat=True)
)
user_id = user_profile.id
realm = user_profile.realm
personal_recipient = user_profile.recipient
user_profile.delete()
# Recipient objects don't get deleted through CASCADE, so we need to handle
# the user's personal recipient manually. This will also delete all Messages pointing
# to this recipient (all private messages sent to the user).
personal_recipient.delete()
replacement_user = create_user(
force_id=user_id,
email=f"deleteduser{user_id}@{realm.uri}",
password=None,
realm=realm,
full_name=f"Deleted User {user_id}",
is_mirror_dummy=True,
)
subs_to_recreate = [
Subscription(
user_profile=replacement_user,
recipient=recipient,
is_user_active=replacement_user.is_active,
)
for recipient in Recipient.objects.filter(id__in=subscribed_huddle_recipient_ids)
]
Subscription.objects.bulk_create(subs_to_recreate)
def change_user_is_active(user_profile: UserProfile, value: bool) -> None:
"""
Helper function for changing the .is_active field. Not meant as a standalone function
in production code as properly activating/deactivating users requires more steps.
This changes the is_active value and saves it, while ensuring
Subscription.is_user_active values are updated in the same db transaction.
"""
with transaction.atomic(savepoint=False):
user_profile.is_active = value
user_profile.save(update_fields=["is_active"])
Subscription.objects.filter(user_profile=user_profile).update(is_user_active=value)
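# Illustrative note (not part of the original module): savepoint=False above
# makes the atomic() block join any enclosing transaction rather than create
# a database savepoint, so callers that already hold transaction.atomic()
# (such as do_deactivate_user below) don't pay for an extra savepoint.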
def get_active_bots_owned_by_user(user_profile: UserProfile) -> QuerySet:
return UserProfile.objects.filter(is_bot=True, is_active=True, bot_owner=user_profile)
def do_deactivate_user(
user_profile: UserProfile, _cascade: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
if not user_profile.is_active:
return
if _cascade:
# We need to deactivate bots before the target user, to ensure
# that a failure partway through this function cannot result
# in only the user being deactivated.
bot_profiles = get_active_bots_owned_by_user(user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, _cascade=False, acting_user=acting_user)
with transaction.atomic():
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
user_profile.is_mirror_dummy = True
user_profile.save(update_fields=["is_mirror_dummy"])
change_user_is_active(user_profile, False)
delete_user_sessions(user_profile)
clear_scheduled_emails([user_profile.id])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED,
event_time=event_time,
extra_data=orjson.dumps(
{
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["active_users_log:is_bot:day"],
user_profile.is_bot,
event_time,
increment=-1,
)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(
type="realm_user",
op="remove",
person=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(
type="realm_bot",
op="remove",
bot=dict(user_id=user_profile.id, full_name=user_profile.full_name),
)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def do_deactivate_stream(
stream: Stream, log: bool = True, *, acting_user: Optional[UserProfile]
) -> None:
# We want to mark all messages in the to-be-deactivated stream as
# read for all users; otherwise they will pollute queries like
# "Get the user's first unread message". Since this can be an
# expensive operation, we do it via the deferred_work queue
# processor.
deferred_work_event = {
"type": "mark_stream_messages_as_read_for_everyone",
"stream_recipient_id": stream.recipient_id,
}
queue_json_publish("deferred_work", deferred_work_event)
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=True).update(
active=False
)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much of the original stream name as possible while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
# Prepend a substring of the hashed stream ID to the new stream name
stream_id_str = str(stream.id)
stream_id_hash_object = hashlib.sha512(stream_id_str.encode("utf-8"))
hashed_stream_id = stream_id_hash_object.hexdigest()[0:7]
new_name = (hashed_stream_id + "!DEACTIVATED:" + old_name)[: Stream.MAX_NAME_LENGTH]
stream.name = new_name
stream.save(update_fields=["name", "deactivated", "invite_only"])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete", streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=acting_user,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time,
)
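# Illustrative sketch (not part of the original module): the rename above
# prefixes a 7-character slice of the SHA-512 hash of the stream id, e.g.
# for a stream with id 42 named "general" the new name is
#
#     hashlib.sha512(b"42").hexdigest()[0:7] + "!DEACTIVATED:" + "general"
#
# truncated to Stream.MAX_NAME_LENGTH, which frees "general" for reuse while
# keeping the old name recoverable from the suffix.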
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id, new_email=user_profile.email)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id, delivery_email=new_email)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time,
)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(
new_email=new_email,
old_email=old_email,
user_profile=user_profile,
realm=user_profile.realm,
)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update(
old_email=old_email,
new_email=new_email,
activate_url=activation_url,
)
language = user_profile.default_language
send_email(
"zerver/emails/confirm_new_email",
to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language,
context=context,
realm=user_profile.realm,
)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
@cache_with_key(
lambda realm, email, f: user_profile_delivery_email_cache_key(email, realm),
timeout=3600 * 24 * 7,
)
def create_mirror_user_if_needed(
realm: Realm, email: str, email_to_fullname: Callable[[str], str]
) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
def render_incoming_message(
message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData] = None,
email_gateway: bool = False,
) -> MessageRenderingResult:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendering_result = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_("Unable to render message"))
return rendering_result
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
online_push_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
muted_sender_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(
*,
realm_id: int,
recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int] = set(),
possible_wildcard_mention: bool = True,
) -> RecipientInfoResult:
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
muted_sender_user_ids: Set[int] = get_muting_users(sender_id)
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert len(message_to_user_ids) in [1, 2]
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert stream_topic is not None
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = (
get_subscriptions_for_send_message(
realm_id=realm_id,
stream_id=stream_topic.stream_id,
possible_wildcard_mention=possible_wildcard_mention,
possibly_mentioned_user_ids=possibly_mentioned_user_ids,
)
.annotate(
user_profile_email_notifications=F(
"user_profile__enable_stream_email_notifications"
),
user_profile_push_notifications=F("user_profile__enable_stream_push_notifications"),
user_profile_wildcard_mentions_notify=F("user_profile__wildcard_mentions_notify"),
)
.values(
"user_profile_id",
"push_notifications",
"email_notifications",
"wildcard_mentions_notify",
"user_profile_email_notifications",
"user_profile_push_notifications",
"user_profile_wildcard_mentions_notify",
"is_muted",
)
.order_by("user_profile_id")
)
message_to_user_ids = [row["user_profile_id"] for row in subscription_rows]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
# This implements the model where the UserProfile stream notification settings
# are defaults, which can be overridden by the stream-level settings (if those
# values are not null).
if row["is_muted"]:
return False
if row["user_profile_id"] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row["user_profile_" + setting]
stream_push_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send("push_notifications", row)
}
stream_email_user_ids = {
row["user_profile_id"]
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send("email_notifications", row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine which users would receive a wildcard mention
# notification for this message should the message indeed
# contain a wildcard mention.
#
# We don't have separate values for push/email
# notifications here; at this stage, we're just
# determining whether this wildcard mention should be
# treated as a mention (and follow the user's mention
# notification preferences) or a normal message.
wildcard_mention_user_ids = {
row["user_profile_id"]
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError("Bad recipient type")
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
# Important note: Because we haven't rendered Markdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
# for our data structures not related to bots
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(is_active=True).values(
"id",
"enable_online_push_notifications",
"is_bot",
"bot_type",
"long_term_idle",
)
# query_for_ids is fast and highly optimized for large queries, and we
# need this codepath to be fast (it's part of sending messages)
query = query_for_ids(
query=query,
user_ids=sorted(user_ids),
field="id",
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {row["id"] for row in rows if f(row)} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row["is_bot"] and (row["bot_type"] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
online_push_user_ids = get_ids_for(
lambda r: r["enable_online_push_notifications"],
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r["long_term_idle"],
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via Markdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = {
row["id"] for row in rows if row["is_bot"] and row["bot_type"] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [(row["id"], row["bot_type"]) for row in rows if is_service_bot(row)]
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
online_push_user_ids=online_push_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
muted_sender_user_ids=muted_sender_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
)
return info
def get_service_bot_events(
sender: UserProfile,
service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int],
active_user_ids: Set[int],
recipient_type: int,
) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = "outgoing_webhooks"
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = "embedded_bots"
else:
logging.error(
"Unexpected bot_type for Service bot id=%s: %s",
user_profile_id,
bot_type,
)
return
is_stream = recipient_type == Recipient.STREAM
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = "mention"
# PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = "private_message"
else:
return
event_dict[queue_name].append(
{
"trigger": trigger,
"user_profile_id": user_profile_id,
}
)
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
def do_schedule_messages(send_message_requests: Sequence[SendMessageRequest]) -> List[int]:
scheduled_messages: List[ScheduledMessage] = []
for send_request in send_message_requests:
scheduled_message = ScheduledMessage()
scheduled_message.sender = send_request.message.sender
scheduled_message.recipient = send_request.message.recipient
topic_name = send_request.message.topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = send_request.message.content
scheduled_message.sending_client = send_request.message.sending_client
scheduled_message.stream = send_request.stream
scheduled_message.realm = send_request.realm
assert send_request.deliver_at is not None
scheduled_message.scheduled_timestamp = send_request.deliver_at
if send_request.delivery_type == "send_later":
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif send_request.delivery_type == "remind":
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def build_message_send_dict(
message: Message,
stream: Optional[Stream] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
realm: Optional[Realm] = None,
widget_content_dict: Optional[Dict[str, Any]] = None,
email_gateway: bool = False,
) -> SendMessageRequest:
"""Returns a dictionary that can be passed into do_send_messages. In
production, this is always called by check_message, but some
testing code paths call it directly.
"""
if realm is None:
realm = message.sender.realm
mention_data = MentionData(
realm_id=realm.id,
content=message.content,
)
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message.topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
# Render our message_dicts.
assert message.rendered_content is None
rendering_result = render_incoming_message(
message,
message.content,
info["active_user_ids"],
realm,
mention_data=mention_data,
email_gateway=email_gateway,
)
message.rendered_content = rendering_result.rendered_content
message.rendered_content_version = markdown_version
links_for_embed = rendering_result.links_for_preview
mentioned_user_groups_map = get_user_group_mentions_data(
mentioned_user_ids=rendering_result.mentions_user_ids,
mentioned_user_group_ids=list(rendering_result.mentions_user_group_ids),
mention_data=mention_data,
)
# For single user as well as user group mentions, we set the `mentioned`
# flag on `UserMessage`
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if rendering_result.mentions_wildcard:
wildcard_mention_user_ids = info["wildcard_mention_user_ids"]
else:
wildcard_mention_user_ids = set()
"""
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
"""
mentioned_user_ids = rendering_result.mentions_user_ids
default_bot_user_ids = info["default_bot_user_ids"]
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
info["um_eligible_user_ids"] |= mentioned_bot_user_ids
message_send_dict = SendMessageRequest(
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
mention_data=mention_data,
mentioned_user_groups_map=mentioned_user_groups_map,
message=message,
rendering_result=rendering_result,
active_user_ids=info["active_user_ids"],
online_push_user_ids=info["online_push_user_ids"],
stream_push_user_ids=info["stream_push_user_ids"],
stream_email_user_ids=info["stream_email_user_ids"],
muted_sender_user_ids=info["muted_sender_user_ids"],
um_eligible_user_ids=info["um_eligible_user_ids"],
long_term_idle_user_ids=info["long_term_idle_user_ids"],
default_bot_user_ids=info["default_bot_user_ids"],
service_bot_tuples=info["service_bot_tuples"],
wildcard_mention_user_ids=wildcard_mention_user_ids,
links_for_embed=links_for_embed,
widget_content=widget_content_dict,
)
return message_send_dict
def do_send_messages(
send_message_requests_maybe_none: Sequence[Optional[SendMessageRequest]],
email_gateway: bool = False,
mark_as_read: Sequence[int] = [],
) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
send_message_requests = [
send_request
for send_request in send_message_requests_maybe_none
if send_request is not None
]
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create(send_request.message for send_request in send_message_requests)
# Claim attachments in message
for send_request in send_message_requests:
if do_claim_attachments(
send_request.message, send_request.rendering_result.potential_attachment_path_ids
):
send_request.message.has_attachment = True
send_request.message.save(update_fields=["has_attachment"])
ums: List[UserMessageLite] = []
for send_request in send_message_requests:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = send_request.rendering_result.mentions_user_ids
# Extend the set with users who have muted the sender.
mark_as_read_for_users = send_request.muted_sender_user_ids
mark_as_read_for_users.update(mark_as_read)
user_messages = create_user_messages(
message=send_request.message,
rendering_result=send_request.rendering_result,
um_eligible_user_ids=send_request.um_eligible_user_ids,
long_term_idle_user_ids=send_request.long_term_idle_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
mentioned_user_ids=mentioned_user_ids,
mark_as_read_for_users=mark_as_read_for_users,
)
for um in user_messages:
user_message_flags[send_request.message.id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
send_request.message.service_queue_events = get_service_bot_events(
sender=send_request.message.sender,
service_bot_tuples=send_request.service_bot_tuples,
mentioned_user_ids=mentioned_user_ids,
active_user_ids=send_request.active_user_ids,
recipient_type=send_request.message.recipient.type,
)
bulk_insert_ums(ums)
for send_request in send_message_requests:
do_widget_post_save_actions(send_request)
# This next loop is responsible for notifying other parts of the
# Zulip system about the messages we just committed to the database:
# * Notifying clients via send_event
# * Triggering outgoing webhooks via the service event queue.
# * Updating the `first_message_id` field for streams without any message history.
# * Implementing the Welcome Bot reply hack
# * Adding links to the embed_links queue for open graph processing.
for send_request in send_message_requests:
realm_id: Optional[int] = None
if send_request.message.is_stream_message():
if send_request.stream is None:
stream_id = send_request.message.recipient.type_id
send_request.stream = Stream.objects.select_related().get(id=stream_id)
# assert needed because stubs for django are missing
assert send_request.stream is not None
realm_id = send_request.stream.realm_id
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(send_request.message, realm_id)
user_flags = user_message_flags.get(send_request.message.id, {})
"""
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
"""
user_ids = send_request.active_user_ids | set(user_flags.keys())
sender_id = send_request.message.sender_id
# We make sure the sender is listed first in the `users` list;
# this results in the sender receiving the message first if
# there are thousands of recipients, decreasing perceived latency.
if sender_id in user_ids:
user_list = [sender_id] + list(user_ids - {sender_id})
else:
user_list = list(user_ids)
users: List[Dict[str, Union[int, List[str]]]] = []
for user_id in user_list:
flags = user_flags.get(user_id, [])
user_data = dict(id=user_id, flags=flags)
if user_id in send_request.mentioned_user_groups_map:
user_data["mentioned_user_group_id"] = send_request.mentioned_user_groups_map[
user_id
]
users.append(user_data)
sender = send_request.message.sender
message_type = wide_message_dict["type"]
active_users_data = [
UserMessageNotificationsData.from_user_id_sets(
user_id=user_id,
flags=user_flags.get(user_id, []),
online_push_user_ids=send_request.online_push_user_ids,
stream_push_user_ids=send_request.stream_push_user_ids,
stream_email_user_ids=send_request.stream_email_user_ids,
wildcard_mention_user_ids=send_request.wildcard_mention_user_ids,
muted_sender_user_ids=send_request.muted_sender_user_ids,
)
for user_id in send_request.active_user_ids
]
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_users_data=active_users_data,
)
event = dict(
type="message",
message=send_request.message.id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
online_push_user_ids=list(send_request.online_push_user_ids),
stream_push_user_ids=list(send_request.stream_push_user_ids),
stream_email_user_ids=list(send_request.stream_email_user_ids),
wildcard_mention_user_ids=list(send_request.wildcard_mention_user_ids),
muted_sender_user_ids=list(send_request.muted_sender_user_ids),
)
if send_request.message.is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
# assert needed because stubs for django are missing
assert send_request.stream is not None
if send_request.stream.is_public():
event["realm_id"] = send_request.stream.realm_id
event["stream_name"] = send_request.stream.name
if send_request.stream.invite_only:
event["invite_only"] = True
if send_request.stream.first_message_id is None:
send_request.stream.first_message_id = send_request.message.id
send_request.stream.save(update_fields=["first_message_id"])
if send_request.local_id is not None:
event["local_id"] = send_request.local_id
if send_request.sender_queue_id is not None:
event["sender_queue_id"] = send_request.sender_queue_id
send_event(send_request.realm, event, users)
if send_request.links_for_embed:
event_data = {
"message_id": send_request.message.id,
"message_content": send_request.message.content,
"message_realm_id": send_request.realm.id,
"urls": list(send_request.links_for_embed),
}
queue_json_publish("embed_links", event_data)
if send_request.message.recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (
welcome_bot_id in send_request.active_user_ids
and welcome_bot_id != send_request.message.sender_id
):
from zerver.lib.onboarding import send_welcome_bot_response
send_welcome_bot_response(send_request)
for queue_name, events in send_request.message.service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event["trigger"],
"user_profile_id": event["user_profile_id"],
},
)
return [send_request.message.id for send_request in send_message_requests]
class UserMessageLite:
"""
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
"""
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
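# Hypothetical usage sketch (comments only): UserMessageLite mirrors the
# (user_profile_id, message_id, flags) columns of zerver_usermessage, so a
# read-and-mentioned row could be built roughly like this; the exact output
# of flags_list() is whatever UserMessage.flags_list_for_flags() produces.
#
#   um = UserMessageLite(
#       user_profile_id=42,
#       message_id=1234,
#       flags=int(UserMessage.flags.read | UserMessage.flags.mentioned),
#   )
#   um.flags_list()  # e.g. ["read", "mentioned"]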
def create_user_messages(
message: Message,
rendering_result: MessageRenderingResult,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read_for_users: Set[int],
) -> List[UserMessageLite]:
# These properties on the Message are set via
# render_markdown by code in the Markdown inline patterns
ids_with_alert_words = rendering_result.user_ids_with_alert_words
sender_id = message.sender.id
is_stream_message = message.is_stream_message()
base_flags = 0
if rendering_result.mentions_wildcard:
base_flags |= UserMessage.flags.wildcard_mentioned
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
base_flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for user_profile_id in um_eligible_user_ids:
flags = base_flags
if (
user_profile_id == sender_id and message.sent_by_human()
) or user_profile_id in mark_as_read_for_users:
flags |= UserMessage.flags.read
if user_profile_id in mentioned_user_ids:
flags |= UserMessage.flags.mentioned
if user_profile_id in ids_with_alert_words:
flags |= UserMessage.flags.has_alert_word
if (
user_profile_id in long_term_idle_user_ids
and user_profile_id not in stream_push_user_ids
and user_profile_id not in stream_email_user_ids
and is_stream_message
and int(flags) == 0
):
continue
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=flags,
)
user_messages.append(um)
return user_messages
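# Worked example (illustrative only) of the soft-deactivation optimization
# described above: for a stream message, a long_term_idle subscriber who is
# not the sender, not mentioned, has no alert words, is not in
# mark_as_read_for_users, and is in neither stream_push_user_ids nor
# stream_email_user_ids ends up with flags == 0 and is skipped entirely (no
# UserMessageLite row). The same subscriber, if mentioned, gets a row with
# the `mentioned` flag set.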
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
"""
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
"""
if not ums:
return
vals = [(um.user_profile_id, um.message_id, um.flags) for um in ums]
query = SQL(
"""
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
"""
)
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
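# Sketch of the resulting query (assuming psycopg2's execute_values
# semantics): the single %s placeholder is expanded into one tuple per row,
# e.g. for vals == [(11, 101, 1), (12, 101, 0)] the database receives
# roughly:
#
#   INSERT into zerver_usermessage (user_profile_id, message_id, flags)
#   VALUES (11, 101, 1), (12, 101, 0)
#
# which is what makes this path so much faster than per-row ORM inserts.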
def verify_submessage_sender(
*,
message_id: int,
message_sender_id: int,
submessage_sender_id: int,
) -> None:
"""Even though our submessage architecture is geared toward
collaboration among all message readers, we still enforce
that the first person to attach a submessage to the message
must be the original sender of the message.
"""
if message_sender_id == submessage_sender_id:
return
if SubMessage.objects.filter(
message_id=message_id,
sender_id=message_sender_id,
).exists():
return
raise JsonableError(_("You cannot attach a submessage to this message."))
def do_add_submessage(
realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
transaction.on_commit(lambda: send_event(realm, event, target_user_ids))
def notify_reaction_update(
user_profile: UserProfile, message: Message, reaction: Reaction, op: str
) -> None:
user_dict = {
"user_id": user_profile.id,
"email": user_profile.email,
"full_name": user_profile.full_name,
}
event: Dict[str, Any] = {
"type": "reaction",
"op": op,
"user_id": user_profile.id,
# TODO: We plan to remove this redundant user_dict object once
# clients are updated to support accessing user_id. See
# https://github.com/zulip/zulip/pull/14711 for details.
"user": user_dict,
"message_id": message.id,
"emoji_name": reaction.emoji_name,
"emoji_code": reaction.emoji_code,
"reaction_type": reaction.reaction_type,
}
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message, plus subscribers of
# streams with the access to stream's full history.
#
# This means reactions won't live-update in preview narrows for a
# stream the user isn't yet subscribed to; this is the right
# performance tradeoff to avoid sending every reaction to public
# stream messages to all users.
#
# To ensure that reactions do live-update for any user who has
# actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications, even if they are not
# subscribed to the stream.
user_ids = set(
UserMessage.objects.filter(message=message.id).values_list("user_profile_id", flat=True)
)
if message.recipient.type == Recipient.STREAM:
stream_id = message.recipient.type_id
stream = Stream.objects.get(id=stream_id)
user_ids |= subscriber_ids_with_stream_history_access(stream)
transaction.on_commit(lambda: send_event(user_profile.realm, event, list(user_ids)))
def do_add_reaction(
user_profile: UserProfile,
message: Message,
emoji_name: str,
emoji_code: str,
reaction_type: str,
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction(
user_profile=user_profile,
message=message,
emoji_name=emoji_name,
emoji_code=emoji_code,
reaction_type=reaction_type,
)
reaction.save()
notify_reaction_update(user_profile, message, reaction, "add")
def check_add_reaction(
user_profile: UserProfile,
message_id: int,
emoji_name: str,
emoji_code: Optional[str],
reaction_type: Optional[str],
) -> None:
message, user_message = access_message(user_profile, message_id, lock_message=True)
if emoji_code is None:
# The emoji_code argument is only required for rare corner
# cases discussed in the long block comment below. For simple
# API clients, we allow specifying just the name, and just
# look up the code using the current name->code mapping.
emoji_code = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[0]
if reaction_type is None:
reaction_type = emoji_name_to_emoji_code(message.sender.realm, emoji_name)[1]
if Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).exists():
raise JsonableError(_("Reaction already exists."))
query = Reaction.objects.filter(
message=message, emoji_code=emoji_code, reaction_type=reaction_type
)
if query.exists():
# If another user has already reacted to this message with
# same emoji code, we treat the new reaction as a vote for the
# existing reaction. So the emoji name used by that earlier
# reaction takes precedence over whatever was passed in this
# request. This is necessary to avoid a message having 2
# "different" emoji reactions with the same emoji code (and
# thus same image) on the same message, which looks ugly.
#
# In this "voting for an existing reaction" case, we shouldn't
# check whether the emoji code and emoji name match, since
# it's possible that the (emoji_type, emoji_name, emoji_code)
# triple for this existing reaction may not pass validation
# now (e.g. because it is for a realm emoji that has been
# since deactivated). We still want to allow users to add a
# vote for any old reaction they see in the UI even if that is a
# deactivated custom emoji, so we just use the emoji name from
# the existing reaction with no further validation.
emoji_name = query.first().emoji_name
else:
# Otherwise, use the name provided in this request, but verify
# it is valid in the user's realm (e.g. not a deactivated
# realm emoji).
check_emoji_request(user_profile.realm, emoji_name, emoji_code, reaction_type)
if user_message is None:
# Users can see and react to messages sent to streams they
# were not a subscriber to; in order to receive events for
# those, we give the user a `historical` UserMessage objects
# for the message. This is the same trick we use for starring
# messages.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
do_add_reaction(user_profile, message, emoji_name, emoji_code, reaction_type)
def do_remove_reaction(
user_profile: UserProfile, message: Message, emoji_code: str, reaction_type: str
) -> None:
"""Should be called while holding a SELECT FOR UPDATE lock
(e.g. via access_message(..., lock_message=True)) on the
Message row, to prevent race conditions.
"""
reaction = Reaction.objects.filter(
user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type,
).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm, sender: UserProfile, recipient_user_profiles: List[UserProfile], operator: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [
{"user_id": profile.id, "email": profile.email} for profile in recipient_user_profiles
]
event = dict(
type="typing",
message_type="private",
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [user.id for user in recipient_user_profiles if user.is_active]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, user_ids: List[int], operator: str) -> None:
realm = sender.realm
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
# We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def do_send_stream_typing_notification(
sender: UserProfile, operator: str, stream: Stream, topic: str
) -> None:
sender_dict = {"user_id": sender.id, "email": sender.email}
event = dict(
type="typing",
message_type="stream",
op=operator,
sender=sender_dict,
stream_id=stream.id,
topic=topic,
)
user_ids_to_notify = get_user_ids_for_streams({stream.id})[stream.id]
send_event(sender.realm, event, user_ids_to_notify)
def ensure_stream(
realm: Realm,
stream_name: str,
invite_only: bool = False,
stream_description: str = "",
*,
acting_user: Optional[UserProfile],
) -> Stream:
return create_stream_if_needed(
realm,
stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user,
)[0]
def get_recipient_from_user_profiles(
recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {user_profile.id: user_profile for user_profile in recipient_profiles}
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map:
del recipient_profiles_map[sender.id]
assert recipient_profiles_map
if len(recipient_profiles_map) == 1:
[user_profile] = recipient_profiles_map.values()
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
user_ids = set(recipient_profiles_map)
return get_huddle_recipient(user_ids)
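# Illustrative outcomes (comments only), with forwarded_mirror_message=False:
# * recipient_profiles == [sender, other_user]: the sender is dropped and
#   other_user.recipient (a PERSONAL recipient) is returned.
# * recipient_profiles == [user_a, user_b, user_c] (sender not included): the
#   sender is added and a huddle Recipient covering all four users is returned.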
def validate_recipient_user_profiles(
user_profiles: Sequence[UserProfile], sender: UserProfile, allow_deactivated: bool = False
) -> Sequence[UserProfile]:
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (
not user_profile.is_active
and not user_profile.is_mirror_dummy
and not allow_deactivated
) or user_profile.realm.deactivated:
raise ValidationError(
_("'{email}' is no longer using Zulip.").format(email=user_profile.email)
)
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(
user_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile,
allow_deactivated: bool = False,
) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(
user_profiles, sender, allow_deactivated=allow_deactivated
)
return get_recipient_from_user_profiles(
recipient_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window,
)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
# Users can pass stream name as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = orjson.loads(s)
except orjson.JSONDecodeError:
data = s
if isinstance(data, str):
data = data.split(",")
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
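# Accepted inputs (sketch, not executed); results are de-duplicated, so
# ordering is not guaranteed:
#   extract_private_recipients('[7, 8, 9]')            -> [7, 8, 9] in some order
#   extract_private_recipients('["[email protected]", " [email protected] "]')  -> stripped, validated emails
#   extract_private_recipients('[email protected],[email protected]')              -> the bare comma-separated form is split first
#   extract_private_recipients('[]')                    -> []
# Mixing ints and strings in one list raises JsonableError.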
def get_validated_user_ids(user_ids: Collection[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Collection[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(
sender: UserProfile,
client: Client,
stream_name: str,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_stream_message_by_id(
sender: UserProfile,
client: Client,
stream_id: int,
topic: str,
body: str,
realm: Optional[Realm] = None,
) -> int:
addressee = Addressee.for_stream_id(stream_id, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(
sender: UserProfile, client: Client, receiving_user: UserProfile, body: str
) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
*,
skip_stream_access_check: bool = False,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
try:
message = check_message(
sender,
client,
addressee,
message_content,
realm,
forged,
forged_timestamp,
forwarder_user_profile,
local_id,
sender_queue_id,
widget_content,
skip_stream_access_check=skip_stream_access_check,
)
except ZephyrMessageAlreadySentException as e:
return e.message_id
return do_send_messages([message])[0]
def check_schedule_message(
sender: UserProfile,
client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str],
message_content: str,
delivery_type: str,
deliver_at: datetime.datetime,
realm: Optional[Realm] = None,
forwarder_user_profile: Optional[UserProfile] = None,
) -> int:
addressee = Addressee.legacy_build(sender, message_type_name, message_to, topic_name)
send_request = check_message(
sender,
client,
addressee,
message_content,
realm=realm,
forwarder_user_profile=forwarder_user_profile,
)
send_request.deliver_at = deliver_at
send_request.delivery_type = delivery_type
recipient = send_request.message.recipient
if delivery_type == "remind" and (
recipient.type != Recipient.STREAM and recipient.type_id != sender.id
):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([send_request])[0]
def validate_message_edit_payload(
message: Message,
stream_id: Optional[int],
topic_name: Optional[str],
propagate_mode: Optional[str],
content: Optional[str],
) -> None:
"""
Checks that the data sent is well-formed. Does not handle editability, permissions etc.
"""
if topic_name is None and content is None and stream_id is None:
raise JsonableError(_("Nothing to change"))
if not message.is_stream_message():
if stream_id is not None:
raise JsonableError(_("Private messages cannot be moved to streams."))
if topic_name is not None:
raise JsonableError(_("Private messages cannot have topics."))
if propagate_mode != "change_one" and topic_name is None and stream_id is None:
raise JsonableError(_("Invalid propagate_mode without topic edit"))
if topic_name == "":
raise JsonableError(_("Topic can't be empty"))
if stream_id is not None and content is not None:
raise JsonableError(_("Cannot change message content while changing stream"))
# Right now, we prevent users from editing widgets.
if content is not None and is_widget_message(message):
raise JsonableError(_("Widgets cannot be edited."))
def can_edit_content_or_topic(
message: Message,
user_profile: UserProfile,
is_no_topic_msg: bool,
content: Optional[str] = None,
topic_name: Optional[str] = None,
) -> bool:
# You have permission to edit the message (both content and topic) if you sent it.
if message.sender_id == user_profile.id:
return True
# You cannot edit the content of message sent by someone else.
if content is not None:
return False
assert topic_name is not None
# The following cases are the various reasons a user might be
# allowed to edit topics.
# We allow anyone to edit (no topic) messages to help tend them.
if is_no_topic_msg:
return True
# The can_edit_topic_of_any_message helper returns whether the user can edit the topic
# or not based on edit_topic_policy setting and the user's role.
if user_profile.can_edit_topic_of_any_message():
return True
return False
def check_update_message(
user_profile: UserProfile,
message_id: int,
stream_id: Optional[int] = None,
topic_name: Optional[str] = None,
propagate_mode: Optional[str] = "change_one",
send_notification_to_old_thread: bool = True,
send_notification_to_new_thread: bool = True,
content: Optional[str] = None,
) -> int:
"""This will update a message given the message id and user profile.
It checks whether the user profile has the permission to edit the message
and raises a JsonableError if otherwise.
It returns the number changed.
"""
message, ignored_user_message = access_message(user_profile, message_id)
if not user_profile.realm.allow_message_editing:
raise JsonableError(_("Your organization has turned off message editing"))
# The zerver/views/message_edit.py call point already strips this
# via REQ_topic; so we can delete this line if we arrange a
# contract where future callers in the embedded bots system also
# use REQ_topic (or otherwise are guaranteed to strip their input).
if topic_name is not None:
topic_name = topic_name.strip()
if topic_name == message.topic_name():
topic_name = None
validate_message_edit_payload(message, stream_id, topic_name, propagate_mode, content)
is_no_topic_msg = message.topic_name() == "(no topic)"
if content is not None or topic_name is not None:
if not can_edit_content_or_topic(
message, user_profile, is_no_topic_msg, content, topic_name
):
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has passed"))
# If there is a change to the topic, check that the user is allowed to
# edit it and that it has not been too long. If this is not the user who
# sent the message, they are not the admin, and the time limit for editing
# topics is passed, raise an error.
if (
topic_name is not None
and message.sender != user_profile
and not user_profile.is_realm_admin
and not user_profile.is_moderator
and not is_no_topic_msg
):
deadline_seconds = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS + edit_limit_buffer
if (timezone_now() - message.date_sent) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message's topic has passed"))
rendering_result = None
links_for_embed: Set[str] = set()
prior_mention_user_ids: Set[int] = set()
mention_data: Optional[MentionData] = None
if content is not None:
if content.rstrip() == "":
content = "(deleted)"
content = normalize_body(content)
mention_data = MentionData(
realm_id=user_profile.realm.id,
content=content,
)
user_info = get_user_info_for_message_updates(message.id)
prior_mention_user_ids = user_info["mention_user_ids"]
# We render the message using the current user's realm; since
# the cross-realm bots never edit messages, this should be
# always correct.
# Note: If rendering fails, the called code will raise a JsonableError.
rendering_result = render_incoming_message(
message,
content,
user_info["message_user_ids"],
user_profile.realm,
mention_data=mention_data,
)
links_for_embed |= rendering_result.links_for_preview
new_stream = None
number_changed = 0
if stream_id is not None:
assert message.is_stream_message()
if not user_profile.can_move_messages_between_streams():
raise JsonableError(_("You don't have permission to move this message"))
try:
access_stream_by_id(user_profile, message.recipient.type_id)
except JsonableError:
raise JsonableError(
_(
"You don't have permission to move this message due to missing access to its stream"
)
)
new_stream = access_stream_by_id(user_profile, stream_id, require_active=True)[0]
check_stream_access_based_on_stream_post_policy(user_profile, new_stream)
number_changed = do_update_message(
user_profile,
message,
new_stream,
topic_name,
propagate_mode,
send_notification_to_old_thread,
send_notification_to_new_thread,
content,
rendering_result,
prior_mention_user_ids,
mention_data,
)
if links_for_embed:
event_data = {
"message_id": message.id,
"message_content": message.content,
# The choice of `user_profile.realm_id` rather than
# `sender.realm_id` must match the decision made in the
# `render_incoming_message` call earlier in this function.
"message_realm_id": user_profile.realm_id,
"urls": list(links_for_embed),
}
queue_json_publish("embed_links", event_data)
return number_changed
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(
_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
)
)
for i in group_name:
if ord(i) == 0:
raise JsonableError(
_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
)
)
def send_rate_limited_pm_notification_to_bot_owner(
sender: UserProfile, realm: Realm, content: str
) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(
get_system_bot(settings.NOTIFICATION_BOT), sender.bot_owner, content
)
sender.last_reminder = timezone_now()
sender.save(update_fields=["last_reminder"])
def send_pm_if_empty_stream(
stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str] = None,
stream_id: Optional[int] = None,
) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _(
"Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID."
).format(**arg_dict)
else:
assert stream_name is not None
content = _(
"Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it."
).format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _(
"Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers."
).format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(
stream_name: str, realm: Realm, sender: UserProfile
) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(
stream_id: int, realm: Realm, sender: UserProfile
) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(
realm: Realm, sender: UserProfile, user_profiles: Sequence[UserProfile]
) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a SendMessageRequest ready for sending with do_send_messages on success; raises a JsonableError on error.
def check_message(
sender: UserProfile,
client: Client,
addressee: Addressee,
message_content_raw: str,
realm: Optional[Realm] = None,
forged: bool = False,
forged_timestamp: Optional[float] = None,
forwarder_user_profile: Optional[UserProfile] = None,
local_id: Optional[str] = None,
sender_queue_id: Optional[str] = None,
widget_content: Optional[str] = None,
email_gateway: bool = False,
*,
skip_stream_access_check: bool = False,
) -> SendMessageRequest:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = normalize_body(message_content_raw)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
# To save a database round trip, we construct the Recipient
# object for the Stream rather than fetching it from the
# database using the stream.recipient foreign key.
#
# This is simpler than ensuring that code paths that fetch a
# Stream that will be used for sending a message have a
# `select_related("recipient"), which would also needlessly
# expand Stream objects in memory (all the fields of Recipient
# are already known given the Stream object).
recipient = Recipient(
id=stream.recipient_id,
type_id=stream.id,
type=Recipient.STREAM,
)
if not skip_stream_access_check:
access_stream_for_send_message(
sender=sender, stream=stream, forwarder_user_profile=forwarder_user_profile
)
else:
# Defensive assertion - the only currently supported use case
# for this option is for outgoing webhook bots and since this
# is security-sensitive code, it's beneficial to ensure nothing
# else can sneak past the access check.
assert sender.bot_type == sender.OUTGOING_WEBHOOK_BOT
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in [
"zephyr_mirror",
"irc_mirror",
"jabber_mirror",
"JabberMirror",
]
check_private_message_policy(realm, sender, user_profiles)
# API super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(
user_profiles, forwarded_mirror_message, forwarder_user_profile, sender
)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
raise ZephyrMessageAlreadySentException(id)
widget_content_dict = None
if widget_content is not None:
try:
widget_content_dict = orjson.loads(widget_content)
except orjson.JSONDecodeError:
raise JsonableError(_("Widgets: API programmer sent invalid JSON content"))
try:
check_widget_content(widget_content_dict)
except ValidationError as error:
raise JsonableError(
_("Widgets: {error_msg}").format(
error_msg=error.message,
)
)
message_send_dict = build_message_send_dict(
message=message,
stream=stream,
local_id=local_id,
sender_queue_id=sender_queue_id,
realm=realm,
widget_content_dict=widget_content_dict,
email_gateway=email_gateway,
)
if stream is not None and message_send_dict.rendering_result.mentions_wildcard:
if not wildcard_mention_allowed(sender, stream):
raise JsonableError(
_("You do not have permission to use wildcard mentions in this stream.")
)
return message_send_dict
def _internal_prep_message(
realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str,
email_gateway: bool = False,
) -> Optional[SendMessageRequest]:
"""
Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
# Truncate the content if it exceeds the maximum message length.
if len(content) > settings.MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(
sender,
get_client("Internal"),
addressee,
content,
realm=realm,
email_gateway=email_gateway,
)
except JsonableError as e:
logging.exception(
"Error queueing internal message by %s: %s",
sender.delivery_email,
e.msg,
stack_info=True,
)
return None
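# Illustrative sketch (a hypothetical helper, not used elsewhere in this
# module): batching several prepared messages into one do_send_messages call,
# as the _internal_prep_message docstring suggests. The topic and content
# strings here are made-up example values.
def _example_batch_internal_announcements(
    sender: UserProfile, streams: List[Stream]
) -> List[int]:
    prepped = [
        internal_prep_stream_message(sender, stream, "announcements", "Nightly build finished.")
        for stream in streams
    ]
    # Messages that failed validation come back as None and are skipped.
    return do_send_messages([m for m in prepped if m is not None])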
def internal_prep_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
realm = stream.realm
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
email_gateway=email_gateway,
)
def internal_prep_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(
realm: Realm, sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[SendMessageRequest]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(
sender: UserProfile, recipient_user: UserProfile, content: str
) -> Optional[int]:
realm = recipient_user.realm
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool = False,
) -> Optional[int]:
message = internal_prep_stream_message(sender, stream, topic, content, email_gateway)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm,
sender: UserProfile,
stream_name: str,
topic: str,
content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm,
sender,
stream_name,
topic,
content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(
realm: Realm, sender: UserProfile, emails: List[str], content: str
) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_color(user_profile: UserProfile, used_colors: Set[str]) -> str:
# These colors are shared with the palette in subs.js.
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
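# Illustrative sketch (not used by the subscription code): once every palette
# color is in use, pick_color wraps deterministically back to the start of
# STREAM_ASSIGNMENT_COLORS via the modulo branch above.
def _example_pick_color_wraparound(user_profile: UserProfile) -> str:
    used = set(STREAM_ASSIGNMENT_COLORS)  # every palette color already taken
    # Assuming no duplicate palette entries, len(used) % len(STREAM_ASSIGNMENT_COLORS)
    # is 0, so this returns the first palette color again.
    return pick_color(user_profile, used)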
def validate_user_access_to_subscribers(
user_profile: Optional[UserProfile], stream: Stream
) -> None:
"""Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{
"realm_id": stream.realm_id,
"is_web_public": stream.is_web_public,
"invite_only": stream.invite_only,
},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id),
)
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Even guest users can access subscribers to web-public streams,
# since they can freely become subscribers to these streams.
if stream_dict["is_web_public"]:
return
# With the exception of web public streams, a guest must
# be subscribed to a stream (even a public one) in order
# to see subscribers.
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
# We could explicitly handle the case where guests aren't
# subscribed here in an `else` statement or we can fall
# through to the subsequent logic. Tim prefers the latter.
# Adding an `else` would ensure better code coverage.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if stream_dict["invite_only"] and not check_user_subscribed(user_profile):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(
stream_dicts: Collection[Mapping[str, Any]],
user_profile: UserProfile,
subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_id = stream_dict["id"]
is_subscribed = stream_id in subscribed_stream_ids
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: is_subscribed,
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
recipient_ids = sorted([stream["recipient_id"] for stream in target_stream_dicts])
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
"""
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
"""
query = SQL(
"""
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_subscription.is_user_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
"""
)
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
"""
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
"""
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
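# Illustrative sketch of the return shape (hypothetical ids): every input
# stream id is a key, and streams whose subscribers the user may not inspect
# simply map to an empty list. Real callers pass richer stream dicts; this one
# carries only the fields the helper above reads.
def _example_subscriber_map(user_profile: UserProfile) -> Dict[int, List[int]]:
    stream_dicts = [
        {
            "id": 1,
            "recipient_id": 10,
            "realm_id": user_profile.realm_id,
            "is_web_public": True,
            "invite_only": False,
        },
    ]
    # e.g. {1: [4, 7, 9]} if users 4, 7 and 9 subscribe to recipient 10.
    return bulk_get_subscriber_user_ids(stream_dicts, user_profile, subscribed_stream_ids=set())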
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
return get_active_subscriptions_for_stream_id(stream.id, include_deactivated_users=False)
def get_subscriber_emails(
stream: Stream, requesting_user: Optional[UserProfile] = None
) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values("user_profile__email")
return [subscription["user_profile__email"] for subscription in subscriptions]
def send_subscription_add_events(
realm: Realm,
sub_info_list: List[SubInfo],
subscriber_dict: Dict[int, Set[int]],
) -> None:
info_by_user: Dict[int, List[SubInfo]] = defaultdict(list)
for sub_info in sub_info_list:
info_by_user[sub_info.user.id].append(sub_info)
stream_ids = {sub_info.stream.id for sub_info in sub_info_list}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
for user_id, sub_infos in info_by_user.items():
sub_dicts = []
for sub_info in sub_infos:
stream = sub_info.stream
subscription = sub_info.sub
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict["in_home_view"] = not subscription.is_muted
sub_dict["email_address"] = encode_email_address(stream, show_sender=True)
sub_dict["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic
)
if stream.is_in_zephyr_realm and not stream.invite_only:
sub_dict["subscribers"] = []
else:
sub_dict["subscribers"] = list(subscriber_dict[stream.id])
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add", subscriptions=sub_dicts)
send_event(realm, event, [user_id])
SubT = Tuple[List[SubInfo], List[SubInfo]]
def bulk_add_subscriptions(
realm: Realm,
streams: Collection[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str] = {},
from_user_creation: bool = False,
*,
acting_user: Optional[UserProfile],
) -> SubT:
users = list(users)
    # Sanity check our callers
for stream in streams:
assert stream.realm_id == realm.id
for user in users:
assert user.realm_id == realm.id
recipient_id_to_stream = {stream.recipient_id: stream for stream in streams}
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
all_subs_query = get_stream_subscriptions_for_users(users)
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
already_subscribed: List[SubInfo] = []
subs_to_activate: List[SubInfo] = []
subs_to_add: List[SubInfo] = []
for user_profile in users:
my_subs = subs_by_user[user_profile.id]
used_colors = {sub.color for sub in my_subs}
# Make a fresh set of all new recipient ids, and then we will
# remove any for which our user already has a subscription
# (and we'll re-activate any subscriptions as needed).
new_recipient_ids = {stream.recipient_id for stream in streams}
for sub in my_subs:
if sub.recipient_id in new_recipient_ids:
new_recipient_ids.remove(sub.recipient_id)
stream = recipient_id_to_stream[sub.recipient_id]
sub_info = SubInfo(user_profile, sub, stream)
if sub.active:
already_subscribed.append(sub_info)
else:
subs_to_activate.append(sub_info)
for recipient_id in new_recipient_ids:
stream = recipient_id_to_stream[recipient_id]
if stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, used_colors)
used_colors.add(color)
sub = Subscription(
user_profile=user_profile,
is_user_active=user_profile.is_active,
active=True,
color=color,
recipient_id=recipient_id,
)
sub_info = SubInfo(user_profile, sub, stream)
subs_to_add.append(sub_info)
bulk_add_subs_to_db_with_logging(
realm=realm,
acting_user=acting_user,
subs_to_add=subs_to_add,
subs_to_activate=subs_to_activate,
)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
for sub_info in subs_to_add + subs_to_activate:
altered_user_dict[sub_info.stream.id].add(sub_info.user.id)
stream_dict = {stream.id: stream for stream in streams}
new_streams = [stream_dict[stream_id] for stream_id in altered_user_dict]
subscriber_peer_info = bulk_get_subscriber_peer_info(
realm=realm,
streams=new_streams,
)
# We now send several types of events to notify browsers. The
# first batches of notifications are sent only to the user(s)
# being subscribed; we can skip these notifications when this is
# being called from the new user creation flow.
if not from_user_creation:
send_stream_creation_events_for_private_streams(
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
)
send_subscription_add_events(
realm=realm,
sub_info_list=subs_to_add + subs_to_activate,
subscriber_dict=subscriber_peer_info.subscribed_ids,
)
send_peer_subscriber_events(
op="peer_add",
realm=realm,
altered_user_dict=altered_user_dict,
stream_dict=stream_dict,
private_peer_dict=subscriber_peer_info.private_peer_dict,
)
return (
subs_to_add + subs_to_activate,
already_subscribed,
)
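# Illustrative sketch (a hypothetical caller): subscribing a batch of users to
# a batch of streams and unpacking the (new_or_reactivated, already_subscribed)
# pair returned above. Each entry is a SubInfo(user, sub, stream) triple.
def _example_subscribe_users(
    realm: Realm, streams: List[Stream], users: List[UserProfile]
) -> int:
    new_subs, already_subscribed = bulk_add_subscriptions(
        realm, streams, users, acting_user=None
    )
    return len(new_subs)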
# This function contains all the database changes as part of
# subscribing users to streams; we use a transaction to ensure that
# the RealmAuditLog entries are created atomically with the
# Subscription object creation (and updates).
@transaction.atomic
def bulk_add_subs_to_db_with_logging(
realm: Realm,
acting_user: Optional[UserProfile],
subs_to_add: List[SubInfo],
subs_to_activate: List[SubInfo],
) -> None:
Subscription.objects.bulk_create(info.sub for info in subs_to_add)
sub_ids = [info.sub.id for info in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for sub_info in subs_to_add:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time,
)
)
for sub_info in subs_to_activate:
all_subscription_logs.append(
RealmAuditLog(
realm=realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time,
)
)
    # Now that we have generated all the log objects, insert them in bulk.
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def send_stream_creation_events_for_private_streams(
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
for stream_id, stream_users_ids in altered_user_dict.items():
stream = stream_dict[stream_id]
if not stream.is_public():
            # Users newly added to invite-only streams need a stream
            # `create` notification: the stream has to exist in their
            # client before they receive the "subscribe" notification,
            # and the `create` event is what lets them manage the new stream.
# Realm admins already have all created private streams.
realm_admin_ids = {user.id for user in realm.get_admin_users_and_bots()}
notify_user_ids = list(stream_users_ids - realm_admin_ids)
if notify_user_ids:
send_stream_creation_event(stream, notify_user_ids)
def send_peer_subscriber_events(
op: str,
realm: Realm,
stream_dict: Dict[int, Stream],
altered_user_dict: Dict[int, Set[int]],
private_peer_dict: Dict[int, Set[int]],
) -> None:
# Send peer_add/peer_remove events to other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
assert op in ["peer_add", "peer_remove"]
private_stream_ids = [
stream_id for stream_id in altered_user_dict if stream_dict[stream_id].invite_only
]
for stream_id in private_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = private_peer_dict[stream_id] - altered_user_ids
if peer_user_ids and altered_user_ids:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
public_stream_ids = [
stream_id
for stream_id in altered_user_dict
if not stream_dict[stream_id].invite_only and not stream_dict[stream_id].is_in_zephyr_realm
]
if public_stream_ids:
user_streams: Dict[int, Set[int]] = defaultdict(set)
public_peer_ids = set(active_non_guest_user_ids(realm.id))
for stream_id in public_stream_ids:
altered_user_ids = altered_user_dict[stream_id]
peer_user_ids = public_peer_ids - altered_user_ids
if peer_user_ids and altered_user_ids:
if len(altered_user_ids) == 1:
# If we only have one user, we will try to
# find other streams they have (un)subscribed to
# (where it's just them). This optimization
# typically works when a single user is subscribed
# to multiple default public streams during
# new-user registration.
#
# This optimization depends on all public streams
# having the same peers for any single user, which
# isn't the case for private streams.
altered_user_id = list(altered_user_ids)[0]
user_streams[altered_user_id].add(stream_id)
else:
event = dict(
type="subscription",
op=op,
stream_ids=[stream_id],
user_ids=sorted(list(altered_user_ids)),
)
send_event(realm, event, peer_user_ids)
for user_id, stream_ids in user_streams.items():
peer_user_ids = public_peer_ids - {user_id}
event = dict(
type="subscription",
op=op,
stream_ids=sorted(list(stream_ids)),
user_ids=[user_id],
)
send_event(realm, event, peer_user_ids)
def send_peer_remove_events(
realm: Realm,
streams: List[Stream],
altered_user_dict: Dict[int, Set[int]],
) -> None:
private_streams = [stream for stream in streams if stream.invite_only]
private_peer_dict = bulk_get_private_peers(
realm=realm,
private_streams=private_streams,
)
stream_dict = {stream.id: stream for stream in streams}
send_peer_subscriber_events(
op="peer_remove",
realm=realm,
stream_dict=stream_dict,
altered_user_dict=altered_user_dict,
private_peer_dict=private_peer_dict,
)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path("audio/notification_sounds")
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if "." in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == ".ogg":
available_notification_sounds.append(root)
return sorted(available_notification_sounds)
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove", subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(
users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
*,
acting_user: Optional[UserProfile],
) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, streams)
def get_non_subscribed_subs() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {sub_info.stream.id for sub_info in user_sub_stream_info}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_subs()
subs_to_deactivate: List[SubInfo] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for sub_infos in existing_subs_by_user.values():
for sub_info in sub_infos:
subs_to_deactivate.append(sub_info)
sub_ids_to_deactivate.append(sub_info.sub.id)
our_realm = users[0].realm
# We do all the database changes in a transaction to ensure
# RealmAuditLog entries are atomically created when making changes.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log subscription activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
all_subscription_logs = [
RealmAuditLog(
realm=sub_info.user.realm,
acting_user=acting_user,
modified_user=sub_info.user,
modified_stream=sub_info.stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time,
)
for sub_info in subs_to_deactivate
]
        # Now that we have generated all the log objects, insert them in bulk.
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, Set[int]] = defaultdict(set)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for sub_info in subs_to_deactivate:
stream = sub_info.stream
streams_by_user[sub_info.user.id].append(stream)
altered_user_dict[stream.id].add(sub_info.user.id)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {
"type": "mark_stream_messages_as_read",
"user_profile_id": user_profile.id,
"stream_recipient_ids": [stream.recipient_id for stream in streams],
}
queue_json_publish("deferred_work", event)
send_peer_remove_events(
realm=our_realm,
streams=streams,
altered_user_dict=altered_user_dict,
)
new_vacant_streams = set(occupied_streams_before) - set(occupied_streams_after)
new_vacant_private_streams = [stream for stream in new_vacant_streams if stream.invite_only]
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub_info.user, sub_info.stream) for sub_info in subs_to_deactivate],
not_subscribed,
)
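# Illustrative sketch (a hypothetical caller): unsubscribing users and keeping
# the two halves of the SubAndRemovedT result separate: pairs that were
# actually removed versus pairs that were never subscribed to begin with.
# get_client("Internal") mirrors how other internal code paths in this module
# obtain a Client.
def _example_unsubscribe_users(
    users: List[UserProfile], streams: List[Stream]
) -> SubAndRemovedT:
    removed, not_subscribed = bulk_remove_subscriptions(
        users, streams, get_client("Internal"), acting_user=None
    )
    return removed, not_subscribed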
def do_change_subscription_property(
user_profile: UserProfile,
sub: Subscription,
stream: Stream,
property_name: str,
value: Any,
*,
acting_user: Optional[UserProfile],
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
# For this property, is_muted is used in the database, but
# in_home_view in the API, since we haven't migrated the events
# API to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
old_value = getattr(sub, database_property_name)
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.SUBSCRIPTION_PROPERTY_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
modified_stream=stream,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: database_value,
"property": database_property_name,
}
).decode(),
)
event = dict(
type="subscription",
op="update",
property=event_property_name,
value=event_value,
stream_id=stream.id,
)
send_event(user_profile.realm, event, [user_profile.id])
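# Illustrative sketch of the legacy-name mapping handled above: muting a
# stream can be spelled with either property name, and both end up setting
# Subscription.is_muted = True in the database while clients receive an
# in_home_view=False update event.
def _example_mute_stream(
    user_profile: UserProfile, sub: Subscription, stream: Stream
) -> None:
    # The modern spelling ...
    do_change_subscription_property(
        user_profile, sub, stream, "is_muted", True, acting_user=user_profile
    )
    # ... is equivalent to the legacy API spelling.
    do_change_subscription_property(
        user_profile, sub, stream, "in_home_view", False, acting_user=user_profile
    )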
def do_change_password(user_profile: UserProfile, password: str, commit: bool = True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time,
)
def do_change_full_name(
user_profile: UserProfile, full_name: str, acting_user: Optional[UserProfile]
) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time,
extra_data=old_name,
)
payload = dict(user_id=user_profile.id, full_name=user_profile.full_name)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(type="realm_bot", op="update", bot=payload),
bot_owner_user_ids(user_profile),
)
def check_change_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(
user_profile: UserProfile, full_name_raw: str, acting_user: UserProfile
) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(
user_profile: UserProfile, bot_owner: UserProfile, acting_user: UserProfile
) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time,
)
update_users = bot_owner_user_ids(user_profile)
    # For admins, an update event is sent instead of delete/add
    # events, since an admin's bot_data already contains all of the
    # realm's bots and none of them should be removed or re-added.
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="delete",
bot=dict(
user_id=user_profile.id,
),
),
{previous_owner.id},
)
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id})
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
),
),
update_users,
)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event: Dict[str, Any] = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time,
)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=acting_user,
modified_user=user_profile,
event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time,
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
api_key=new_api_key,
),
),
bot_owner_user_ids(user_profile),
)
event = {"type": "clear_push_device_tokens", "user_profile_id": user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
),
),
bot_owner_user_ids(user_profile),
)
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
# Even clients using client_gravatar don't need the email,
# since we're sending the URL anyway.
user_id=user_profile.id,
)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
def do_change_avatar_fields(
user_profile: UserProfile,
avatar_source: str,
skip_notify: bool = False,
*,
acting_user: Optional[UserProfile],
) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={"avatar_source": avatar_source},
event_time=event_time,
acting_user=acting_user,
)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, *, acting_user: Optional[UserProfile]) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
def do_change_icon_source(
realm: Realm, icon_source: str, *, acting_user: Optional[UserProfile]
) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_ICON_SOURCE_CHANGED,
extra_data={"icon_source": icon_source, "icon_version": realm.icon_version},
event_time=event_time,
acting_user=acting_user,
)
send_event(
realm,
dict(
type="realm",
op="update_dict",
property="icon",
data=dict(icon_source=realm.icon_source, icon_url=realm_icon_url(realm)),
),
active_user_ids(realm.id),
)
def do_change_logo_source(
realm: Realm, logo_source: str, night: bool, *, acting_user: Optional[UserProfile]
) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
)
event = dict(
type="realm",
op="update_dict",
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night),
)
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(
realm: Realm, plan_type: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=["plan_type"])
RealmAuditLog.objects.create(
event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm,
event_time=timezone_now(),
acting_user=acting_user,
extra_data={"old_value": old_value, "new_value": plan_type},
)
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=["_max_invites", "message_visibility_limit", "upload_quota_gb"])
event = {
"type": "realm",
"op": "update",
"property": "plan_type",
"value": plan_type,
"extra_data": {"upload_quota": realm.upload_quota_bytes()},
}
send_event(realm, event, active_user_ids(realm.id))
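# Illustrative sketch (not called anywhere): upgrading a realm to the STANDARD
# plan lifts the LIMITED-plan restrictions per the branches above, so message
# history becomes fully visible and the upload quota grows.
def _example_upgrade_realm(realm: Realm) -> None:
    do_change_plan_type(realm, Realm.STANDARD, acting_user=None)
    assert realm.message_visibility_limit is None
    assert realm.upload_quota_gb == Realm.UPLOAD_QUOTA_STANDARD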
def do_change_default_sending_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_sending_stream_id
user_profile.default_sending_stream = stream
user_profile.save(update_fields=["default_sending_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_SENDING_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_sending_stream=stream_name,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_default_events_register_stream(
user_profile: UserProfile, stream: Optional[Stream], *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_events_register_stream_id
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=["default_events_register_stream"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_REGISTER_STREAM_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: None if stream is None else stream.id,
}
).decode(),
)
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_events_register_stream=stream_name,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_default_all_public_streams(
user_profile: UserProfile, value: bool, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.default_all_public_streams
user_profile.default_all_public_streams = value
user_profile.save(update_fields=["default_all_public_streams"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_DEFAULT_ALL_PUBLIC_STREAMS_CHANGED,
event_time=event_time,
modified_user=user_profile,
acting_user=acting_user,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
}
).decode(),
)
if user_profile.is_bot:
send_event(
user_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
),
),
bot_owner_user_ids(user_profile),
)
def do_change_user_role(
user_profile: UserProfile, value: int, *, acting_user: Optional[UserProfile]
) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}
).decode(),
)
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, role=user_profile.role)
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_make_user_billing_admin(user_profile: UserProfile) -> None:
user_profile.is_billing_admin = True
user_profile.save(update_fields=["is_billing_admin"])
event = dict(
type="realm_user", op="update", person=dict(user_id=user_profile.id, is_billing_admin=True)
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_can_forge_sender(user_profile: UserProfile, value: bool) -> None:
user_profile.can_forge_sender = value
user_profile.save(update_fields=["can_forge_sender"])
def do_change_can_create_users(user_profile: UserProfile, value: bool) -> None:
user_profile.can_create_users = value
user_profile.save(update_fields=["can_create_users"])
def do_change_stream_invite_only(
stream: Stream, invite_only: bool, history_public_to_subscribers: Optional[bool] = None
) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.is_web_public = False
stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
is_web_public=False,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_make_stream_web_public(stream: Stream) -> None:
stream.is_web_public = True
stream.invite_only = False
stream.history_public_to_subscribers = True
stream.save(update_fields=["invite_only", "history_public_to_subscribers", "is_web_public"])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=["stream_post_policy"])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream, new_name: str, user_profile: UserProfile) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
RealmAuditLog.objects.create(
realm=stream.realm,
acting_user=user_profile,
modified_stream=stream,
event_type=RealmAuditLog.STREAM_NAME_CHANGED,
event_time=timezone_now(),
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_name,
RealmAuditLog.NEW_VALUE: new_name,
}
).decode(),
)
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
["email_address", new_email],
["name", new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT)
with override_language(stream.realm.default_language):
internal_send_stream_message(
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_("{user_name} renamed stream {old_stream_name} to {new_stream_name}.").format(
user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=["description", "rendered_description"])
event = dict(
type="stream",
op="update",
property="description",
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(
stream: Stream, message_retention_days: Optional[int] = None
) -> None:
stream.message_retention_days = message_retention_days
stream.save(update_fields=["message_retention_days"])
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(
string_id: str,
name: str,
*,
emails_restricted_to_domains: Optional[bool] = None,
email_address_visibility: Optional[int] = None,
description: Optional[str] = None,
invite_required: Optional[bool] = None,
plan_type: Optional[int] = None,
org_type: Optional[int] = None,
date_created: Optional[datetime.datetime] = None,
) -> Realm:
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs["emails_restricted_to_domains"] = emails_restricted_to_domains
if email_address_visibility is not None:
kwargs["email_address_visibility"] = email_address_visibility
if description is not None:
kwargs["description"] = description
if invite_required is not None:
kwargs["invite_required"] = invite_required
if plan_type is not None:
kwargs["plan_type"] = plan_type
if org_type is not None:
kwargs["org_type"] = org_type
if date_created is not None:
# The date_created parameter is intended only for use by test
# suites that want to backdate the date of a realm's creation.
assert not settings.PRODUCTION
kwargs["date_created"] = date_created
with transaction.atomic():
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_CREATED, event_time=realm.date_created
)
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(
realm,
Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:",
acting_user=None,
)
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm,
Realm.INITIAL_PRIVATE_STREAM_NAME,
invite_only=True,
stream_description="A private stream for core team members.",
acting_user=None,
)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=["notifications_stream", "signup_notifications_stream"])
if plan_type is None and settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED, acting_user=None)
sender = get_system_bot(settings.NOTIFICATION_BOT)
admin_realm = sender.realm
# Send a notification to the admin realm
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist: # nocoverage
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
return realm
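# Illustrative sketch (hypothetical values): creating an invite-only realm.
# The string_id becomes the realm's subdomain, and the notification and signup
# streams are created by do_create_realm itself.
def _example_create_demo_realm() -> Realm:
    return do_create_realm(
        string_id="demo",
        name="Demo Organization",
        invite_required=True,
    )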
def do_change_notification_settings(
user_profile: UserProfile,
name: str,
value: Union[bool, int, str],
*,
acting_user: Optional[UserProfile],
) -> None:
"""Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
"""
old_value = getattr(user_profile, name)
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(
value, notification_setting_type
), f"Cannot update {name}: {value} is not an instance of {notification_setting_type}"
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == "enable_digest_emails" and not value:
clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {
"type": "update_global_notifications",
"user": user_profile.email,
"notification_name": name,
"setting": value,
}
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm,
event_type=RealmAuditLog.USER_NOTIFICATION_SETTINGS_CHANGED,
event_time=event_time,
acting_user=acting_user,
modified_user=user_profile,
extra_data=orjson.dumps(
{
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
"property": name,
}
).decode(),
)
send_event(user_profile.realm, event, [user_profile.id])
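# Illustrative sketch: the isinstance assertion above means the value must
# match the setting's registered type, so a boolean setting like
# enable_digest_emails only accepts True/False. Disabling it also clears any
# queued digest emails, per the special case above.
def _example_disable_digests(user_profile: UserProfile) -> None:
    do_change_notification_settings(
        user_profile, "enable_digest_emails", False, acting_user=user_profile
    )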
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(
user_profile: UserProfile, setting_name: str, setting_value: Union[bool, str, int]
) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {
"type": "update_display_settings",
"user": user_profile.email,
"setting_name": setting_name,
"setting": setting_value,
}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event["language_name"] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(
email=user_profile.email,
user_id=user_profile.id,
timezone=canonicalize_timezone(user_profile.timezone),
)
send_event(
user_profile.realm,
dict(type="realm_user", op="update", person=payload),
active_user_ids(user_profile.realm_id),
)
def lookup_default_stream_groups(
default_stream_group_names: List[str], realm: Realm
) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Invalid default stream group {}").format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm)
),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(
realm: Realm, group_name: str, description: str, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name)
)
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description
)
if not created:
raise JsonableError(
_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name)
)
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(
_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
if stream in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(
realm: Realm, group: DefaultStreamGroup, streams: List[Stream]
) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(
_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name)
)
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(
realm: Realm, group: DefaultStreamGroup, new_group_name: str
) -> None:
if group.name == new_group_name:
raise JsonableError(
_("This default stream group is already named '{}'").format(new_group_name)
)
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(
realm: Realm, group: DefaultStreamGroup, new_description: str
) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [
default.stream
for default in DefaultStream.objects.select_related().filter(realm_id=realm_id)
]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted((stream.to_dict() for stream in streams), key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted((group.to_dict() for group in groups), key=lambda elt: elt["name"])
def do_update_user_activity_interval(
user_profile: UserProfile, log_time: datetime.datetime
) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# Two intervals overlap iff each interval ends after the other
# begins. In this case, we just extend the old interval to
# include the new interval.
if log_time <= last.end and effective_end >= last.start:
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(
user_profile=user_profile, start=log_time, end=effective_end
)
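# Illustrative sketch of the overlap rule above (assuming MIN_INTERVAL_LENGTH
# is longer than one minute, as it is in practice): a second heartbeat shortly
# after the first falls inside the open interval, so the existing row is
# extended instead of a second UserActivityInterval being created.
def _example_interval_merge(user_profile: UserProfile) -> None:
    base = timezone_now()
    do_update_user_activity_interval(user_profile, base)
    do_update_user_activity_interval(user_profile, base + datetime.timedelta(minutes=1))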
@statsd_increment("user_activity")
def do_update_user_activity(
user_profile_id: int, client_id: int, query: str, count: int, log_time: datetime.datetime
) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile_id=user_profile_id,
client_id=client_id,
query=query,
defaults={"last_visit": log_time, "count": count},
)
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
    # Most presence data is sent to clients in the main presence
    # endpoint in response to the user's own presence; this results in
    # data that is 1-2 minutes stale for who is online. The flaw with
    # this plan is that when a user comes back online and then immediately
    # sends a message, recipients may still see that user as offline!
    # We solve that by sending an immediate presence update to clients.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/presence.html for
# internals documentation on presence.
user_ids = active_user_ids(user_profile.realm_id)
if len(user_ids) > settings.USER_LIMIT_FOR_SENDING_PRESENCE_UPDATE_EVENTS:
        # These immediate presence updates generate quadratic work for
        # Tornado (each event is linear in the number of users, and the
        # frequency of users coming online also grows linearly with the
        # userbase). In
# organizations with thousands of users, this can overload
# Tornado, especially if much of the realm comes online at the
# same time.
#
# The utility of these live-presence updates goes down as
# organizations get bigger (since one is much less likely to
# be paying attention to the sidebar); so beyond a limit, we
# stop sending them at all.
return
presence_dict = presence.to_dict()
event = dict(
type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict["client"]: presence_dict},
)
send_event(user_profile.realm, event, user_ids)
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website'.
    # The desktop app reports a client as ZulipDesktop
    # due to it setting a custom user agent. We want both
    # to count as web users, so we alias ZulipDesktop to website.
if client.name in ["ZulipDesktop"]:
return get_client("website")
else:
return client
@statsd_increment("user_presence")
def do_update_user_presence(
user_profile: UserProfile, client: Client, log_time: datetime.datetime, status: int
) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
user_profile=user_profile,
client=client,
defaults=defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
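    # A minimal sketch of that suppression (hypothetical timestamps, given
    # the 1 minute 10 second staleness window above): if the stored row is
    # ACTIVE at 12:00:00, an IDLE ping at 12:00:40 is not stale and is
    # dropped, while an IDLE ping at 12:01:20 (stale) or any ACTIVE ping
    # updates the row.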
    if (not created and stale_status) or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {"user_profile_id": user_profile.id, "time": datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(
user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int,
new_user_input: bool,
) -> None:
event = {
"user_profile_id": user_profile.id,
"status": status,
"time": datetime_to_timestamp(log_time),
"client": client.name,
}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(
user_profile: UserProfile, away: Optional[bool], status_text: Optional[str], client_id: int
) -> None:
if away is None:
status = None
elif away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
)
event = dict(
type="user_status",
user_id=user_profile.id,
)
if away is not None:
event["away"] = away
if status_text is not None:
event["status_text"] = status_text
send_event(realm, event, active_user_ids(realm.id))
@dataclass
class ReadMessagesEvent:
messages: List[int]
all: bool
type: str = field(default="update_message_flags", init=False)
op: str = field(default="add", init=False)
operation: str = field(default="add", init=False)
flag: str = field(default="read", init=False)
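# A minimal sketch of the serialized form produced via dataclasses.asdict,
# for hypothetical message IDs [1, 2]:
#   {"messages": [1, 2], "all": False, "type": "update_message_flags",
#    "op": "add", "operation": "add", "flag": "read"}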
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event("bankruptcy")
# First, we clear mobile push notifications. This is safer in the
# event that the below logic times out and we're killed.
all_push_message_ids = (
UserMessage.objects.filter(
user_profile=user_profile,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("message_id", flat=True)[0:10000]
)
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(user_profile=user_profile).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_stream_messages_as_read(
user_profile: UserProfile, stream_recipient_id: int, topic_name: Optional[str] = None
) -> int:
log_statsd_event("mark_stream_as_read")
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
msgs = msgs.filter(message__recipient_id=stream_recipient_id)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list("message_id", flat=True))
count = msgs.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_mark_muted_user_messages_as_read(
user_profile: UserProfile,
muted_user: UserProfile,
) -> int:
messages = UserMessage.objects.filter(
user_profile=user_profile, message__sender=muted_user
).extra(where=[UserMessage.where_unread()])
message_ids = list(messages.values_list("message_id", flat=True))
count = messages.update(
flags=F("flags").bitor(UserMessage.flags.read),
)
event = asdict(
ReadMessagesEvent(
messages=message_ids,
all=False,
)
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
def do_update_mobile_push_notification(
message: Message,
prior_mention_user_ids: Set[int],
mentions_user_ids: Set[int],
stream_push_user_ids: Set[int],
) -> None:
# Called during the message edit code path to remove mobile push
# notifications for users who are no longer mentioned following
# the edit. See #15428 for details.
#
# A perfect implementation would also support updating the message
# in a sent notification if a message was edited to mention a
# group rather than a user (or vice versa), though it is likely
# not worth the effort to do such a change.
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(
user_profile_ids: List[int], message_ids: List[int]
) -> None:
if len(message_ids) == 0:
return
# This function supports clearing notifications for several users
# only for the message-edit use case where we'll have a single message_id.
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(
UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
)
.extra(
where=[UserMessage.where_active_push_notification()],
)
.values_list("user_profile_id", "message_id")
)
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish(
"missedmessage_mobile_notifications",
{
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
},
)
def do_update_message_flags(
user_profile: UserProfile, client: Client, operation: str, flag: str, messages: List[int]
) -> int:
valid_flags = [item for item in UserMessage.flags if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
if operation not in ("add", "remove"):
raise JsonableError(_("Invalid message flag operation: '{}'").format(operation))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile, message_id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
# is a flag boolean on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
        if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read,
)
if operation == "add":
count = msgs.update(flags=F("flags").bitor(flagattr))
elif operation == "remove":
count = msgs.update(flags=F("flags").bitand(~flagattr))
event = {
"type": "update_message_flags",
"op": operation,
"operation": operation,
"flag": flag,
"messages": messages,
"all": False,
}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(
user_profile, COUNT_STATS["messages_read::hour"], None, event_time, increment=count
)
do_increment_logging_stat(
user_profile,
COUNT_STATS["messages_read_interactions::hour"],
None,
event_time,
increment=min(1, count),
)
return count
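# A minimal usage sketch (hypothetical caller code): starring a single
# message would look like
#   do_update_message_flags(user_profile, client, "add", "starred", [message_id])
# and the return value is the number of UserMessage rows actually updated.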
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
RESOLVED_TOPIC_PREFIX = "✔ "
def maybe_send_resolve_topic_notifications(
*,
user_profile: UserProfile,
stream: Stream,
old_topic: str,
new_topic: str,
) -> None:
# Note that topics will have already been stripped in check_update_message.
#
# This logic is designed to treat removing a weird "✔ ✔✔ "
# prefix as unresolving the topic.
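    # A minimal sketch of the lstrip comparison below (hypothetical topics):
    # "✔ ✔✔ lunch" and "lunch" both lstrip to "lunch", so removing that
    # weird prefix is treated as unresolving the topic, while renaming
    # "lunch" to "dinner" strips to different strings and we return early.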
if old_topic.lstrip(RESOLVED_TOPIC_PREFIX) != new_topic.lstrip(RESOLVED_TOPIC_PREFIX):
return
if new_topic.startswith(RESOLVED_TOPIC_PREFIX) and not old_topic.startswith(
RESOLVED_TOPIC_PREFIX
):
notification_string = _("{user} has marked this topic as resolved.")
elif old_topic.startswith(RESOLVED_TOPIC_PREFIX) and not new_topic.startswith(
RESOLVED_TOPIC_PREFIX
):
notification_string = _("{user} has marked this topic as unresolved.")
else:
# If there's some other weird topic that does not toggle the
# state of "topic starts with RESOLVED_TOPIC_PREFIX", we do
# nothing. Any other logic could result in cases where we send
# these notifications in a non-alternating fashion.
#
# Note that it is still possible for an individual topic to
# have multiple "This topic was marked as resolved"
# notifications in a row: one can send new messages to the
# pre-resolve topic and then resolve the topic created that
# way to get multiple in the resolved topic. And then an
# administrator can the messages in between. We consider this
# to be a fundamental risk of irresponsible message deletion,
# not a bug with the "resolve topics" feature.
return
sender = get_system_bot(settings.NOTIFICATION_BOT)
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
with override_language(stream.realm.default_language):
internal_send_stream_message(
sender,
stream,
new_topic,
notification_string.format(
user=user_mention,
),
)
def send_message_moved_breadcrumbs(
user_profile: UserProfile,
old_stream: Stream,
old_topic: str,
old_thread_notification_string: Optional[str],
new_stream: Stream,
new_topic: Optional[str],
new_thread_notification_string: Optional[str],
) -> None:
# Since moving content between streams is highly disruptive,
# it's worth adding a couple tombstone messages showing what
# happened.
sender = get_system_bot(settings.NOTIFICATION_BOT)
if new_topic is None:
new_topic = old_topic
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if new_thread_notification_string is not None:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
sender,
new_stream,
new_topic,
new_thread_notification_string.format(
old_location=old_topic_link,
user=user_mention,
),
)
if old_thread_notification_string is not None:
with override_language(old_stream.realm.default_language):
# Send a notification to the old stream that the topic was moved.
internal_send_stream_message(
sender,
old_stream,
old_topic,
old_thread_notification_string.format(
user=user_mention,
new_location=new_topic_link,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values("user_profile_id", "flags")
rows = list(query)
message_user_ids = {row["user_profile_id"] for row in rows}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {row["user_profile_id"] for row in rows if int(row["flags"]) & mask}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
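# A minimal sketch of the flag mask above (hypothetical bit values): if
# UserMessage.flags.mentioned were 8 and wildcard_mentioned were 16, then
# mask == 24, and a row whose flags integer is 9 (read | mentioned)
# satisfies int(flags) & mask != 0, so its user counts as mentioned.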
def update_user_message_flags(
rendering_result: MessageRenderingResult, ums: Iterable[UserMessage]
) -> None:
wildcard = rendering_result.mentions_wildcard
mentioned_ids = rendering_result.mentions_user_ids
ids_with_alert_words = rendering_result.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if um.flags & flag:
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=["flags"])
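# A minimal sketch of update_flag above (hypothetical bit value 8 for
# mentioned): a row whose flags are 1 (read) becomes 9 when should_set is
# True and is added to changed_ums; if the bit is already in the desired
# state, the row is left untouched and never saved.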
def update_to_dict_cache(
changed_messages: List[Message], realm_id: Optional[int] = None
) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(
user_profile: UserProfile,
message: Message,
content: Optional[str],
rendering_result: MessageRenderingResult,
) -> None:
event: Dict[str, Any] = {"type": "update_message", "message_id": message.id}
changed_messages = [message]
rendered_content: Optional[str] = None
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(rendering_result, ums)
rendered_content = rendering_result.rendered_content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event["message_ids"] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
sender_id: int
recipient_id: int
topic: str
stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(
user_profile: UserProfile,
target_message: Message,
new_stream: Optional[Stream],
topic_name: Optional[str],
propagate_mode: str,
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool,
content: Optional[str],
rendering_result: Optional[MessageRenderingResult],
prior_mention_user_ids: Set[int],
mention_data: Optional[MentionData] = None,
) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.
"""
timestamp = timezone_now()
target_message.last_edit_time = timestamp
event: Dict[str, Any] = {
"type": "update_message",
"user_id": user_profile.id,
"edit_timestamp": datetime_to_timestamp(timestamp),
"message_id": target_message.id,
}
edit_history_event: Dict[str, Any] = {
"user_id": user_profile.id,
"timestamp": event["edit_timestamp"],
}
changed_messages = [target_message]
realm = user_profile.realm
stream_being_edited = None
if target_message.is_stream_message():
stream_id = target_message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, realm)
event["stream_name"] = stream_being_edited.name
ums = UserMessage.objects.filter(message=target_message.id)
if content is not None:
assert rendering_result is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in rendering_result.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
rendering_result.mentions_user_ids.update(members)
update_user_message_flags(rendering_result, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event["orig_content"] = target_message.content
event["orig_rendered_content"] = target_message.rendered_content
edit_history_event["prev_content"] = target_message.content
edit_history_event["prev_rendered_content"] = target_message.rendered_content
edit_history_event[
"prev_rendered_content_version"
] = target_message.rendered_content_version
target_message.content = content
target_message.rendered_content = rendering_result.rendered_content
target_message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendering_result.rendered_content
event["prev_rendered_content_version"] = target_message.rendered_content_version
event["is_me_message"] = Message.is_status_message(
content, rendering_result.rendered_content
)
# target_message.has_image and target_message.has_link will have been
# already updated by Markdown rendering in the caller.
target_message.has_attachment = check_attachment_reference_change(
target_message, rendering_result
)
if target_message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = target_message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
realm_id=realm.id,
recipient=target_message.recipient,
sender_id=target_message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event["online_push_user_ids"] = list(info["online_push_user_ids"])
event["stream_push_user_ids"] = list(info["stream_push_user_ids"])
event["stream_email_user_ids"] = list(info["stream_email_user_ids"])
event["muted_sender_user_ids"] = list(info["muted_sender_user_ids"])
event["prior_mention_user_ids"] = list(prior_mention_user_ids)
event["presence_idle_user_ids"] = filter_presence_idle_user_ids(info["active_user_ids"])
if rendering_result.mentions_wildcard:
event["wildcard_mention_user_ids"] = list(info["wildcard_mention_user_ids"])
else:
event["wildcard_mention_user_ids"] = []
do_update_mobile_push_notification(
target_message,
prior_mention_user_ids,
rendering_result.mentions_user_ids,
info["stream_push_user_ids"],
)
if topic_name is not None or new_stream is not None:
orig_topic_name = target_message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = target_message.recipient.type_id
if new_stream is not None:
assert content is None
assert target_message.is_stream_message()
assert stream_being_edited is not None
edit_history_event["prev_stream"] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
target_message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
# When messages are moved from one stream to another, some
# users may lose access to those messages, including guest
# users and users not subscribed to the new stream (if it is a
# private stream). For those users, their experience is as
# though the messages were deleted, and we should send a
# delete_message event to them instead.
subs_to_old_stream = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=True
).select_related("user_profile")
subs_to_new_stream = list(
get_active_subscriptions_for_stream_id(
new_stream.id, include_deactivated_users=True
).select_related("user_profile")
)
old_stream_sub_ids = [user.user_profile_id for user in subs_to_old_stream]
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
# Get users who aren't subscribed to the new_stream.
subs_losing_usermessages = [
sub for sub in subs_to_old_stream if sub.user_profile_id not in new_stream_sub_ids
]
            # Users who can no longer access the message without some
            # action from administrators.
subs_losing_access = [
sub
for sub in subs_losing_usermessages
if sub.user_profile.is_guest or not new_stream.is_public()
]
ums = ums.exclude(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages]
)
subs_gaining_usermessages = []
if not new_stream.is_history_public_to_subscribers():
                # For private streams with history not public to subscribers,
                # we find users who were not subscribed to the messages' old
                # stream and create new UserMessage rows for them so that
                # they can access these messages.
subs_gaining_usermessages += [
user_id for user_id in new_stream_sub_ids if user_id not in old_stream_sub_ids
]
if topic_name is not None:
topic_name = truncate_topic(topic_name)
target_message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(target_message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
update_edit_history(target_message, timestamp, edit_history_event)
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
assert stream_being_edited is not None
# Other messages should only get topic/stream fields in their edit history.
topic_only_edit_history_event = {
k: v
for (k, v) in edit_history_event.items()
if k
not in [
"prev_content",
"prev_rendered_content",
"prev_rendered_content_version",
]
}
messages_list = update_messages_for_topic_edit(
acting_user=user_profile,
edited_message=target_message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
old_stream=stream_being_edited,
edit_history_event=topic_only_edit_history_event,
last_edit_time=timestamp,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
changed_message_ids = [msg.id for msg in changed_messages]
if subs_gaining_usermessages:
ums_to_create = []
for message_id in changed_message_ids:
for user_profile_id in subs_gaining_usermessages:
                    # The fact that the user didn't have a UserMessage
                    # originally means we can infer that the user was not
                    # mentioned in the original message (even if mention
                    # syntax was present, it would not take effect for a
                    # user who was not subscribed). If we were editing the
                    # message's content, we would rerender the message and
                    # then use the new stream's data to determine whether
                    # this is a mention of a subscriber; but as we are not
                    # doing so, we choose to preserve the "was this mention
                    # syntax an actual mention" decision made during the
                    # original rendering for implementation simplicity. As
                    # a result, the only flag to consider applying here is
                    # read.
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message_id,
flags=UserMessage.flags.read,
)
ums_to_create.append(um)
bulk_insert_ums(ums_to_create)
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in subs_losing_usermessages],
message_id__in=changed_message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": changed_message_ids,
"message_type": "stream",
"stream_id": stream_being_edited.id,
"topic": orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=target_message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event["message_ids"] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
"id": um.user_profile_id,
"flags": um.flags_list(),
}
    # The following block arranges for users who are subscribed to a
    # stream and can see history from before they subscribed to get
    # live-updates when old messages are edited (e.g. if the user does
    # a topic edit themself).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expensive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
            # Remove duplicates by excluding the ids of users already in
            # the users_to_be_notified list. This handles the case where a
            # user both has a UserMessage row and is a current subscriber.
subscriptions = subscriptions.exclude(
user_profile_id__in=[um.user_profile_id for um in ums]
)
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscriptions = subscriptions.exclude(
user_profile_id__in=delete_event_notify_user_ids
)
# All users that are subscribed to the stream must be
# notified when a message is edited
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
if new_stream is not None:
                # TODO: Guest users don't see the new moved topic
                # unless the breadcrumb message for the new stream is
                # enabled. Excluding these users from receiving this
                # event helps us avoid an error traceback for our
                # clients. We should figure out a way to inform the
# guest users of this new topic if sending a 'message'
# event for these messages is not an option.
#
# Don't send this event to guest subs who are not
# subscribers of the old stream but are subscribed to
# the new stream; clients will be confused.
old_stream_unsubbed_guests = [
sub
for sub in subs_to_new_stream
if sub.user_profile.is_guest and sub.user_profile_id not in subscriber_ids
]
subscriptions = subscriptions.exclude(
user_profile_id__in=[sub.user_profile_id for sub in old_stream_unsubbed_guests]
)
subscriber_ids = set(subscriptions.values_list("user_profile_id", flat=True))
users_to_be_notified += list(map(subscriber_info, sorted(list(subscriber_ids))))
send_event(user_profile.realm, event, users_to_be_notified)
if len(changed_messages) > 0 and new_stream is not None and stream_being_edited is not None:
# Notify users that the topic was moved.
old_thread_notification_string = None
if send_notification_to_old_thread:
old_thread_notification_string = _("This topic was moved by {user} to {new_location}")
new_thread_notification_string = None
if send_notification_to_new_thread:
new_thread_notification_string = _(
"This topic was moved here from {old_location} by {user}"
)
send_message_moved_breadcrumbs(
user_profile,
stream_being_edited,
orig_topic_name,
old_thread_notification_string,
new_stream,
topic_name,
new_thread_notification_string,
)
if (
topic_name is not None
and new_stream is None
and content is None
and len(changed_messages) > 0
):
assert stream_being_edited is not None
maybe_send_resolve_topic_notifications(
user_profile=user_profile,
stream=stream_being_edited,
old_topic=orig_topic_name,
new_topic=topic_name,
)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # The messages in a delete_message event all belong to the same topic,
    # or are a single private message; no other behavior is possible with
    # the current callers of this method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
"type": "delete_message",
"message_ids": message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
# TODO: We should plan to remove `sender_id` here.
event["recipient_id"] = sample_message.recipient_id
event["sender_id"] = sample_message.sender_id
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event["stream_id"] = stream_id
event["topic"] = sample_message.topic_name()
subscriptions = get_active_subscriptions_for_stream_id(
stream_id, include_deactivated_users=False
)
# We exclude long-term idle users, since they by definition have no active clients.
subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event["message_type"] = message_type
transaction.on_commit(lambda: send_event(realm, event, users_to_notify))
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(
Message.objects.filter(sender=user).values_list("id", flat=True).order_by("id")
)
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS["messages_in_stream:is_bot:day"]
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property, end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values("stream_id").annotate(value=Sum("value"))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
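# A few worked examples for round_to_2_significant_digits:
#   round_to_2_significant_digits(1234) == 1200
#   round_to_2_significant_digits(876) == 880
#   round_to_2_significant_digits(7) == 7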
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(
stream_id: int, stream_date_created: datetime.datetime, recent_traffic: Dict[int, int]
) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
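# A minimal worked example for get_average_weekly_stream_traffic
# (hypothetical numbers): a stream that is 10 days old with 30 recent
# messages yields 30 * 7 // 10 == 21; a stream at least 28 days old with
# 100 recent messages yields 100 // 4 == 25. Both results then go through
# round_to_2_significant_digits; streams younger than 7 days return None.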
def get_web_public_subs(realm: Realm) -> SubscriptionInfo:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict["is_muted"] = False
stream_dict["color"] = get_next_color()
stream_dict["desktop_notifications"] = True
stream_dict["audible_notifications"] = True
stream_dict["push_notifications"] = True
stream_dict["email_notifications"] = True
stream_dict["pin_to_top"] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(
stream.id, stream.date_created, {}
)
stream_dict["stream_weekly_traffic"] = stream_weekly_traffic
stream_dict["email_address"] = ""
subscribed.append(stream_dict)
return SubscriptionInfo(
subscriptions=subscribed,
unsubscribed=[],
never_subscribed=[],
)
def build_stream_dict_for_sub(
user: UserProfile,
sub: Subscription,
stream: Stream,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = stream["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(stream[field_name])
continue
result[field_name] = stream[field_name]
# Copy Subscription.API_FIELDS.
for field_name in Subscription.API_FIELDS:
result[field_name] = sub[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
result["in_home_view"] = not result["is_muted"]
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
result["is_announcement_only"] = (
stream["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Add a few computed fields not directly from the data models.
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic
)
result["email_address"] = encode_email_address_helper(
stream["name"], stream["email_token"], show_sender=True
)
# Our caller may add a subscribers field.
return result
def build_stream_dict_for_never_sub(
stream: Stream,
recent_traffic: Dict[int, int],
) -> Dict[str, object]:
result = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
result["stream_id"] = stream["id"]
continue
elif field_name == "date_created":
result["date_created"] = datetime_to_timestamp(stream[field_name])
continue
result[field_name] = stream[field_name]
result["stream_weekly_traffic"] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic
)
# Backwards-compatibility addition of removed field.
result["is_announcement_only"] = (
stream["stream_post_policy"] == Stream.STREAM_POST_POLICY_ADMINS
)
# Our caller may add a subscribers field.
return result
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has significant
# performance impact for loading / for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(
user_profile: UserProfile,
include_subscribers: bool = True,
) -> SubscriptionInfo:
realm = user_profile.realm
all_streams = get_active_streams(realm).values(
*Stream.API_FIELDS,
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token",
)
recip_id_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in all_streams}
all_streams_map = {stream["id"]: stream for stream in all_streams}
sub_dicts = (
get_stream_subscriptions_for_user(user_profile)
.values(
*Subscription.API_FIELDS,
"recipient_id",
"active",
)
.order_by("recipient_id")
)
# We only care about subscriptions for active streams.
sub_dicts = [sub for sub in sub_dicts if recip_id_to_stream_id.get(sub["recipient_id"])]
def get_stream_id(sub: Subscription) -> int:
return recip_id_to_stream_id[sub["recipient_id"]]
traffic_stream_ids = {get_stream_id(sub) for sub in sub_dicts}
recent_traffic = get_streams_traffic(stream_ids=traffic_stream_ids)
# Okay, now we finally get to populating our main results, which
# will be these three lists.
subscribed = []
unsubscribed = []
never_subscribed = []
sub_unsub_stream_ids = set()
for sub in sub_dicts:
stream_id = get_stream_id(sub)
sub_unsub_stream_ids.add(stream_id)
stream = all_streams_map[stream_id]
stream_dict = build_stream_dict_for_sub(
user=user_profile,
sub=sub,
stream=stream,
recent_traffic=recent_traffic,
)
# is_active is represented in this structure by which list we include it in.
is_active = sub["active"]
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = set(all_streams_map) - sub_unsub_stream_ids
else:
web_public_stream_ids = {stream["id"] for stream in all_streams if stream["is_web_public"]}
never_subscribed_stream_ids = web_public_stream_ids - sub_unsub_stream_ids
never_subscribed_streams = [
all_streams_map[stream_id] for stream_id in never_subscribed_stream_ids
]
for stream in never_subscribed_streams:
is_public = not stream["invite_only"]
if is_public or user_profile.is_realm_admin:
stream_dict = build_stream_dict_for_never_sub(
stream=stream, recent_traffic=recent_traffic
)
never_subscribed.append(stream_dict)
if include_subscribers:
# The highly optimized bulk_get_subscriber_user_ids wants to know which
# streams we are subscribed to, for validation purposes, and it uses that
# info to know if it's allowed to find OTHER subscribers.
subscribed_stream_ids = {get_stream_id(sub) for sub in sub_dicts if sub["active"]}
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
subscribed_stream_ids,
)
for lst in [subscribed, unsubscribed, never_subscribed]:
for sub in lst:
sub["subscribers"] = subscriber_map[sub["stream_id"]]
return SubscriptionInfo(
subscriptions=sorted(subscribed, key=lambda x: x["name"]),
unsubscribed=sorted(unsubscribed, key=lambda x: x["name"]),
never_subscribed=sorted(never_subscribed, key=lambda x: x["name"]),
)
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool = False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
helper_result = gather_subscriptions_helper(
user_profile,
include_subscribers=include_subscribers,
)
subscribed = helper_result.subscriptions
unsubscribed = helper_result.unsubscribed
if include_subscribers:
user_ids = set()
for subs in [subscribed, unsubscribed]:
for sub in subs:
if "subscribers" in sub:
for subscriber in sub["subscribers"]:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if "subscribers" in sub:
sub["subscribers"] = sorted(
email_dict[user_id] for user_id in sub["subscribers"]
)
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(
realm: Realm,
sender_id: int,
message_type: str,
active_users_data: List[UserMessageNotificationsData],
) -> List[int]:
"""
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications.
* They are no longer "present" according to the
UserPresence table.
"""
if realm.presence_disabled:
return []
is_private_message = message_type == "private"
user_ids = set()
for user_data in active_users_data:
alerted = "has_alert_word" in user_data.flags
# We only need to know the presence idle state for a user if this message would be notifiable
# for them if they were indeed idle. Only including those users in the calculation below is a
# very important optimization for open communities with many inactive users.
if user_data.is_notifiable(is_private_message, sender_id, idle=True) or alerted:
user_ids.add(user_data.user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
    # (and push notifications with
# user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = (
UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
)
.exclude(client__name="ZulipMobile")
.distinct("user_profile_id")
.values("user_profile_id")
)
active_user_ids = {row["user_profile_id"] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(idle_user_ids)
def do_send_confirmation_email(
invitee: PreregistrationUser, referrer: UserProfile, email_language: str
) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
context = {
"referrer_full_name": referrer.full_name,
"referrer_email": referrer.delivery_email,
"activate_url": activation_url,
"referrer_realm_name": referrer.realm.name,
}
send_email(
"zerver/emails/invitation",
to_emails=[invitee.email],
from_address=FromAddress.tokenized_no_reply_address(),
language=email_language,
context=context,
realm=referrer.realm,
)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
def estimate_recent_invites(realms: Collection[Realm], *, days: int) -> int:
"""An upper bound on the number of invites sent in the last `days` days"""
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property="invites_sent::day",
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum("value"))["value__sum"]
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
"""Discourage using invitation emails as a vector for carrying spam."""
msg = _(
"You do not have enough remaining invites for today. "
"Please contact {email} to have your limit raised. "
"No invitations were sent."
).format(email=settings.ZULIP_ADMINISTRATOR)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
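# A minimal worked example of check_invite_limit (hypothetical numbers,
# assuming settings.OPEN_REALM_CREATION is enabled): with realm.max_invites
# of 100 and 95 invites already sent in the last day, a request to invite
# 10 more users raises InvitationError before any emails are queued.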
def do_invite_users(
user_profile: UserProfile,
invitee_emails: Collection[str],
streams: Collection[Stream],
invite_as: int = PreregistrationUser.INVITE_AS["MEMBER"],
) -> None:
num_invites = len(invitee_emails)
check_invite_limit(user_profile.realm, num_invites)
if settings.BILLING_ENABLED:
from corporate.lib.registration import check_spare_licenses_available_for_inviting_new_users
check_spare_licenses_available_for_inviting_new_users(user_profile.realm, num_invites)
realm = user_profile.realm
if not realm.invite_required:
# Inhibit joining an open realm to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if user_profile.date_joined > timezone_now() - min_age and not user_profile.is_realm_admin:
raise InvitationError(
_(
"Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."
),
[],
sent_invitations=False,
)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == "":
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
"""
good_emails are emails that look ok so far,
but we still need to make sure they're not
gonna conflict with existing users
"""
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped,
sent_invitations=False,
)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(
_("We weren't able to invite anyone."), skipped, sent_invitations=False
)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
# when exactly invitations were sent
do_increment_logging_stat(
user_profile.realm,
COUNT_STATS["invites_sent::day"],
None,
timezone_now(),
increment=len(validated_emails),
)
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(
email=email, referred_by=user_profile, invited_as=invite_as, realm=user_profile.realm
)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {
"prereg_id": prereg_user.id,
"referrer_id": user_profile.id,
"email_language": user_profile.realm.default_language,
}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(
_(
"Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"
),
skipped,
sent_invitations=True,
)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
invites.append(
dict(
email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False,
)
)
if not user_profile.is_realm_admin:
# We do not return multiuse invites to non-admin users.
return invites
lowest_datetime = timezone_now() - datetime.timedelta(
days=settings.INVITATION_LINK_VALIDITY_DAYS
)
multiuse_confirmation_objs = Confirmation.objects.filter(
realm=user_profile.realm, type=Confirmation.MULTIUSE_INVITE, date_sent__gte=lowest_datetime
)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
invites.append(
dict(
invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
id=invite.id,
link_url=confirmation_url(
confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE,
),
invited_as=invite.invited_as,
is_multiuse=True,
)
)
return invites
def do_create_multiuse_invite_link(
referred_by: UserProfile, invited_as: int, streams: Sequence[Stream] = []
) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
# TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type, object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type, object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    # These two asserts are structurally guaranteed by the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(
prereg_user.realm, COUNT_STATS["invites_sent::day"], None, prereg_user.invited_at
)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {
"prereg_id": prereg_user.id,
"referrer_id": prereg_user.referred_by.id,
"email_language": prereg_user.referred_by.realm.default_language,
}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update", realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(
realm: Realm, name: str, author: UserProfile, image_file: File
) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
    # The only user-controlled portion of 'emoji_file_name' is an extension,
    # which cannot contain '..', '/', or '\', making it difficult to exploit.
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=["file_name"])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=["deactivated"])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Sequence[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(
user_profile: UserProfile,
stream: Stream,
topic: str,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, stream.recipient_id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mute_user(
user_profile: UserProfile,
muted_user: UserProfile,
date_muted: Optional[datetime.datetime] = None,
) -> None:
if date_muted is None:
date_muted = timezone_now()
add_user_mute(user_profile, muted_user, date_muted)
do_mark_muted_user_messages_as_read(user_profile, muted_user)
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_MUTED,
event_time=date_muted,
extra_data=orjson.dumps({"muted_user_id": muted_user.id}).decode(),
)
def do_unmute_user(mute_object: MutedUser) -> None:
user_profile = mute_object.user_profile
muted_user = mute_object.muted_user
mute_object.delete()
event = dict(type="muted_users", muted_users=get_user_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
RealmAuditLog.objects.create(
realm=user_profile.realm,
acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_UNMUTED,
event_time=timezone_now(),
extra_data=orjson.dumps({"unmuted_user_id": muted_user.id}).decode(),
)
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_linkifiers(realm: Realm) -> None:
realm_linkifiers = linkifiers_for_realm(realm.id)
event = dict(type="realm_linkifiers", realm_linkifiers=realm_linkifiers)
send_event(realm, event, active_user_ids(realm.id))
# Below is code for backwards compatibility. The now deprecated
# "realm_filters" event-type is used by older clients, and uses
# tuples.
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
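# For illustration only (hypothetical values, not part of this module): a linkifier
# might pair the pattern r"#(?P<id>[0-9]+)" with the URL format string
# "https://github.com/zulip/zulip/issues/%(id)s", so that "#1234" in a message is
# rendered as a link to issue 1234.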
def do_add_linkifier(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter(realm=realm, pattern=pattern, url_format_string=url_format_string)
linkifier.full_clean()
linkifier.save()
notify_linkifiers(realm)
return linkifier.id
def do_remove_linkifier(
realm: Realm, pattern: Optional[str] = None, id: Optional[int] = None
) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, id=id).delete()
notify_linkifiers(realm)
def do_update_linkifier(realm: Realm, id: int, pattern: str, url_format_string: str) -> None:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
linkifier = RealmFilter.objects.get(realm=realm, id=id)
linkifier.pattern = pattern
linkifier.url_format_string = url_format_string
linkifier.full_clean()
linkifier.save(update_fields=["pattern", "url_format_string"])
notify_linkifiers(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(
realm=realm, domain=domain, allow_subdomains=allow_subdomains
)
event = dict(
type="realm_domains",
op="add",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=["allow_subdomains"])
event = dict(
type="realm_domains",
op="change",
realm_domain=dict(
domain=realm_domain.domain, allow_subdomains=realm_domain.allow_subdomains
),
)
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(
realm_domain: RealmDomain, *, acting_user: Optional[UserProfile]
) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, "emails_restricted_to_domains", False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def notify_realm_playgrounds(realm: Realm) -> None:
event = dict(type="realm_playgrounds", realm_playgrounds=get_realm_playgrounds(realm))
send_event(realm, event, active_user_ids(realm.id))
def do_add_realm_playground(realm: Realm, **kwargs: Any) -> int:
realm_playground = RealmPlayground(realm=realm, **kwargs)
# We expect full_clean to always pass since a thorough input validation
# is performed in the view (using check_url, check_pygments_language, etc)
# before calling this function.
realm_playground.full_clean()
realm_playground.save()
notify_realm_playgrounds(realm)
return realm_playground.id
def do_remove_realm_playground(realm: Realm, realm_playground: RealmPlayground) -> None:
realm_playground.delete()
notify_realm_playgrounds(realm)
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
"""Get streams with subscribers"""
exists_expression = Exists(
Subscription.objects.filter(
active=True,
is_user_active=True,
user_profile__realm=realm,
recipient_id=OuterRef("recipient_id"),
),
)
occupied_streams = (
Stream.objects.filter(realm=realm, deactivated=False)
.annotate(occupied=exists_expression)
.filter(occupied=True)
)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile,
include_public: bool = True,
include_web_public: bool = False,
include_subscribed: bool = True,
include_all_active: bool = False,
include_default: bool = False,
include_owner_subscribed: bool = False,
) -> List[Dict[str, Any]]:
# This function is only used by API clients now.
if include_all_active and not user_profile.is_realm_admin:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all active streams in the realm.
query = Stream.objects.filter(realm=user_profile.realm, deactivated=False)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_web_public:
web_public_check = Q(is_web_public=True)
add_filter_option(web_public_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream["is_default"] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(
user_profile: UserProfile, op: str, attachment_dict: Dict[str, Any]
) -> None:
event = {
"type": "attachment",
"op": op,
"attachment": attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
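# Attach each referenced upload (by path id) to the message, recording whether the
# destination is realm-public / web-public; references the sender may not share are
# skipped and logged. Returns True if at least one attachment was claimed.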
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
is_message_web_public = False
if message.is_stream_message():
stream = Stream.objects.get(id=message.recipient.type_id)
is_message_realm_public = stream.is_public()
is_message_web_public = stream.is_web_public
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
# of an upload, but doesn't correspond to a file that actually
# exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id,
path_id,
message.id,
)
continue
claimed = True
attachment = claim_attachment(
user_profile, path_id, message, is_message_realm_public, is_message_web_public
)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(
message: Message, rendering_result: MessageRenderingResult
) -> bool:
# For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(rendering_result.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields", fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(
realm: Realm, field_subtype: str
) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
custom_profile_field = CustomProfileField(
realm=realm,
name=field_data["name"],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data["hint"],
field_data=orjson.dumps(dict(subtype=field_subtype)).decode(),
)
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def try_add_realm_custom_profile_field(
realm: Realm,
name: str,
field_type: int,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> CustomProfileField:
custom_profile_field = CustomProfileField(realm=realm, name=name, field_type=field_type)
custom_profile_field.hint = hint
if (
custom_profile_field.field_type == CustomProfileField.SELECT
or custom_profile_field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
custom_profile_field.field_data = orjson.dumps(field_data or {}).decode()
custom_profile_field.save()
custom_profile_field.order = custom_profile_field.id
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
return custom_profile_field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm)
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(
realm: Realm,
field: CustomProfileField,
name: str,
hint: str = "",
field_data: Optional[ProfileFieldData] = None,
) -> None:
field.name = name
field.hint = hint
if (
field.field_type == CustomProfileField.SELECT
or field.field_type == CustomProfileField.EXTERNAL_ACCOUNT
):
field.field_data = orjson.dumps(field_data or {}).decode()
field.save()
notify_realm_custom_profile_fields(realm)
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
order_mapping = {field_id: index for index, field_id in enumerate(order)}
custom_profile_fields = CustomProfileField.objects.filter(realm=realm)
for custom_profile_field in custom_profile_fields:
if custom_profile_field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for custom_profile_field in custom_profile_fields:
custom_profile_field.order = order_mapping[custom_profile_field.id]
custom_profile_field.save(update_fields=["order"])
notify_realm_custom_profile_fields(realm)
def notify_user_update_custom_profile_data(
user_profile: UserProfile, field: Dict[str, Union[int, str, List[int], None]]
) -> None:
data = dict(id=field["id"])
if field["type"] == CustomProfileField.USER:
data["value"] = orjson.dumps(field["value"]).decode()
else:
data["value"] = field["value"]
if field["rendered_value"]:
data["rendered_value"] = field["rendered_value"]
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(
user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]],
) -> None:
with transaction.atomic():
for custom_profile_field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile, field_id=custom_profile_field["id"]
)
if not created and field_value.value == str(custom_profile_field["value"]):
# If the field value isn't actually being changed to a different one,
# we have nothing to do here for this field.
# Note: field_value.value is a TextField() so we need to cast field['value']
# to a string for the comparison in this if.
continue
field_value.value = custom_profile_field["value"]
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(
str(custom_profile_field["value"])
)
field_value.save(update_fields=["value", "rendered_value"])
else:
field_value.save(update_fields=["value"])
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type,
},
)
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
custom_profile_field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(
field=custom_profile_field, user_profile=user_profile
)
field_value.delete()
notify_user_update_custom_profile_data(
user_profile,
{
"id": field_id,
"value": None,
"rendered_value": None,
"type": custom_profile_field.field_type,
},
)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(
type="user_group",
op="add",
group=dict(
name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(
realm: Realm, name: str, initial_members: List[UserProfile], description: str
) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, str]) -> None:
event = dict(type="user_group", op="update", group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=["name"])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=["description"])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(
bot_profile: UserProfile, service_interface: int, service_payload_url: str
) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[
dict(
base_url=service.base_url, interface=service.interface, token=service.token
)
],
),
),
bot_owner_user_ids(bot_profile),
)
def do_update_bot_config_data(bot_profile: UserProfile, config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(
bot_profile.realm,
dict(
type="realm_bot",
op="update",
bot=dict(
user_id=bot_profile.id,
services=[dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile),
)
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [
{
"config_data": get_bot_config(user_profile),
"service_name": services[0].name,
}
]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
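# Bulk counterpart of get_service_dicts_for_bot: Service rows and embedded-bot configs
# are prefetched for all bot ids so the per-bot loop below issues no extra queries.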
def get_service_dicts_for_bots(
bot_dicts: List[Dict[str, Any]], realm: Realm
) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict["id"] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [
bot_dict["id"] for bot_dict in bot_dicts if bot_dict["bot_type"] == UserProfile.EMBEDDED_BOT
]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [
{
"base_url": service.base_url,
"interface": service.interface,
"token": service.token,
}
for service in services
]
elif bot_type == UserProfile.EMBEDDED_BOT:
if bot_profile_id in embedded_bot_configs:
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [
{
"config_data": bot_config,
"service_name": services[0].name,
}
]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(
user_profile: UserProfile, include_all_realm_bots_if_admin: bool = True
) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(
realm=user_profile.realm, is_bot=True, bot_owner=user_profile
).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [
{
"email": botdict["email"],
"user_id": botdict["id"],
"full_name": botdict["full_name"],
"bot_type": botdict["bot_type"],
"is_active": botdict["is_active"],
"api_key": botdict["api_key"],
"default_sending_stream": botdict["default_sending_stream__name"],
"default_events_register_stream": botdict["default_events_register_stream__name"],
"default_all_public_streams": botdict["default_all_public_streams"],
"owner_id": botdict["bot_owner_id"],
"avatar_url": avatar_url_from_dict(botdict),
"services": services_by_ids[botdict["id"]],
}
for botdict in result
]
def do_send_user_group_members_update_event(
event_name: str, user_group: UserGroup, user_ids: List[int]
) -> None:
event = dict(type="user_group", op=event_name, group_id=user_group.id, user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
memberships = [
UserGroupMembership(user_group_id=user_group.id, user_profile=user_profile)
for user_profile in user_profiles
]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("add_members", user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup, user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id, user_profile__in=user_profiles
).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event("remove_members", user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int, realm_id: int) -> None:
event = dict(type="user_group", op="remove", group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_REACTIVATION_EMAIL_SENT,
event_time=timezone_now(),
)
context = {"confirmation_url": url, "realm_uri": realm.uri, "realm_name": realm.name}
language = realm.default_language
send_email_to_admins(
"zerver/emails/realm_reactivation",
realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language,
context=context,
)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm,
dict(type="has_zoom_token", value=token is not None),
[user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type="realm_export", exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `orjson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = orjson.loads(export_extra_data)
export_path = export_data.get("export_path")
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update(deleted_timestamp=timezone_now().timestamp())
export.extra_data = orjson.dumps(export_data).decode()
export.save(update_fields=["extra_data"])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream, topic_name: str) -> List[Message]:
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
|
the-stack_106_13828
|
from time import sleep
#from test.test_flask import TestFlaskBase
from test_flask import flask_test
from flask import url_for
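# These tests rely on a `flask_test` pytest fixture (imported from test_flask above)
# that exposes a Flask test client as `flask_test.client` inside an application/request
# context, so url_for() can resolve the 'tickets.*' endpoints.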
# class TestTicketAdicionar(TestFlaskBase):
class TestTicketAdicionar:
def test_adicionar_deve_retornar_o_payload_igual_ao_enviado(self, flask_test):
ticket_data = {'position': 1, 'subject': 'devolucao'}
response = flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
print(response.json)
assert 1 == response.json['id']
assert ticket_data['position'] == response.json['position']
assert ticket_data['subject'] == response.json['subject']
def test_adicionar_deve_retornar_erro_quando_o_payload_for_incompleto(self, flask_test):
ticket_data = {'subject': 'devolucao'}
response = flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
return_data = {'position': ['Missing data for required field.']}
assert return_data == response.json
def test_adicionar_deve_retornar_erro_quando_o_payload_tiver_id(self, flask_test):
ticket_data = {'id': 1, 'position': 1, 'subject': 'devolucao'}
response = flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
return_data = {'id': ['Nao envie o id!']}
assert return_data == response.json
# class TestTicketMostrar(TestFlaskBase):
class TestTicketMostrar:
def test_mostrar_deve_retornar_uma_query_vazia(self, flask_test):
response = flask_test.client.get(url_for('tickets.mostrar'))
assert [] == response.json
def test_mostrar_deve_retornar_um_query_com_elemento_inserido(self, flask_test):
ticket_data = {'position': 1, 'subject': 'devolucao'}
flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
response = flask_test.client.get(url_for('tickets.mostrar'))
assert 2 == len(response.json)
# class TestTicketDeletar(TestFlaskBase):
class TestTicketDeletar:
# def test_deletar_deve_retornar_deletado_quando_nao_encontrar_registro(self):
# response = self.client.get(url_for('tickets.deletar',identificador=1))
#
# self.assertEqual(response.json, f"Ticket de id=1 deletado!!")
def test_deletar_deve_retornar_deletado_quando_encontrar_registro_na_base(self, flask_test):
ticket_data = {'position': 1, 'subject': 'devolucao'}
flask_test.client.post(url_for('tickets.adicionar'), json=ticket_data)
response = flask_test.client.get(url_for('tickets.deletar', identificador=1))
assert response.json == f"Ticket de id=1 deletado!!"
# class TestTicketModificar(TestFlaskBase):
class TestTicketModificar:
def test_modificar_(self, flask_test):
ticket_inicial = {'position': 1, 'subject': 'devolucao'}
ticket_final = {'id': 1, 'position': 1, 'subject': 'preco_errado'}
flask_test.client.post(url_for('tickets.adicionar'), json=ticket_inicial)
response = flask_test.client.post(url_for('tickets.modificar', identificador=1), json=ticket_final)
assert ticket_final['id'] == response.json['id']
assert ticket_final['position'] == response.json['position']
assert ticket_final['subject'] == response.json['subject']
#class TestTicketChangeDates(TestFlaskBase):
class TestTicketChangeDates:
def test_modificar_start(self, flask_test):
ticket = {'position': 1, 'subject': 'devolucao'}
flask_test.client.post(url_for('tickets.adicionar'), json=ticket)
sleep(2)
response = flask_test.client.get(url_for('tickets.add_date_called', identificador=1))
assert 1 == response.json['id']
assert ticket['position'] == response.json['position']
assert ticket['subject'] == response.json['subject']
assert response.json['date_called'] is not None
def test_modificar_end(self, flask_test):
ticket = {'position': 1, 'subject': 'devolucao'}
flask_test.client.post(url_for('tickets.adicionar'), json=ticket)
sleep(2)
response = flask_test.client.get(url_for('tickets.add_date_end', identificador=1))
assert 1 == response.json['id']
assert ticket['position'] == response.json['position']
assert ticket['subject'] == response.json['subject']
assert response.json['date_end'] is not None
|
the-stack_106_13829
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: pip install pyTelegramBotAPI
import telebot
from config import TOKEN
bot = telebot.TeleBot(TOKEN)
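# Holiday lookup table: keys are dates in DD.MM form, values are the holiday names
# returned to the user; extend this dict to cover more dates.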
data_year = {
'01.01': 'Новый год',
'23.02': '23 февраля',
}
# SOURCE: https://ru.stackoverflow.com/questions/1264757
def func(message):
text = message.text
result = data_year.get(text, "В этот день праздников нет. Иди работать!")
bot.send_message(message.from_user.id, result)
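# Flow: every incoming text message first asks the user for a date, then
# register_next_step_handler() arranges for func() above to process that user's
# next message (pyTelegramBotAPI's "next step" conversation pattern).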
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
data = bot.send_message(message.from_user.id, "Введите дату в формате ДД.ММ и нажмите ENTER")
bot.register_next_step_handler(data, func)
bot.enable_save_next_step_handlers(delay=2)
bot.load_next_step_handlers()
bot.polling(none_stop=True)
|