code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0013_merge'),
]
operations = [
migrations.AlterField(
model_name='user',
name='public',
field=models.BooleanField(default=True, help_text=b'Determines whether or not your profile is open to the public'),
preserve_default=True,
),
]
| joshsamara/game-website | core/migrations/0014_auto_20150413_1639.py | Python | mit | 496 |
#!/usr/bin/env python
import subprocess
import sys
import signal
import datetime
import argparse
import requests
import calendar
import json
import zlib
import time
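# Median of a list of numbers (Python 2 semantics: '/' in the index math is integer division).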
def get_median (values):
sv = sorted(values)
if len(sv) % 2 == 1:
return sv[(len(sv) + 1) / 2 - 1]
else:
lower = sv[len(sv) / 2 - 1]
upper = sv[len(sv) / 2]
return float(lower + upper) / 2.0
def parse_args ():
parser = argparse.ArgumentParser()
parser.add_argument('test_name', help = "test file path")
parser.add_argument('-b', '--executable-path', default = 'build/dev/rtmp_load', help = "rtmp_load executable path")
parser.add_argument('-sh', '--stat-host', default = '10.40.25.155:7778', help = "stat server host[:port]")
return parser.parse_args()
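# Each rtmp_load output line looks like "<date> <time> <event_type> ...": parse the
# timestamp and return (event_type, timestamp, raw_parts).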
def parse_line (line):
parts = line.strip().split()
try:
t = datetime.datetime.strptime(parts[0] + ' ' + parts[1], '%Y-%m-%d %H:%M:%S.%f')
except:
print >> sys.stderr, "Invalid line:", line
raise
rtype = parts[2]
return rtype, t, parts
def mistress_server_start_test (test_name, stat_host):
with open(test_name, 'rb') as f:
test_source = f.read()
resp = requests.post('http://%s/new_test' % stat_host, params = {
'worker_num': 1,
'project_id': 'rtmp',
'delayed_start_time': calendar.timegm(datetime.datetime.utcnow().utctimetuple()),
}, data = test_source)
assert resp.status_code == requests.codes.ok
return int(resp.text)
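# Numeric stat-type codes understood by the stat server (used by send_stat below).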
class stypes (object):
CONCUR_USERS_NUM_MAX = 2
START_SESSION = 3
RESPONSE_TIME = 4
RESPONSE_STATUS = 5
REQUEST_SENT = 6
CONNECT_TIME = 7
CONCUR_USERS_NUM_MIN = 8
CONNECT_ERROR = 9
RESPONSE_ERROR = 10
CONCUR_CONNS_NUM_MIN = 11
CONCUR_CONNS_NUM_MAX = 12
FINISH_TEST = 13
def main ():
args = parse_args()
test_id = mistress_server_start_test(args.test_name, args.stat_host)
def send_stat (data, step):
data = zlib.compress(json.dumps({'node': 1, 'step': step, 'data': data}))
requests.post('http://%s/add_stats/%s' % (args.stat_host, test_id), data = data)
cmd = [args.executable_path, args.test_name]
rtmp_load_proc = subprocess.Popen(cmd, shell = False, stdout = subprocess.PIPE, bufsize = -1)
# cmd = "strace -f -o strace.log %s %s" % (args.executable_path, args.test_name)
# rtmp_load_proc = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE, bufsize = -1)
try:
proc_terminated = False
second = datetime.timedelta(seconds = 1)
buf_buffered_frame_num = []
# buf_first_frame_latency = []
concur_threads = 0
buf_thread_started_num = 0
stat_buf = []
step = 1
print >> sys.stderr, "columns:\nbuf_thread_started_num, concur_threads, med_buffered_frame_num, med_first_frame_latency:"
start_time = datetime.datetime.utcnow()
while True:
line = rtmp_load_proc.stdout.readline()
if not line:
break
rtype, t, parts = parse_line(line)
if rtype == '@buf_frame_num':
n = int(parts[3])
buf_buffered_frame_num.append(n)
if n < 0:
stat_buf.append({'type': stypes.RESPONSE_ERROR, 'value': "not enough frames"})
elif rtype == '@starting_thread':
buf_thread_started_num += 1
concur_threads += 1
elif rtype == '@stopping_thread':
stat_buf.append({'type': stypes.RESPONSE_ERROR, 'value': "frame receiving error: %s" % " ".join(parts[3:])})
concur_threads -= 1
# elif rtype == '@diff':
# #TODO ?
# pass
elif rtype == '@first_frame':
msec = int(parts[3]) * 1000 + int(int(parts[4]) / 1000000) #TODO is it correct?
# buf_first_frame_latency.append(msec)
stat_buf.append({'type': stypes.RESPONSE_TIME, 'value': ('first frame, ms', msec)})
stat_buf.append({'type': stypes.RESPONSE_STATUS, 'value': ('first frame, rate', 200)})
elif rtype == '@error':
stat_buf.append({'type': stypes.RESPONSE_ERROR, 'value': " ".join(parts[3:])})
else:
raise RuntimeError("Invalid event type: %s" % rtype)
now = datetime.datetime.utcnow()
d = now - start_time
if d >= second: #TODO reminder
# print "reminder", d - second
start_time = now
# process metrics
med_buffered_frame_num = get_median(buf_buffered_frame_num) if buf_buffered_frame_num else 0
# med_first_frame_latency = get_median(buf_first_frame_latency) if buf_first_frame_latency else 0
# print buf_thread_started_num, concur_threads, med_buffered_frame_num, med_first_frame_latency #.......
print buf_thread_started_num, concur_threads, med_buffered_frame_num
stat_buf.extend([
{'type': stypes.START_SESSION, 'value': buf_thread_started_num},
{'type': stypes.CONCUR_CONNS_NUM_MIN, 'value': concur_threads},
{'type': stypes.CONCUR_CONNS_NUM_MAX, 'value': concur_threads},
{'type': stypes.CONCUR_USERS_NUM_MIN, 'value': concur_threads},
{'type': stypes.CONCUR_USERS_NUM_MAX, 'value': concur_threads},
{'type': stypes.REQUEST_SENT, 'value': med_buffered_frame_num},
])
send_stat(stat_buf, step)
step += 1
#reset buffers
buf_thread_started_num = 0
buf_buffered_frame_num = []
# buf_first_frame_latency = []
stat_buf = []
proc_terminated = True
rtmp_load_proc.poll()
if rtmp_load_proc.returncode != 0:
print >> sys.stderr, "rtmp_load returned non-zero exit code %s" % (rtmp_load_proc.returncode if rtmp_load_proc.returncode is not None else "None (possibly segfault)")
sys.exit(1)
finally:
if not proc_terminated:
try:
rtmp_load_proc.send_signal(signal.SIGINT)
except Exception as e:
print >> sys.stderr, "failed to send sigint: %s" % e
if stat_buf:
print >> sys.stderr, "last data in stat buf: %s" % stat_buf
print >> sys.stderr, "possibly latest error: %s" % stat_buf[-1]
send_stat(stat_buf, step)
send_stat([{'type': stypes.FINISH_TEST, 'value': 1}], step)
if __name__ == '__main__':
main() | fillest/rtmp_load | run.py | Python | mit | 5,727 |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ContactsAppConfig(AppConfig):
name = 'apps.contacts'
verbose_name = _('Contacts')
| samupl/simpleERP | apps/contacts/app_config.py | Python | mit | 189 |
import importlib.util
import logging
import os
import warnings
from os import getenv
from typing import Any, Optional, Mapping, ClassVar
log = logging.getLogger(__name__)
default_settings_dict = {
'connect_timeout_seconds': 15,
'read_timeout_seconds': 30,
'max_retry_attempts': 3,
'base_backoff_ms': 25,
'region': None,
'max_pool_connections': 10,
'extra_headers': None,
}
OVERRIDE_SETTINGS_PATH = getenv('PYNAMODB_CONFIG', '/etc/pynamodb/global_default_settings.py')
def _load_module(name, path):
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(module) # type: ignore
return module
override_settings = {}
if os.path.isfile(OVERRIDE_SETTINGS_PATH):
override_settings = _load_module('__pynamodb_override_settings__', OVERRIDE_SETTINGS_PATH)
if hasattr(override_settings, 'session_cls') or hasattr(override_settings, 'request_timeout_seconds'):
warnings.warn("The `session_cls` and `request_timeout_seconds` options are no longer supported")
log.info('Override settings for pynamo available {}'.format(OVERRIDE_SETTINGS_PATH))
else:
log.info('Override settings for pynamo not available {}'.format(OVERRIDE_SETTINGS_PATH))
log.info('Using Default settings value')
def get_settings_value(key: str) -> Any:
"""
Fetches the value from the override file.
If the value is not present, then tries to fetch the values from constants.py
"""
if hasattr(override_settings, key):
return getattr(override_settings, key)
if key in default_settings_dict:
return default_settings_dict[key]
return None
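# Example (hypothetical): get_settings_value('connect_timeout_seconds') returns the value
# from the override file when it defines connect_timeout_seconds, otherwise 15 from the
# defaults above, and None for unknown keys.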
class OperationSettings:
"""
Settings applicable to an individual operation.
When set, the settings in this object supersede the global and model settings.
"""
default: ClassVar['OperationSettings']
def __init__(self, *, extra_headers: Optional[Mapping[str, Optional[str]]] = None) -> None:
"""
Initializes operation settings.
:param extra_headers: if set, extra headers to add to the HTTP request. The headers are merged
on top of extra headers derived from settings or models' Meta classes. To delete a header, set its value
to `None`.
"""
self.extra_headers = extra_headers
OperationSettings.default = OperationSettings()
| pynamodb/PynamoDB | pynamodb/settings.py | Python | mit | 2,509 |
#-*- coding: utf-8 -*-
"""
envois.test
~~~~~~~~~~~~
nosetests for the envois pkg
:copyright: (c) 2012 by Mek
:license: BSD, see LICENSE for more details.
"""
import os
import json
import unittest
from envois import invoice
jsn = {"seller": {"name": "Lambda Labs, Inc.", "address": {"street": "857 Clay St. Suite 206", "city": "San Francisco", "state": "CA", "zip": "94108", "phone": "(555) 555-5555", "email": "[email protected]" }, "account": {"swift": "...", "number": "...", "name": "Lambda Labs Inc.", "same_address": True}}, "buyer": {"name": "Foo Corp", "address": {"street": "88 Foo Road, Foo Place", "city": "Fooville", "state": "BA", "zip": "31337"}, "logo": "http://lambdal.com/images/lambda-labs-logo.png"}, "items": [{"description": "Facial Detection & Landmark Recognition Perpetual License", "qty": 1, "unit_price": 32768}], "terms": {"days": 30, "string": ""}}
class Envois_Test(unittest.TestCase):
def test_invoice(self):
invoice.make_invoice(jsn)
| lambdal/envois | test/test_envois.py | Python | mit | 1,000 |
# Taken from https://github.com/salesforce/awd-lstm-lm/blob/master/weight_drop.py
import torch
from torch.nn import Parameter
from functools import wraps
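# WeightDrop (DropConnect): wraps a module and re-applies dropout to the named weight
# matrices before every forward pass; taken from the AWD-LSTM language model code.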
class WeightDrop(torch.nn.Module):
def __init__(self, module, weights, dropout=0, variational=False):
super(WeightDrop, self).__init__()
self.module = module
self.weights = weights
self.dropout = dropout
self.variational = variational
self._setup()
def _dummy(*args, **kwargs):
# We need to replace flatten_parameters with a nothing function
return
def _setup(self):
# Terrible temporary solution to an issue regarding compacting weights re: CUDNN RNN
if issubclass(type(self.module), torch.nn.RNNBase):
self.module.flatten_parameters = self._dummy
for name_w in self.weights:
print('Applying weight drop of {} to {}'.format(self.dropout, name_w))
w = getattr(self.module, name_w)
del self.module._parameters[name_w]
self.module.register_parameter(name_w + '_raw', Parameter(w.data))
def _setweights(self):
for name_w in self.weights:
raw_w = getattr(self.module, name_w + '_raw')
w = None
if self.variational:
mask = torch.autograd.Variable(torch.ones(raw_w.size(0), 1))
if raw_w.is_cuda: mask = mask.cuda()
mask = torch.nn.functional.dropout(mask, p=self.dropout, training=True)
w = mask.expand_as(raw_w) * raw_w
else:
w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training)
setattr(self.module, name_w, w)
def forward(self, *args):
self._setweights()
return self.module.forward(*args)
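# Example usage (hypothetical, not part of the original file): apply 50% DropConnect to
# an LSTM's hidden-to-hidden weights.
# lstm = torch.nn.LSTM(10, 20)
# lstm_wd = WeightDrop(lstm, ['weight_hh_l0'], dropout=0.5)
# output, hidden = lstm_wd(torch.randn(5, 3, 10))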
| eladhoffer/seq2seq.pytorch | seq2seq/models/modules/weight_drop.py | Python | mit | 1,803 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 12:16:26 2017
@author: Anand A Joshi, Divya Varadarajan
"""
import glob
from os.path import isfile, split
import configparser
config_file = u'/big_disk/ajoshi/ABIDE2/study.cfg'
Config = configparser.ConfigParser()
Config.read(config_file)
Config.sections()
STUDY_DIR = Config.get('CSESVREG', 'STUDY_DIR')
NPROC = int(Config.get('CSESVREG', 'NPROC'))
BST_INSTALL = Config.get('CSESVREG', 'BST_INSTALL')
SVREG_ATLAS = Config.get('CSESVREG', 'SVREG_ATLAS')
SVREG_FLAGS = Config.get('CSESVREG', 'SVREG_FLAGS')
CSE_EXE = Config.get('CSESVREG', 'CSE_EXE')
SVREG_EXE = Config.get('CSESVREG', 'SVREG_EXE')
sublist = lst = glob.glob(STUDY_DIR+'/*')
SMOOTHNESS = '6'
ind = 0
cmdln1 = []
cmdln2 = []
incom = 0
com = 0
for sub in sublist:
img = sub + '/anat/t1.roiwise.stats.txt'
subpath, filename = split(img)
outsurfname = subpath + '/t1.heat_sol_comp.mat'
# print img
if not isfile(outsurfname):
incom += 1
print outsurfname
continue
com += 1
print str(incom) + ' remaining ' + str(com) + ' done'
| ajoshiusc/brainsuite-workflows | utility_scripts/main_check_remaining.py | Python | mit | 1,120 |
import json
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.utils import timezone
from django.shortcuts import render_to_response
from cir.models import *
import claim_views
from cir.phase_control import PHASE_CONTROL
import utils
def get_statement_comment_list(request):
response = {}
context = {}
forum = Forum.objects.get(id = request.session['forum_id'])
thread_comments = ForumComment.objects.filter(forum = forum)
print thread_comments
context['comments'] = thread_comments
response['forum_comment'] = render_to_string("phase5/forum-comment.html", context)
return HttpResponse(json.dumps(response), mimetype='application/json')
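# Create a forum comment; an empty parent_id means a new top-level (root) comment,
# otherwise the comment is attached as a reply to the given parent.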
def put_statement_comment(request):
response = {}
context = {}
author = request.user
parent_id = request.REQUEST.get('parent_id')
text = request.REQUEST.get('text')
created_at = timezone.now()
forum = Forum.objects.get(id = request.session['forum_id'])
if parent_id == "": #root node
newForumComment = ForumComment(author = author, text = text, forum = forum, created_at = created_at)
else:
parent = ForumComment.objects.get(id = parent_id)
newForumComment = ForumComment(author = author, text = text, forum = forum, parent = parent, created_at = created_at)
newForumComment.save()
return HttpResponse(json.dumps(response), mimetype='application/json')
def vote_issue(request):
reason = request.REQUEST.get('reason')
author = request.user
forum = Forum.objects.get(id = request.session['forum_id'])
support = True
if (request.REQUEST.get('support') == "false"): support = False
vote, created = ForumVote.objects.get_or_create(forum = forum, author = author)
vote.reason = reason
vote.support = support
vote.save()
response = {}
return HttpResponse(json.dumps(response), mimetype='application/json')
def render_support_bar(request):
author = request.user
forum = Forum.objects.get(id = request.session['forum_id'])
response = {}
response["num_support"] = ForumVote.objects.filter(forum = forum, support = True).count()
response["num_oppose"] = ForumVote.objects.filter(forum = forum, support = False).count()
if request.user.is_authenticated():
response["my_num_support"] = ForumVote.objects.filter(forum = forum, support = True, author = author).count()
response["my_num_oppose"] = ForumVote.objects.filter(forum = forum, support = False, author = author).count()
return HttpResponse(json.dumps(response), mimetype='application/json')
def view_vote_result(request):
author = request.user
forum = Forum.objects.get(id = request.session['forum_id'])
response = {}
context = {}
context["entries"] = ForumVote.objects.filter(forum = forum)
response["vote_result_table"] = render_to_string('phase5/vote-result-table.html', context)
return HttpResponse(json.dumps(response), mimetype='application/json') | xsunfeng/cir | cir/phase5.py | Python | mit | 3,009 |
from setuptools import setup
# Based on
# https://python-packaging.readthedocs.io/en/latest/minimal.html
def readme():
with open('README.md','r') as fr:
return fr.read()
setup(name='docker_machinator',
version='0.1',
description='A tool for managing docker machines from multiple'
'workstations',
long_description=readme(),
entry_points={
'console_scripts': [
'dmachinator = docker_machinator.dmachinator:main',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Topic :: Security',
],
keywords='docker machine dmachinator secure on-disk',
url='https://github.com/realcr/docker_machinator',
author='real',
author_email='[email protected]',
license='MIT',
packages=['docker_machinator'],
install_requires=[
'sstash',
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
include_package_data=True,
zip_safe=False)
| realcr/docker_machinator | setup.py | Python | mit | 1,193 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Message'
db.create_table('firstclass_message', (
('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('data', self.gf('django.db.models.fields.TextField')(default='{}')),
))
db.send_create_signal('firstclass', ['Message'])
def backwards(self, orm):
# Deleting model 'Message'
db.delete_table('firstclass_message')
models = {
'firstclass.message': {
'Meta': {'object_name': 'Message'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'})
}
}
complete_apps = ['firstclass'] | bennylope/django-firstclass | firstclass/south_migrations/0001_initial.py | Python | mit | 1,013 |
#%%
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape, mnist.train.labels.shape)
print(mnist.test.images.shape, mnist.test.labels.shape)
print(mnist.validation.images.shape, mnist.validation.labels.shape)
import tensorflow as tf
sess = tf.InteractiveSession()
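# Softmax regression: predicted class probabilities y = softmax(x*W + b), trained by
# minimizing cross-entropy with mini-batch gradient descent.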
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
tf.global_variables_initializer().run()
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_step.run({x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
| fx2003/tensorflow-study | TensorFlow实战/《TensorFlow实战》代码/3_2_HelloWorld.py | Python | mit | 1,784 |
from flask import Flask
from flask import render_template
from .. import app
@app.route('/')
def index():
user = {'first_name': 'Lance', 'last_name': 'Anderson'}
return render_template('index.html', user=user)
@app.route('/user/<user_id>/board/<board_id>')
@app.route('/new_board')
def board(user_id=None, board_id=None):
user = {'first_name': 'Lance', 'last_name': 'Anderson'}
return render_template('board.html', user=user)
| Lancea12/sudoku_solver | sudoku/views/index.py | Python | mit | 445 |
raise NotImplementedError("getopt is not yet implemented in Skulpt")
| ArcherSys/ArcherSys | skulpt/src/lib/getopt.py | Python | mit | 69 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# MODIFIED FROM ORIGINAL VERSION
#
# This file is not the same as in pypi. It includes a pull request to fix py3
# incompabilities that never ended up getting merged.
###############################################################################
import os
from ctypes import CDLL, c_char_p, c_int, c_void_p, c_uint, c_double, byref, Structure, get_errno,\
POINTER, c_short, c_size_t, create_string_buffer
from ctypes.util import find_library
from psistats.libsensors.lib import stdc
version_info = (0, 0, 3)
__version__ = '.'.join(map(str, version_info))
__date__ = '2014-08-17'
__author__ = "Marc 'BlackJack' Rintsch"
__contact__ = '[email protected]'
__license__ = 'LGPL v2.1'
API_VERSION = 4
DEFAULT_CONFIG_FILENAME = '/etc/sensors3.conf'
LIB_FILENAME = os.environ.get('SENSORS_LIB') or find_library('sensors')
SENSORS_LIB = CDLL(LIB_FILENAME)
VERSION = c_char_p.in_dll(SENSORS_LIB, 'libsensors_version').value
MAJOR_VERSION = version_info[0]
class SensorsError(Exception):
def __init__(self, message, error_number=None):
Exception.__init__(self, message)
self.error_number = error_number
def _error_check(result, _func, _arguments):
if result < 0:
raise SensorsError(_strerror(result), result)
return result
_strerror = SENSORS_LIB.sensors_strerror
_strerror.argtypes = [c_int]
_strerror.restype = c_char_p
_init = SENSORS_LIB.sensors_init
_init.argtypes = [c_void_p]
_init.restype = c_int
_init.errcheck = _error_check
cleanup = SENSORS_LIB.sensors_cleanup
cleanup.argtypes = None
cleanup.restype = None
SENSORS_FEATURE_IN = 0x00
SENSORS_FEATURE_FAN = 0x01
SENSORS_FEATURE_TEMP = 0x02
SENSORS_FEATURE_POWER = 0x03
SENSORS_FEATURE_ENERGY = 0x04
SENSORS_FEATURE_CURR = 0x05
SENSORS_FEATURE_HUMIDITY = 0x06
# SENSORS_FEATURE_MAX_MAIN
SENSORS_FEATURE_VID = 0x10
SENSORS_FEATURE_INTRUSION = 0x11
#SENSORS_FEATURE_MAX_OTHER,
SENSORS_FEATURE_BEEP_ENABLE = 0x18
#SENSORS_FEATURE_MAX,
#SENSORS_FEATURE_UNKNOWN = INT_MAX
def init(config_filename=DEFAULT_CONFIG_FILENAME):
file_p = stdc.fopen(config_filename.encode('utf-8'), b'r')
if file_p is None:
error_number = get_errno()
raise OSError(error_number, os.strerror(error_number), config_filename)
try:
_init(file_p)
finally:
stdc.fclose(file_p)
class Subfeature(Structure):
_fields_ = [
('name', c_char_p),
('number', c_int),
('type', c_int),
('mapping', c_int),
('flags', c_uint),
]
def __repr__(self):
return '<%s name=%r number=%d type=%d mapping=%d flags=%08x>' % (
self.__class__.__name__,
self.name,
self.number,
self.type,
self.mapping,
self.flags
)
def get_value(self):
result = c_double()
_get_value(byref(self.parent.chip), self.number, byref(result))
return result.value
SUBFEATURE_P = POINTER(Subfeature)
class Feature(Structure):
_fields_ = [
('name', c_char_p),
('number', c_int),
('type', c_int),
('_first_subfeature', c_int),
('_padding1', c_int),
]
def __repr__(self):
return '<%s name=%r number=%r type=%r>' % (
self.__class__.__name__,
self.name,
self.number,
self.type
)
def __iter__(self):
number = c_int(0)
while True:
result_p = _get_all_subfeatures(
byref(self.chip),
byref(self),
byref(number)
)
if not result_p:
break
result = result_p.contents
result.chip = self.chip
result.parent = self
yield result
@property
def label(self):
#
# TODO Maybe this is a memory leak!
#
return _get_label(byref(self.chip), byref(self)).decode('utf-8')
def get_value(self):
#
# TODO Is the first always the correct one for all feature types?
#
return next(iter(self)).get_value()
FEATURE_P = POINTER(Feature)
class Bus(Structure):
TYPE_ANY = -1
NR_ANY = -1
_fields_ = [
('type', c_short),
('nr', c_short),
]
def __str__(self):
return (
'*' if self.type == self.TYPE_ANY
else _get_adapter_name(byref(self)).decode('utf-8')
)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.type, self.nr)
@property
def has_wildcards(self):
return self.type == self.TYPE_ANY or self.nr == self.NR_ANY
BUS_P = POINTER(Bus)
class Chip(Structure):
#
# TODO Move common stuff into `AbstractChip` class.
#
_fields_ = [
('prefix', c_char_p),
('bus', Bus),
('addr', c_int),
('path', c_char_p),
]
PREFIX_ANY = None
ADDR_ANY = -1
def __new__(cls, *args):
result = super(Chip, cls).__new__(cls)
if args:
_parse_chip_name(args[0].encode('utf-8'), byref(result))
return result
def __init__(self, *_args):
Structure.__init__(self)
#
# Need to bind the following to the instance so it is available in
# `__del__()` when the interpreter shuts down.
#
self._free_chip_name = _free_chip_name
self.byref = byref
def __del__(self):
if self._b_needsfree_:
self._free_chip_name(self.byref(self))
def __repr__(self):
return '<%s prefix=%r bus=%r addr=%r path=%r>' % (
(
self.__class__.__name__,
self.prefix,
self.bus,
self.addr,
self.path
)
)
def __str__(self):
buffer_size = 200
result = create_string_buffer(buffer_size)
used = _snprintf_chip_name(result, len(result), byref(self))
assert used < buffer_size
return result.value.decode('utf-8')
def __iter__(self):
number = c_int(0)
while True:
result_p = _get_features(byref(self), byref(number))
if not result_p:
break
result = result_p.contents
result.chip = self
yield result
@property
def adapter_name(self):
return str(self.bus)
@property
def has_wildcards(self):
return (
self.prefix == self.PREFIX_ANY
or self.addr == self.ADDR_ANY
or self.bus.has_wildcards
)
CHIP_P = POINTER(Chip)
_parse_chip_name = SENSORS_LIB.sensors_parse_chip_name
_parse_chip_name.argtypes = [c_char_p, CHIP_P]
_parse_chip_name.restype = c_int
_parse_chip_name.errcheck = _error_check
_free_chip_name = SENSORS_LIB.sensors_free_chip_name
_free_chip_name.argtypes = [CHIP_P]
_free_chip_name.restype = None
_snprintf_chip_name = SENSORS_LIB.sensors_snprintf_chip_name
_snprintf_chip_name.argtypes = [c_char_p, c_size_t, CHIP_P]
_snprintf_chip_name.restype = c_int
_snprintf_chip_name.errcheck = _error_check
_get_adapter_name = SENSORS_LIB.sensors_get_adapter_name
_get_adapter_name.argtypes = [BUS_P]
_get_adapter_name.restype = c_char_p
_get_label = SENSORS_LIB.sensors_get_label
_get_label.argtypes = [CHIP_P, FEATURE_P]
_get_label.restype = c_char_p
_get_value = SENSORS_LIB.sensors_get_value
_get_value.argtypes = [CHIP_P, c_int, POINTER(c_double)]
_get_value.restype = c_int
_get_value.errcheck = _error_check
#
# TODO sensors_set_value()
# TODO sensors_do_chip_sets()
#
_get_detected_chips = SENSORS_LIB.sensors_get_detected_chips
_get_detected_chips.argtypes = [CHIP_P, POINTER(c_int)]
_get_detected_chips.restype = CHIP_P
_get_features = SENSORS_LIB.sensors_get_features
_get_features.argtypes = [CHIP_P, POINTER(c_int)]
_get_features.restype = FEATURE_P
_get_all_subfeatures = SENSORS_LIB.sensors_get_all_subfeatures
_get_all_subfeatures.argtypes = [CHIP_P, FEATURE_P, POINTER(c_int)]
_get_all_subfeatures.restype = SUBFEATURE_P
#
# TODO sensors_get_subfeature() ?
#
def iter_detected_chips(chip_name='*-*'):
chip = Chip(chip_name)
number = c_int(0)
while True:
result = _get_detected_chips(byref(chip), byref(number))
if not result:
break
yield result.contents
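# Example usage (hypothetical, not part of the original file):
# init()
# for chip in iter_detected_chips():
#     for feature in chip:
#         print('%s: %s = %s' % (chip, feature.label, feature.get_value()))
# cleanup()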
| psistats/linux-client | psistats/libsensors/lib/sensors.py | Python | mit | 8,525 |
# Regular expressions are a powerful tool for pattern matching when you
# know the general format of what you're trying to find but want to keep
# it loose in terms of actual content: think finding email addresses or
# phone numbers based on what they have in common with each other. Python
# has a standard library that deals with it.
import re
#
records = [
'April 13, 2013 Cyberdyne Systems $4,000.00 18144 El Camino '
'Real, Sunnyvale, CA 94087 (408) 555-1234 [email protected] '
'December 2, 2018 December 14, 2018',
'May 4, 2013 Sam Fuzz, Inc. $6,850.50 939 Walnut St, San '
'Carlos, CA 94070 (408) 555-0304 [email protected] January 28'
', 2016 February 15, 2016']
# Find the word 'Sunnyvale' in the first record with re.search()
re.search('Sunnyvale', records[0]).group()
# Find the first date in the first record. Let's pick apart the pattern:
# 1. \w matches upper/lowercase A-Z and digits 0-9, good for text.
# 2. {3,} matches three or more (shortest possible month is May)
# 3. \s matches whitespace, good for spaces and tabs
# 4. {1} matches exactly one
# 5. \d matches 0-9
# 6. {1,2} matches at least one, but no more than 2
# 7. , matches the comma in the date
# 8. \s{1}: again, one space or tab
# 9. \d{4} matches four digits.
re.search('\w{3,}\s{1}\d{1,2},\s{1}\d{4}', records[0]).group()
# Do the same thing but wrap some parentheses around the month, day and year
# patterns and re.search().group(0) to return the whole date.
date_match = re.search('(\w{3,})\s{1}(\d{1,2}),\s{1}(\d{4})', records[0])
date_match.group(0)
# Try 1, 2 and 3 to cycle through month, day and year.
date_match.group(1)
date_match.group(2)
date_match.group(3)
# Grab all the dates in the first record with re.findall().
all_dates = re.findall('\w{3,}\s{1}\d{1,2},\s{1}\d{4}', records[0])
# Print them out with a for loop
for date in all_dates:
print date
# Pick out and print dollar amounts from the records.
# . matches any character, * matches any number of times
for record in records:
money_match = re.search('\$.*\.\d{2}', record)
print money_match.group()
# Try to do the same thing for the phone numbers.
for record in records:
ph_match = re.search('\(\d{3}\)\s\d{3}-\d{4}', record)
print ph_match.group()
# How would I isolate something like a company name that's totally variable?
# Think about the hooks you have on either side; the pattern you want to
# match here has to do with what's around it.
for record in records:
company_match = re.search('\d{4}\s(.+)\s\$', record)
print company_match.group(1)
# We can also substitute based on a pattern. Give everyone an '.info'
# email address via print and re.sub().
for record in records:
print re.sub('\.\w{3}', '.info', record)
# If you have multiple character possibilities that act as delimiters for a
# string you want to break apart, re.split() can come in handy.
my_list = ['OCT-2010', 'NOV/2011', 'FEB 2012', 'MAR/2012']
for item in my_list:
print re.split('-|/|\s', item)
| ireapps/coding-for-journalists | 2_web_scrape/completed/fun_with_regex_done.py | Python | mit | 3,044 |
# Generated by Django 2.2.11 on 2020-11-09 17:00
import daphne_context.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('daphne_context', '0010_userinformation_mycroft_connection'),
]
operations = [
migrations.RemoveField(
model_name='userinformation',
name='mycroft_session',
),
migrations.CreateModel(
name='MycroftUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mycroft_session', models.CharField(default=daphne_context.utils.generate_mycroft_session, max_length=9)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| seakers/daphne_brain | daphne_context/migrations/0011_auto_20201109_1100.py | Python | mit | 1,011 |
import matplotlib.transforms
import numpy
def has_legend(axes):
return axes.get_legend() is not None
def get_legend_text(obj):
"""Check if line is in legend."""
leg = obj.axes.get_legend()
if leg is None:
return None
keys = [h.get_label() for h in leg.legendHandles if h is not None]
values = [t.get_text() for t in leg.texts]
label = obj.get_label()
d = dict(zip(keys, values))
if label in d:
return d[label]
return None
def transform_to_data_coordinates(obj, xdata, ydata):
"""The coordinates might not be in data coordinates, but could be sometimes in axes
coordinates. For example, the matplotlib command
axes.axvline(2)
will have the y coordinates set to 0 and 1, not to the limits. Therefore, a
two-stage transform has to be applied:
1. first transforming to display coordinates, then
2. from display to data.
"""
if obj.axes is not None and obj.get_transform() != obj.axes.transData:
points = numpy.array([xdata, ydata]).T
transform = matplotlib.transforms.composite_transform_factory(
obj.get_transform(), obj.axes.transData.inverted()
)
return transform.transform(points).T
return xdata, ydata
| m-rossi/matplotlib2tikz | tikzplotlib/_util.py | Python | mit | 1,258 |
import functools
from time import strftime
import tensorflow as tf
# lazy_property: no need for if $ not None logic
def lazy_property(function):
attribute = '_cache_' + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
def timestamp() -> str:
return strftime('%Y%m%d-%H%M%S')
# from https://gist.github.com/danijar/8663d3bbfd586bffecf6a0094cd116f2:
def doublewrap(function):
"""
A decorator decorator, allowing the decorated decorator to be used without
parentheses if no arguments are provided. All arguments must be optional.
"""
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
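# Example usage (hypothetical, not part of the original file):
# class Model:
#     def __init__(self, data):
#         self.data = data
#         self.prediction  # graph ops are built once, inside tf.variable_scope('prediction')
#     @define_scope
#     def prediction(self):
#         return tf.layers.dense(self.data, units=10)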
def unzip(iterable):
return zip(*iterable)
def single(list):
first = list[0]
assert (len(list) == 1)
return first
| JuliusKunze/thalnet | util.py | Python | mit | 2,098 |
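# Sparse matrix multiplication: compute C = A * B, accumulating C[i][k] += A[i][j] * B[j][k]
# only when A[i][j] is non-zero, so the inner work scales with the non-zero entries of A.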
class Solution(object):
def multiply(self, A, B):
"""
:type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]]
"""
p = len(B[0])
C = [[0 for _ in xrange(p)] for _ in xrange(len(A))]
for i in xrange(len(A)):
for j in xrange(len(B)):
if A[i][j] != 0:
for k in xrange(p):
C[i][k] += A[i][j] * B[j][k]
return C
| quake0day/oj | Sparse Matrix Multiplication.py | Python | mit | 535 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a doctest example with Numpy arrays.
For more information about doctest, see
https://docs.python.org/3/library/doctest.html (reference)
and
www.fil.univ-lille1.fr/~L1S2API/CoursTP/tp_doctest.html (nice examples in
French).
To run doctest, execute this script (thanks to the
`if __name__ == "__main__": import doctest ; doctest.testmod()` directives)
or execute the following command in a terminal::
python3 -m doctest datapipe/io/images.py
"""
import numpy as np
def example1():
"""A very basic doctest example.
Notes
-----
The numpy module is imported at the end of this file, in the test::
if __name__ == "__main__":
import doctest
import numpy
doctest.testmod()
Examples
--------
>>> numpy.array([1, 2, 3])
array([1, 2, 3])
"""
pass
def example2():
"""A very basic doctest example to test values returned by this function.
Examples
--------
>>> example2()
array([1, 2, 3])
"""
return numpy.array([1, 2, 3])
def example3(a):
"""A very basic example.
Examples
--------
>>> a = numpy.array([3, 1, 2])
>>> example3(a)
>>> a
array([1, 2, 3])
"""
a.sort()
def example4(a):
"""Replace *in-place* `NaN` values in `a` by zeros.
Replace `NaN` ("Not a Number") values in `a` by zeros.
Parameters
----------
a : array_like
The array to process. `NaN` values are replaced **in-place**, thus this
function changes the provided object.
Returns
-------
array_like
Returns a boolean mask array indicating whether values in `a`
initially contained `NaN` values (`True`) or not (`False`). This array
is defined by the instruction `np.isnan(a)`.
Notes
-----
`NaN` values are replaced **in-place** in the provided `a`
parameter.
Examples
--------
>>> a = numpy.array([1., 2., numpy.nan])
>>> a
array([ 1., 2., nan])
>>> example4(a)
array([False, False, True], dtype=bool)
Be careful with white space! The following will work...
>>> a
array([ 1., 2., 0.])
but this one wouldn't
# >>> a
# array([ 1., 2., 0.])
As an alternative, the `doctest: +NORMALIZE_WHITESPACE` can be used (see
https://docs.python.org/3/library/doctest.html#doctest.NORMALIZE_WHITESPACE
and http://www.fil.univ-lille1.fr/~L1S2API/CoursTP/tp_doctest.html)
>>> a
... # doctest: +NORMALIZE_WHITESPACE
array([ 1., 2., 0.])
but the space before the '1' is still required...
"""
nan_mask = np.isnan(a)
a[nan_mask] = 0
return nan_mask
if __name__ == "__main__":
import doctest
import numpy
doctest.testmod()
| jeremiedecock/snippets | python/doctest/numpy_example.py | Python | mit | 2,806 |
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt #import modules
import matplotlib.patches as mpatches
import numpy as np
#get_ipython().magic(u'matplotlib inline') # set to inline for ipython
# In[2]:
water = [0,2,2,3,1.5,1.5,3,2,2,2,2,2.5,2] #arrange data from lab
alc = [0,2.5,2.5,2.5,2.5,3,2.5,2.5]
weight = [20.9+(0.41*5*x) for x in range(0,13)] #generate weight array, based on mass of paper clips, in grams
actWater = [(22)+sum(water[:x+1]) for x in range(0,len(water))] #cumulative sum of water displacement
actalc = [(28)+sum(alc[:x+1]) for x in range(0,len(alc))] #cumulative sum of alc displacement
slopeAlc, intercept = np.polyfit(weight[:len(actalc)], actalc, 1) #mL/g find average slope of alc, have to invert to find density
slopeWater, interssss = np.polyfit(weight, actWater, 1) #repeat for water
print slopeWater,slopeAlc #print values
densityWater = 1/(slopeWater * 0.001) #invert and convert to kg/m^3
densityAlc = 1/(slopeAlc * 0.001)
print densityWater, densityAlc #print them
# In[3]:
actualWater = 1000 # finding percent errors in densities kg/m^3
actualAlc = 789
pErrorWater = (abs(actualWater-densityWater)/actualWater) * 100 #find percent errors
pErrorAlc = (abs(actualAlc-densityAlc)/actualAlc) *100
print pErrorWater, pErrorAlc #print percent errors
# In[4]:
plt.figure() #create figure
plt.plot(weight,actWater,"o") # plot scatter of water vs weight (ml/g)
plt.plot(weight[:len(actalc)],actalc,"o") #plot scatter of actcalc
plt.xlabel("Mass (g)") #add labels
plt.ylabel("Displacement (mL)") #add labels
plt.show() #show figure
# In[5]:
x = [0,1,2,3,4] ##TESTING np.polyfit
y = [0,0.5,1,1.5,2]
plt.figure()
plt.plot(y,x)
slope,inter = np.polyfit(y,x,1)
print slope
# In[9]:
densityAlc * (1/100.0**3) *1000 ##TESTING CONVERSION OF DENSITY
# In[ ]:
| aknh9189/code | physicsScripts/flotation/flotation.py | Python | mit | 1,813 |
from __future__ import with_statement
from cuisine import *
from fabric.api import env
from fabric.colors import *
from fabric.utils import puts
from fabric.context_managers import cd, settings, prefix
from flabric import ApplicationContext as AppContext
from flabric.ubuntu import UbuntuServer
import os, flabric
class Server(UbuntuServer):
"""nginx, uWSGI and supervisor server"""
def setup(self):
with mode_sudo():
group_ensure('admin')
for u in [('ubuntu', '/home/ubuntu')]:
puts(green('Ensuring user: ' + u[0]))
user_ensure(u[0], home=u[1])
group_user_ensure('admin', 'ubuntu')
puts(green("Updating /etc/sudoers"))
file_update(
'/etc/sudoers',
lambda _: text_ensure_line(_,
'%admin ALL=(ALL) ALL',
'ubuntu ALL=(ALL) NOPASSWD:ALL'
))
puts(green("Adding your public key to ubuntu user"))
ssh_authorize('ubuntu', file_local_read('~/.ssh/id_rsa.pub'))
puts(green('Updating repository info for nginx'))
file_update(
'/etc/apt/sources.list',
lambda _: text_ensure_line(_,
'deb http://nginx.org/packages/ubuntu/ lucid nginx',
'deb-src http://nginx.org/packages/ubuntu/ lucid nginx'
))
puts(green('Adding singing key for nginx'))
keys = run('apt-key list')
if not 'nginx' in keys:
run('wget http://nginx.org/keys/nginx_signing.key')
run('apt-key add nginx_signing.key')
run('apt-get update -qq')
for p in ['build-essential',
'libmysqlclient-dev',
'libxml2-dev',
'libjpeg62-dev',
'python-dev',
'python-setuptools',
'python-mysqldb',
'python-pip',
'mysql-client',
'git-core',
'nginx']:
puts(green('Installing: ' + p))
package_ensure(p)
puts(green('Linking libraries'))
for l in [('/usr/lib/x86_64-linux-gnu/libfreetype.so',
'/usr/lib/libfreetype.so'),
('/usr/lib/x86_64-linux-gnu/libz.so',
'/usr/lib/libz.so'),
('/usr/lib/x86_64-linux-gnu/libjpeg.so',
'/usr/lib/libjpeg.so')]:
file_link(l[0], l[1])
for p in ['virtualenv',
'virtualenvwrapper',
'supervisor',
'uwsgi']:
puts(green('Installing: ' + p))
run('pip install ' + p)
puts(green('Configuring supervisor and nginx'))
tdir = os.path.dirname(__file__)
for f in [('/etc/supervisord.conf', 'supervisord.conf.tmpl'),
('/etc/nginx/nginx.conf', 'nginx.conf.tmpl'),
('/etc/init.d/supervisor', 'supervisor.tmpl')]:
fn = f[0]
contents = file_local_read(os.path.join(tdir, 'templates', f[1]))
if not file_exists(fn):
file_write(fn, contents)
else:
file_update(fn, lambda _:contents)
puts(green('Create supervisor config folder'))
dir_ensure('/etc/supervisor')
run('chmod +x /etc/init.d/supervisor')
run('update-rc.d supervisor defaults')
run('/etc/init.d/supervisor start')
puts(green('Server setup complete!'))
puts(green('Add sites to nginx by linking configuration files in /etc/nginx/sites-enabled.'))
puts(green('Add uWSGI processes to supervisor by linking configuration files in /etc/supervisor/apps-enabled.'))
def restart(self):
puts(green('Restarting server'))
for c in ['nginx', 'supervisor']:
sudo('/etc/init.d/%s restart' % c)
def start(self):
puts(green('Starting server'))
for c in ['nginx', 'supervisor']:
sudo('/etc/init.d/%s start' % c)
def stop(self):
puts(green('Stoping server'))
for c in ['nginx', 'supervisor']:
sudo('/etc/init.d/%s stop' % c)
def create_app_context(self, ctx):
with settings(user=ctx.user):
puts(green('Creating app context under user: ' + env.user))
tdir = os.path.dirname(__file__)
for f in ['bashrc', 'bash_profile', 'profile']:
lfn = os.path.join(tdir, 'templates', '%s.tmpl' % f)
contents = file_local_read(lfn) % ctx.__dict__
rfn = '/home/%s/.%s' % (ctx.user, f)
file_ensure(rfn, owner=ctx.user, group=ctx.user)
file_update(rfn, lambda _:contents)
dir_ensure('/home/%s/sites' % ctx.user)
for d in ctx.required_dirs:
dir_ensure(d)
def upload_app(self, ctx):
ctx.pre_upload()
with settings(user=ctx.user):
env.remote_bundle = '/tmp/' + os.path.basename(env.local_bundle)
file_upload(env.remote_bundle, env.local_bundle)
run('rm -rf ' + ctx.src_dir)
dir_ensure(ctx.src_dir)
with cd(ctx.src_dir):
run('tar -xvf ' + env.remote_bundle)
ctx.post_upload()
def upload_config(self, ctx):
with settings(user=ctx.user):
for c in [(env.nginx_template,'nginx'),
(env.supervisor_template, 'supervisor')]:
fn = '%s/%s.conf' % (ctx.etc_dir, c[1])
contents = file_local_read(c[0]) % ctx.__dict__
if file_exists(fn):
file_update(fn, lambda _:contents)
else:
file_write(fn, contents)
with mode_sudo():
for c in [('/etc/nginx/conf.d', 'nginx'),
('/etc/supervisor', 'supervisor')]:
source = '%s/%s.conf' % (ctx.etc_dir, c[1])
destination = '%s/%s.conf' % (c[0], ctx.name)
if file_exists(destination) and (not file_is_link(destination)):
run('rm ' + destination)
file_link(source, destination)
class ApplicationContext(AppContext):
def __init__(self, name='default', user='ubuntu'):
super(ApplicationContext, self).__init__(name, user)
self.virtualenv = '/home/%s/.virtualenv/%s' % (self.user, self.name)
self.root_dir = '/home/%s/sites/%s' % (self.user, self.name)
self.releases_dir = self.root_dir + '/releases'
self.src_dir = self.releases_dir + '/current'
self.etc_dir = self.root_dir + '/etc'
self.log_dir = self.root_dir + '/log'
self.run_dir = self.root_dir + '/run'
@property
def required_dirs(self):
return [self.root_dir, self.releases_dir, self.src_dir,
self.etc_dir, self.log_dir, self.run_dir]
def pre_upload(self):
pass
def post_upload(self):
run('rmvirtualenv ' + self.name)
run('mkvirtualenv ' + self.name)
with settings(user=self.user):
with cd(self.src_dir):
with prefix('workon ' + self.name):
run('pip install -r requirements.txt') | mattupstate/flabric | flabric/ubuntu/nginx_uwsgi_supervisor.py | Python | mit | 7,722 |
import json_creator
class Message:
def __init__(self, sent_to, sent_from, text):
self.sent_from = sent_from
self.text = text
self.sent_to = sent_to
def parse_message(self, message_dict):
self.sent_from = json_creator.get_message_sendfrom(message_dict)
self.sent_to = json_creator.get_message_sendto(message_dict)
self.text = json_creator.get_message_text(message_dict)
def get_message(self):
return json_creator.get_message(self.sent_to, self.sent_from,
self.text)
class Chat:
def __init__(self):
self.clients = []
self.messages = []
def add_client(self, user):
self.clients.append(user)
def remove_client(self, user):
self.clients.remove(user)
class Client:
def __init__(self):
self.chat = []
self.username = ''
def set_username(self, username):
self.username = username
def send_message(self, sendto, message):
new_message = Message(sendto, self.username, message)
| ndmoroz/dont-wanna-talk | jim_chat.py | Python | mit | 1,075 |
"""
Settings for testing the application.
"""
import os
DEBUG = True
DJANGO_RDFLIB_DEVELOP = True
DB_PATH = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'rdflib_django.db'))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': DB_PATH,
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SITE_ID = 1
STATIC_URL = '/static/'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'rdflib_django',
)
ROOT_URLCONF = 'rdflib_django.urls'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'': {
'handlers': ['console'],
'propagate': True,
'level': 'INFO',
},
}
}
| publysher/rdflib-django | src/rdflib_django/testsettings.py | Python | mit | 1,265 |
import Image
import json
south = 51.416
north = 51.623
west = -0.415
east = 0.179
if __name__ == "__main__":
x = Image.fromstring("RGBA", (2668,1494), open("output_pixel_data").read())
x.save("lit-map.png", "PNG")
| samphippen/london | render.py | Python | mit | 232 |
# coding=utf-8
import threading
server = None
web_server_ip = "0.0.0.0"
web_server_port = "8000"
web_server_template = "www"
def initialize_web_server(config):
'''
Setup the web server, retrieving the configuration parameters
and starting the web server thread
'''
global web_server_ip, web_server_port, web_server_template
# Check for custom web server address
compositeWebServerAddress = config.get('BOT', 'customWebServerAddress', '0.0.0.0').split(":")
# associate web server ip address
web_server_ip = compositeWebServerAddress[0]
# check for IP:PORT legacy format
if (len(compositeWebServerAddress) > 1):
# associate web server port
web_server_port = compositeWebServerAddress[1]
else:
# Check for custom web server port
web_server_port = config.get('BOT', 'customWebServerPort', '8000')
# Check for custom web server template
web_server_template = config.get('BOT', 'customWebServerTemplate', 'www')
print('Starting WebServer at {0} on port {1} with template {2}'
.format(web_server_ip, web_server_port, web_server_template))
thread = threading.Thread(target=start_web_server)
thread.daemon = True
thread.start()
def start_web_server():
'''
Start the web server
'''
import SimpleHTTPServer
import SocketServer
import socket
try:
port = int(web_server_port)
host = web_server_ip
# Do not attempt to fix code warnings in the below class, it is perfect.
class QuietHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
# quiet server logs
def log_message(self, format, *args):
return
# serve from www folder under current working dir
def translate_path(self, path):
return SimpleHTTPServer.SimpleHTTPRequestHandler.translate_path(self, '/' + web_server_template + path)
global server
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.TCPServer((host, port), QuietHandler)
if host == "0.0.0.0":
# Get all addresses that we could listen on the port specified
addresses = [i[4][0] for i in socket.getaddrinfo(socket.gethostname().split('.')[0], port)]
addresses = [i for i in addresses if ':' not in i] # Filter out all IPv6 addresses
addresses.append('127.0.0.1') # getaddrinfo doesn't always get localhost
hosts = list(set(addresses)) # Make list unique
else:
hosts = [host]
serving_msg = "http://{0}:{1}/lendingbot.html".format(hosts[0], port)
for host in hosts[1:]:
serving_msg += ", http://{0}:{1}/lendingbot.html".format(host, port)
print('Started WebServer, lendingbot status available at {0}'.format(serving_msg))
server.serve_forever()
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print('Failed to start WebServer: {0}'.format(ex.message))
def stop_web_server():
'''
Stop the web server
'''
try:
print("Stopping WebServer")
threading.Thread(target=server.shutdown).start()
except Exception as ex:
ex.message = ex.message if ex.message else str(ex)
print("Failed to stop WebServer: {0}".format(ex.message))
| hiei171/lendingbotpoloniex | modules/WebServer.py | Python | mit | 3,367 |
#!/usr/bin/env python2
""" Quick helper to add HTML5 DOCTYPE and <title> to every testcase. """
import os
import re
import sys
def fixhtml(folder):
changed = 0
for dirpath, _, filenames in os.walk(folder):
for file in filenames:
name, ext = os.path.splitext(file)
if ext != '.html':
continue
path = '%s/%s' % (dirpath, file)
title = ' '.join(name.split('-'))
shouldbe = '<!DOCTYPE html>\n<title>%s</title>\n' % title
with open(path, 'r') as f:
content = f.read()
if content.startswith(shouldbe):
continue
changed += 1
content = re.sub('\s*<!DOCTYPE[^>]*>\s*<title>[^<]*</title>\s*', '',
content)
with open(path, 'w') as f:
f.write(shouldbe + content)
return changed
if __name__ == '__main__':
folder = '.' if len(sys.argv) < 2 else sys.argv[1]
changed = fixhtml(folder)
print('Fixed %d files.' % changed)
| qll/autoCSP | testsuite/testcases/fixhtml.py | Python | mit | 1,054 |
import subprocess
import sys
import shutil
import os
import os.path
import unittest
import json
import getpass
import requests
import zipfile
import argparse
import io
import tempfile
import time
import lark
from osspeak import version
OSSPEAK_MAIN_PATH = os.path.join('osspeak', 'main.py')
OSSPEAK_SRC_FOLDER = 'osspeak'
DIST_FOLDER = 'dist'
WSR_SRC_FOLDER = os.path.join('engines', 'RecognizerIO', 'RecognizerIO', 'bin', 'Debug')
WSR_DEST_FOLDER = os.path.join(DIST_FOLDER, 'engines', 'wsr')
TEST_DIR = os.path.join('osspeak', 'tests')
API_URL = 'https://github.com/api/v3/repos/osspeak/osspeak/releases'
UPLOAD_URL = 'https://github.com/api/uploads/repos/osspeak/osspeak/releases'
def main():
cl_args = parse_cl_args()
# tests_passed = run_tests()
# if not tests_passed:
# print('Unit test(s) failed')
# return
build_osspeak()
if cl_args.release:
create_github_release()
def parse_cl_args():
parser = argparse.ArgumentParser()
parser.add_argument('--release', action='store_true')
return parser.parse_args()
def build_osspeak():
lark_path = os.path.dirname(os.path.abspath(lark.__file__))
grammar_path = os.path.join(lark_path, 'grammars', 'common.lark')
dest_path = os.path.join('lark', 'grammars')
subprocess.call(['pyinstaller', OSSPEAK_MAIN_PATH, '--clean', '-F',
'--paths', OSSPEAK_SRC_FOLDER, '--add-data', f'{grammar_path};{dest_path}', '-n', 'osspeak'])
copy_engines()
def copy_engines():
if os.path.exists(WSR_DEST_FOLDER):
shutil.rmtree(WSR_DEST_FOLDER)
shutil.copytree(WSR_SRC_FOLDER, WSR_DEST_FOLDER)
def create_github_release():
username = input('Github username: ')
pwd = getpass.getpass('Github password: ')
release_version = f'v{version.version}'
auth = username, pwd
data = {
"tag_name": release_version,
"name": release_version,
}
response = requests.post(
'https://api.github.com/repos/osspeak/osspeak/releases',
data=json.dumps(data),
auth=auth
)
if not response.ok:
print('Error uploading release to GitHub:')
print(response.text)
return
response_data = json.loads(response.text)
upload_url = response_data['upload_url'].split('{')[0]
upload_release_folder(upload_url, auth)
def run_tests():
loader = unittest.TestLoader()
test_suite = loader.discover(TEST_DIR, top_level_dir=OSSPEAK_SRC_FOLDER)
result = unittest.TextTestRunner(verbosity=2).run(test_suite)
return result.wasSuccessful()
def upload_release_folder(upload_url, auth, zip_name='windows-cli.zip'):
zip_bytes = write_release_zip()
headers = {
'Content-Type': 'application/zip',
'name': 'windows-cli.zip'
}
response = requests.post(
f'{upload_url}?name={zip_name}',
data=zip_bytes,
auth=auth,
headers=headers
)
def write_release_zip():
fd, fname = tempfile.mkstemp(suffix='.zip')
shutil.make_archive(fname[:-4], root_dir='dist', format='zip')
with open(fname, 'rb') as f:
zip_bytes = f.read()
os.close(fd)
os.remove(fname)
return zip_bytes
if __name__ == '__main__':
main() | osspeak/osspeak | buildit.py | Python | mit | 3,203 |
T = int(raw_input())
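# For each test case, sum the even Fibonacci numbers strictly below N (Project Euler 2 style).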
for test in xrange(T):
N = int(raw_input())
a, b, result = 0, 1, 0
c = a+b
while c < N:
if c%2 == 0:
result += c
a,b = b,c
c = a+b
print result | MayankAgarwal/euler_py | 002/euler002.py | Python | mit | 236 |
import numbers
import warnings
from functools import wraps, partial
from typing import List, Callable
import logging
import numpy as np
import jax
import jax.numpy as jnp
import jax.scipy as scipy
from jax.core import Tracer
from jax.interpreters.xla import DeviceArray
from jax.scipy.sparse.linalg import cg
from jax import random
from phi.math import SolveInfo, Solve, DType
from ..math.backend._dtype import to_numpy_dtype, from_numpy_dtype
from phi.math.backend import Backend, ComputeDevice
from phi.math.backend._backend import combined_dim, SolveResult
class JaxBackend(Backend):
def __init__(self):
Backend.__init__(self, "Jax", default_device=None)
try:
self.rnd_key = jax.random.PRNGKey(seed=0)
except RuntimeError as err:
warnings.warn(f"{err}")
self.rnd_key = None
def prefers_channels_last(self) -> bool:
return True
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
devices = []
for jax_dev in jax.devices():
jax_dev_type = jax_dev.platform.upper()
if device_type is None or device_type == jax_dev_type:
description = f"id={jax_dev.id}"
devices.append(ComputeDevice(self, jax_dev.device_kind, jax_dev_type, -1, -1, description, jax_dev))
return devices
# def set_default_device(self, device: ComputeDevice or str):
# if device == 'CPU':
# jax.config.update('jax_platform_name', 'cpu')
# elif device == 'GPU':
# jax.config.update('jax_platform_name', 'gpu')
# else:
# raise NotImplementedError()
def _check_float64(self):
if self.precision == 64:
if not jax.config.read('jax_enable_x64'):
jax.config.update('jax_enable_x64', True)
assert jax.config.read('jax_enable_x64'), "FP64 is disabled for Jax."
def seed(self, seed: int):
self.rnd_key = jax.random.PRNGKey(seed)
def as_tensor(self, x, convert_external=True):
self._check_float64()
if self.is_tensor(x, only_native=convert_external):
array = x
else:
array = jnp.array(x)
# --- Enforce Precision ---
if not isinstance(array, numbers.Number):
if self.dtype(array).kind == float:
array = self.to_float(array)
elif self.dtype(array).kind == complex:
array = self.to_complex(array)
return array
def is_tensor(self, x, only_native=False):
if isinstance(x, jnp.ndarray) and not isinstance(x, np.ndarray): # NumPy arrays inherit from Jax arrays
return True
# if scipy.sparse.issparse(x): # TODO
# return True
if isinstance(x, jnp.bool_):
return True
# --- Above considered native ---
if only_native:
return False
# --- Non-native types ---
if isinstance(x, np.ndarray):
return True
if isinstance(x, (numbers.Number, bool, str)):
return True
if isinstance(x, (tuple, list)):
return all([self.is_tensor(item, False) for item in x])
return False
def is_available(self, tensor):
return not isinstance(tensor, Tracer)
def numpy(self, x):
return np.array(x)
def to_dlpack(self, tensor):
from jax import dlpack
return dlpack.to_dlpack(tensor)
def from_dlpack(self, capsule):
from jax import dlpack
return dlpack.from_dlpack(capsule)
def copy(self, tensor, only_mutable=False):
return jnp.array(tensor, copy=True)
sqrt = staticmethod(jnp.sqrt)
exp = staticmethod(jnp.exp)
sin = staticmethod(jnp.sin)
cos = staticmethod(jnp.cos)
tan = staticmethod(jnp.tan)
log = staticmethod(jnp.log)
log2 = staticmethod(jnp.log2)
log10 = staticmethod(jnp.log10)
isfinite = staticmethod(jnp.isfinite)
abs = staticmethod(jnp.abs)
sign = staticmethod(jnp.sign)
round = staticmethod(jnp.round)
ceil = staticmethod(jnp.ceil)
floor = staticmethod(jnp.floor)
nonzero = staticmethod(jnp.nonzero)
flip = staticmethod(jnp.flip)
stop_gradient = staticmethod(jax.lax.stop_gradient)
transpose = staticmethod(jnp.transpose)
equal = staticmethod(jnp.equal)
tile = staticmethod(jnp.tile)
stack = staticmethod(jnp.stack)
concat = staticmethod(jnp.concatenate)
zeros_like = staticmethod(jnp.zeros_like)
ones_like = staticmethod(jnp.ones_like)
maximum = staticmethod(jnp.maximum)
minimum = staticmethod(jnp.minimum)
clip = staticmethod(jnp.clip)
shape = staticmethod(jnp.shape)
staticshape = staticmethod(jnp.shape)
imag = staticmethod(jnp.imag)
real = staticmethod(jnp.real)
conj = staticmethod(jnp.conjugate)
einsum = staticmethod(jnp.einsum)
cumsum = staticmethod(jnp.cumsum)
def jit_compile(self, f: Callable) -> Callable:
def run_jit_f(*args):
logging.debug(f"JaxBackend: running jit-compiled '{f.__name__}' with shapes {[arg.shape for arg in args]} and dtypes {[arg.dtype.name for arg in args]}")
return self.as_registered.call(jit_f, *args, name=f"run jit-compiled '{f.__name__}'")
run_jit_f.__name__ = f"Jax-Jit({f.__name__})"
jit_f = jax.jit(f)
return run_jit_f
def block_until_ready(self, values):
if isinstance(values, DeviceArray):
values.block_until_ready()
if isinstance(values, (tuple, list)):
for v in values:
self.block_until_ready(v)
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
if get_output:
@wraps(f)
def aux_f(*args):
output = f(*args)
if isinstance(output, (tuple, list)) and len(output) == 1:
output = output[0]
result = (output[0], output[1:]) if isinstance(output, (tuple, list)) else (output, None)
if result[0].ndim > 0:
result = jnp.sum(result[0]), result[1]
return result
jax_grad_f = jax.value_and_grad(aux_f, argnums=wrt, has_aux=True)
@wraps(f)
def unwrap_outputs(*args):
(loss, aux), grads = jax_grad_f(*args)
return (loss, *aux, *grads) if aux is not None else (loss, *grads)
return unwrap_outputs
else:
@wraps(f)
def nonaux_f(*args):
output = f(*args)
result = output[0] if isinstance(output, (tuple, list)) else output
if result.ndim > 0:
result = jnp.sum(result)
return result
return jax.grad(nonaux_f, argnums=wrt, has_aux=False)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
jax_fun = jax.custom_vjp(f) # custom vector-Jacobian product (reverse-mode differentiation)
def forward(*x):
y = f(*x)
return y, (x, y)
def backward(x_y, dy):
x, y = x_y
dx = gradient(x, y, dy)
return tuple(dx)
jax_fun.defvjp(forward, backward)
return jax_fun
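    # Usage sketch for custom_gradient (illustrative only; `backend` is assumed to be a
    # JaxBackend instance and the clip boundary is ignored in the gradient for brevity):
    #
    #   def clipped_exp(x):
    #       return jnp.exp(jnp.clip(x, -10, 10))
    #
    #   def clipped_exp_grad(x, y, dy):
    #       # x is the tuple of primal inputs, y the forward output, dy the incoming
    #       # cotangent; return one cotangent per input.
    #       return (dy * y,)
    #
    #   f_custom = backend.custom_gradient(clipped_exp, clipped_exp_grad)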
def divide_no_nan(self, x, y):
return jnp.nan_to_num(x / y, copy=True, nan=0)
def random_uniform(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.uniform(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def random_normal(self, shape):
self._check_float64()
self.rnd_key, subkey = jax.random.split(self.rnd_key)
return random.normal(subkey, shape, dtype=to_numpy_dtype(self.float_type))
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
if limit is None:
start, limit = 0, start
return jnp.arange(start, limit, delta, to_numpy_dtype(dtype))
def pad(self, value, pad_width, mode='constant', constant_values=0):
assert mode in ('constant', 'symmetric', 'periodic', 'reflect', 'boundary'), mode
if mode == 'constant':
constant_values = jnp.array(constant_values, dtype=value.dtype)
return jnp.pad(value, pad_width, 'constant', constant_values=constant_values)
else:
if mode in ('periodic', 'boundary'):
mode = {'periodic': 'wrap', 'boundary': 'edge'}[mode]
return jnp.pad(value, pad_width, mode)
def reshape(self, value, shape):
return jnp.reshape(value, shape)
def sum(self, value, axis=None, keepdims=False):
if isinstance(value, (tuple, list)):
assert axis == 0
return sum(value[1:], value[0])
return jnp.sum(value, axis=axis, keepdims=keepdims)
def prod(self, value, axis=None):
if not isinstance(value, jnp.ndarray):
value = jnp.array(value)
if value.dtype == bool:
return jnp.all(value, axis=axis)
return jnp.prod(value, axis=axis)
def where(self, condition, x=None, y=None):
if x is None or y is None:
return jnp.argwhere(condition)
return jnp.where(condition, x, y)
def zeros(self, shape, dtype: DType = None):
self._check_float64()
return jnp.zeros(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def ones(self, shape, dtype: DType = None):
self._check_float64()
return jnp.ones(shape, dtype=to_numpy_dtype(dtype or self.float_type))
def meshgrid(self, *coordinates):
self._check_float64()
coordinates = [self.as_tensor(c) for c in coordinates]
return jnp.meshgrid(*coordinates, indexing='ij')
def linspace(self, start, stop, number):
self._check_float64()
return jnp.linspace(start, stop, number, dtype=to_numpy_dtype(self.float_type))
def mean(self, value, axis=None, keepdims=False):
return jnp.mean(value, axis, keepdims=keepdims)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
return jnp.tensordot(a, b, (a_axes, b_axes))
def mul(self, a, b):
# if scipy.sparse.issparse(a): # TODO sparse?
# return a.multiply(b)
# elif scipy.sparse.issparse(b):
# return b.multiply(a)
# else:
return Backend.mul(self, a, b)
def matmul(self, A, b):
return jnp.stack([A.dot(b[i]) for i in range(b.shape[0])])
def while_loop(self, loop: Callable, values: tuple):
if all(self.is_available(t) for t in values):
while jnp.any(values[0]):
values = loop(*values)
return values
else:
cond = lambda vals: jnp.any(vals[0])
body = lambda vals: loop(*vals)
return jax.lax.while_loop(cond, body, values)
def max(self, x, axis=None, keepdims=False):
return jnp.max(x, axis, keepdims=keepdims)
def min(self, x, axis=None, keepdims=False):
return jnp.min(x, axis, keepdims=keepdims)
def conv(self, value, kernel, zero_padding=True):
assert kernel.shape[0] in (1, value.shape[0])
assert value.shape[1] == kernel.shape[2], f"value has {value.shape[1]} channels but kernel has {kernel.shape[2]}"
assert value.ndim + 1 == kernel.ndim
# AutoDiff may require jax.lax.conv_general_dilated
if zero_padding:
result = np.zeros((value.shape[0], kernel.shape[1], *value.shape[2:]), dtype=to_numpy_dtype(self.float_type))
else:
valid = [value.shape[i + 2] - kernel.shape[i + 3] + 1 for i in range(value.ndim - 2)]
result = np.zeros([value.shape[0], kernel.shape[1], *valid], dtype=to_numpy_dtype(self.float_type))
mode = 'same' if zero_padding else 'valid'
for b in range(value.shape[0]):
b_kernel = kernel[min(b, kernel.shape[0] - 1)]
for o in range(kernel.shape[1]):
for i in range(value.shape[1]):
result[b, o, ...] += scipy.signal.correlate(value[b, i, ...], b_kernel[o, i, ...], mode=mode)
return result
def expand_dims(self, a, axis=0, number=1):
for _i in range(number):
a = jnp.expand_dims(a, axis)
return a
def cast(self, x, dtype: DType):
if self.is_tensor(x, only_native=True) and from_numpy_dtype(x.dtype) == dtype:
return x
else:
return jnp.array(x, to_numpy_dtype(dtype))
def batched_gather_nd(self, values, indices):
assert indices.shape[-1] == self.ndims(values) - 2
batch_size = combined_dim(values.shape[0], indices.shape[0])
results = []
for b in range(batch_size):
b_values = values[min(b, values.shape[0] - 1)]
b_indices = self.unstack(indices[min(b, indices.shape[0] - 1)], -1)
results.append(b_values[b_indices])
return jnp.stack(results)
def std(self, x, axis=None, keepdims=False):
return jnp.std(x, axis, keepdims=keepdims)
def boolean_mask(self, x, mask, axis=0):
slices = [mask if i == axis else slice(None) for i in range(len(x.shape))]
return x[tuple(slices)]
def any(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.any(boolean_tensor, axis=axis, keepdims=keepdims)
def all(self, boolean_tensor, axis=None, keepdims=False):
if isinstance(boolean_tensor, (tuple, list)):
boolean_tensor = jnp.stack(boolean_tensor)
return jnp.all(boolean_tensor, axis=axis, keepdims=keepdims)
def scatter(self, base_grid, indices, values, mode: str):
base_grid, values = self.auto_cast(base_grid, values)
batch_size = combined_dim(combined_dim(indices.shape[0], values.shape[0]), base_grid.shape[0])
spatial_dims = tuple(range(base_grid.ndim - 2))
dnums = jax.lax.ScatterDimensionNumbers(update_window_dims=(1,), # channel dim of updates (batch dim removed)
inserted_window_dims=spatial_dims, # no idea what this does but spatial_dims seems to work
scatter_dims_to_operand_dims=spatial_dims) # spatial dims of base_grid (batch dim removed)
scatter = jax.lax.scatter_add if mode == 'add' else jax.lax.scatter
result = []
for b in range(batch_size):
b_grid = base_grid[b, ...]
b_indices = indices[min(b, indices.shape[0] - 1), ...]
b_values = values[min(b, values.shape[0] - 1), ...]
result.append(scatter(b_grid, b_indices, b_values, dnums))
return jnp.stack(result)
def quantile(self, x, quantiles):
return jnp.quantile(x, quantiles, axis=-1)
def fft(self, x, axes: tuple or list):
x = self.to_complex(x)
if not axes:
return x
if len(axes) == 1:
return np.fft.fft(x, axis=axes[0]).astype(x.dtype)
elif len(axes) == 2:
return np.fft.fft2(x, axes=axes).astype(x.dtype)
else:
return np.fft.fftn(x, axes=axes).astype(x.dtype)
def ifft(self, k, axes: tuple or list):
if not axes:
return k
if len(axes) == 1:
return np.fft.ifft(k, axis=axes[0]).astype(k.dtype)
elif len(axes) == 2:
return np.fft.ifft2(k, axes=axes).astype(k.dtype)
else:
return np.fft.ifftn(k, axes=axes).astype(k.dtype)
def dtype(self, array) -> DType:
if isinstance(array, int):
return DType(int, 32)
if isinstance(array, float):
return DType(float, 64)
if isinstance(array, complex):
return DType(complex, 128)
if not isinstance(array, jnp.ndarray):
array = jnp.array(array)
return from_numpy_dtype(array.dtype)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
if method == 'auto' and not trj and not self.is_available(y):
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
else:
return Backend.linear_solve(self, method, lin, y, x0, rtol, atol, max_iter, trj)
| tum-pbs/PhiFlow | phi/jax/_jax_backend.py | Python | mit | 16,438 |
def urepr(x):
import re, unicodedata
def toname(m):
try:
return r"\N{%s}" % unicodedata.name(unichr(int(m.group(1), 16)))
except ValueError:
return m.group(0)
return re.sub(
r"\\[xu]((?<=x)[0-9a-f]{2}|(?<=u)[0-9a-f]{4})",
toname,
repr(x)
)
def displayhook(x):
if x is not None:
print urepr(x)
def install():
import sys
sys.displayhook = displayhook
def uninstall():
import sys
sys.displayhook = sys.__displayhook__
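# Usage sketch (illustrative interactive transcript):
#   >>> install()
#   >>> u'caf\xe9'
#   u'caf\N{LATIN SMALL LETTER E WITH ACUTE}'
#   >>> uninstall()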
| ActiveState/code | recipes/Python/541082_Unicode_repr/recipe-541082.py | Python | mit | 528 |
def no_extreme(listed):
"""
Takes a list and chops off extreme ends
"""
del listed[0]
del listed[-1:]
return listed
def better_no_extreme(listed):
"""
    Why better? For starters, it does not modify the original list.
"""
return listed[1:-1]
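# For example (illustrative):
#   nums = [1, 2, 3, 4]
#   better_no_extreme(nums) # -> [2, 3]; nums is left untouched
#   no_extreme(nums) # -> [2, 3]; nums itself is now [2, 3]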
t = ['a','b','c']
print t
print '\n'
print 'pop any element : by t.pop(1) or t.remove(\'b\') or del t[1]'
del t[1]
print t
st = ['a','b','c','d','e','f']
print st
del st[1:3]
print 'del t[1:3] works as well : ', st
print 'Mid part is : ',no_extreme(st)
str = raw_input("\nEnter a string to be converted to list : ")
listr = list(str)
print listr
str2=raw_input("\nEnter a line to be separated into words : ")
listr2 = str2.split()#separated at spaces
print listr2
print 'You can split a line into words by changing the parameter as str2.split(parameter)'
print 'this splits at - '
print 'joining statement : '
delimeter=' '
print delimeter.join(listr2)
print '\nNote: 2 identical lists are 2 objects ,so l_a is l_b for identical lists still says False'
print 'This does not happen for strings etc'
print 'l_a is l_b only true if assigned as l_b = l_a'
print '\n t.append(x) returns None , whereas t+[y] is not None'
print '\n Never t = t[1:] as empty , same goes for t.sort()'
print '\nDO\'s : t.append(x)\n t = t+[x] \n '
print 'Keep copy of original just in case : orig = t[:] \nt.sort()'
print '\nDONT\'s : t.append([x])\n t = t.append(x) \n t + [x] \n t = t + x' | kaustubhhiware/hiPy | think_python/lists2.py | Python | mit | 1,424 |
import os, sys
import random
import time
import feedparser
import itertools
import HTMLParser
from feed import Feed
if os.getcwd().rstrip(os.sep).endswith('feeds'):
os.chdir('..')
sys.path.insert(0, os.getcwd())
from gui_client import new_rpc
import web
import reddit
class RSSFeed(Feed):
def __init__(self):
self.title = 'RSS Feed'
self.streams = []
self.wait_range = (60, 70)
self.max_error_wait = 600
self.max_subs = 0
self.urls = set()
def configure(self):
pass
def watch(self, new_streams=None):
self.configure()
self.web = web.Web()
try:
self.rpc = new_rpc(self.title)
except:
self.rpc = None
print 'Warning: Running without RPC'
if new_streams is None:
new_streams = []
streams = self.streams + new_streams
for url in itertools.cycle(streams):
print url
self.check_feed(url)
time.sleep(random.randint(*self.wait_range))
def check_feed(self, url):
for fail_count in itertools.count():
try:
datad = feedparser.parse(url)
except:
print 'Parse error for', url
time.sleep(min(2 ** fail_count, self.max_error_wait))
else:
break
try:
posts = datad['items']
except:
print 'No items field for', url
posts = []
for post in posts:
self.check_post(post)
def check_post(self, post):
if ('link' not in post):
return False
url = self.url_pre_filter(post['link'])
try:
req = self.web.get(url)
url = req.geturl()
except:
print 'URL retrieval error for ', url
return False
url = self.url_post_filter(url)
if (url in self.urls) or not url.startswith('http://'):
return False
self.urls.add(url)
feed_title = self.default_title_filter(post.get('title', ''))
page_title = self.default_title_filter(self.web.title(req))
title = self.title_filter(page_title, feed_title)
if self.rpc is not None:
subreddit = self.rpc.get_title_subreddit(title)
keywords = self.rpc.get_title_keywords(title)
if self.rpc.get_link_posted_count(url, title) <= self.max_subs:
stats = self.rpc.get_learned_stats(title, keywords)
self.rpc.gui_link_add(self.title, title, url, subreddit, keywords, **stats)
try:
req.close()
except:
pass
print title
print url
def url_pre_filter(self, url):
return url
def url_post_filter(self, url):
return url
def default_title_filter(self, title):
h = HTMLParser.HTMLParser()
return h.unescape(title)
def title_filter(self, page_title, feed_title):
return page_title
if __name__ == '__main__':
f = RSSFeed()
f.watch(['http://www.physorg.com/rss-feed/'])
| pjurik2/pykarma | feeds/rss.py | Python | mit | 3,205 |
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mat", type=str, help="mat file with observations X and side info", required=True)
parser.add_argument("--epochs", type=int, help="number of epochs", default = 2000)
parser.add_argument("--hsize", type=int, help="size of the hidden layer", default = 30)
parser.add_argument("--batch-size", type=int, help="batch size", default = 512)
args = parser.parse_args()
import tensorflow as tf
import scipy.io
import numpy as np
import chemblnet as cn
import chemblnet.vbutils as vb
data = scipy.io.matlab.loadmat(args.mat)
label = data["X"]
Fu = data["Fu"].todense()
Fv = data["Fv"].todense()
# 109, 167, 168, 204, 214, 215
Ytrain, Ytest = cn.make_train_test(label, 0.5)
Ytrain = Ytrain.tocsr()
Ytest = Ytest.tocsr()
# learning parameters
Y_prec = 1.5
h1_size = args.hsize
batch_size = args.batch_size
lrate = 1e-1
lrate_decay = 1.0
print("Data file: %s" % args.mat)
print("Y size: [%d, %d]" % (label.shape[0], label.shape[1]))
print("Num row feat: %d" % Fu.shape[1])
print("Num col feat: %d" % Fv.shape[1])
print("Test stdev: %.4f" % np.std( Ytest.data ))
print("-----------------------")
print("Num epochs: %d" % args.epochs)
print("Hidden size: %d" % args.hsize)
print("Learning rate: %.1e" % lrate)
print("Batch size: %d" % batch_size)
print("-----------------------")
extra_info = False
## y_val is a vector of values and y_coord gives their coordinates
y_val = tf.placeholder(tf.float32, name="y_val")
y_coord = tf.placeholder(tf.int32, shape=[None, 2], name="y_coord")
#y_idx_u = tf.placeholder(tf.int64)
#y_idx_v = tf.placeholder(tf.int64)
x_u = tf.placeholder(tf.float32, shape=[None, Fu.shape[1]], name="x_u")
x_v = tf.placeholder(tf.float32, shape=[None, Fv.shape[1]], name="x_v")
u_idx = tf.placeholder(tf.int64, name="u_idx")
#v_idx = tf.placeholder(tf.int64, name="v_idx")
learning_rate = tf.placeholder(tf.float32, name = "learning_rate")
## ratio of total training points to mini-batch training points, for the current batch
tb_ratio = tf.placeholder(tf.float32, name = "tb_ratio")
bsize = tf.placeholder(tf.float32, name = "bsize")
## model
#beta_u = vb.NormalGammaUni("beta_u", shape = [Fu.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
#beta_v = vb.NormalGammaUni("beta_v", shape = [Fv.shape[1], h1_size], initial_stdev = 0.1, fixed_prec = False)
U = vb.NormalGammaUni("U", shape = [Ytrain.shape[0], h1_size], initial_stdev = 1.0, fixed_prec = False)
V = vb.NormalGammaUni("V", shape = [Ytrain.shape[1], h1_size], initial_stdev = 1.0, fixed_prec = False)
global_mean = tf.constant(Ytrain.data.mean(), dtype=tf.float32)
## means
Umean_b = tf.gather(U.mean, u_idx)
Vmean_b = V.mean
#h_u = tf.matmul(x_u, beta_u.mean) + Umean_b
h_u = Umean_b
h_v = Vmean_b
y_pred = tf.matmul(h_u, h_v, transpose_b=True)
y_pred_b = global_mean + tf.gather_nd(y_pred, y_coord)
y_sse = tf.reduce_sum( tf.square(y_val - y_pred_b) )
y_loss = Y_prec / 2.0 * y_sse
## variance
Uvar_b = tf.exp(tf.gather(U.logvar, u_idx))
Vvar_b = V.var
#h_u_var = tf.matmul(tf.square(x_u), beta_u.var) + Uvar_b
#h_v_var = tf.matmul(tf.square(x_v), beta_v.var) + Vvar_b
h_u_var = Uvar_b
h_v_var = Vvar_b
y_var = Y_prec / 2.0 * tf.matmul(h_u_var, h_v_var + tf.square(h_v), transpose_b=True) + Y_prec / 2.0 * tf.matmul(tf.square(h_u), h_v_var, transpose_b=True)
var_loss = tf.gather_nd(y_var, y_coord)
L_D = tb_ratio * (y_loss + var_loss)
#L_prior = beta_u.prec_div() + beta_v.prec_div() + U.prec_div() + V.prec_div() + beta_u.normal_div() + beta_v.normal_div() + U.normal_div_partial(Umean_b, Uvar_b, bsize) + V.normal_div()
L_prior = U.prec_div() + V.prec_div() + U.normal_div() + V.normal_div()
loss = L_D + L_prior
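# Reading of the objective above: L_D is the mini-batch data term, i.e. the squared-error
# likelihood part (y_loss) plus the variance correction from the variational posterior
# (var_loss), rescaled by tb_ratio so the batch estimates the full training set.
# L_prior collects the KL-style penalties on U and V (precision and normal divergences).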
train_op = tf.train.AdagradOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#train_op = tf.train.MomentumOptimizer(1e-7, 0.90).minimize(loss)
######################################################
def select_y(X, row_idx):
Xtmp = X[row_idx]
return np.column_stack(Xtmp.nonzero()), Xtmp.data.astype(np.float32), [0, 0]
rIdx = np.random.permutation(Ytrain.shape[0])
# ---------- test data ------------- #
Yte_coord, Yte_values, Yte_shape = select_y(Ytest, np.arange(Ytest.shape[0]))
# ------- train data (all) --------- #
Ytr_coord, Ytr_values, Ytr_shape = select_y(Ytrain, np.arange(Ytrain.shape[0]))
sess = tf.Session()
if True:
sess.run(tf.global_variables_initializer())
for epoch in range(args.epochs):
rIdx = np.random.permutation(Ytrain.shape[0])
## mini-batch loop
for start in np.arange(0, Ytrain.shape[0], batch_size):
if start + batch_size > Ytrain.shape[0]:
break
idx = rIdx[start : start + batch_size]
by_coord, by_values, by_shape = select_y(Ytrain, idx)
sess.run(train_op, feed_dict={x_u: Fu[idx,:],
x_v: Fv,
y_coord: by_coord,
y_val: by_values,
u_idx: idx,
tb_ratio: Ytrain.shape[0] / float(len(idx)),#Ytrain.nnz / float(by_values.shape[0]),
learning_rate: lrate,
bsize: batch_size
})
## TODO: check from here
## epoch's Ytest error
if epoch % 1 == 0:
test_y_pred = sess.run(y_pred_b,
feed_dict = {x_u: Fu,
x_v: Fv,
y_coord: Yte_coord,
y_val: Yte_values,
u_idx: np.arange(Ytrain.shape[0])})
test_rmse = np.sqrt(np.mean(np.square(test_y_pred - Yte_values)))
train_y_pred = sess.run(y_pred_b,
feed_dict = {x_u: Fu,
x_v: Fv,
y_coord: Ytr_coord,
y_val: Ytr_values,
u_idx: np.arange(Ytrain.shape[0])})
train_rmse = np.sqrt(np.mean(np.square(train_y_pred - Ytr_values)))
#L_D_tr, loss_tr, beta_u, beta_v = sess.run([L_D, loss, beta.prec_div(), beta.normal_div()],
# feed_dict={x_indices: Xi,
# x_shape: Xs,
# x_ids_val: Xv,
# x_idx_comp: Xindices,
# y_idx_comp: Ytr_idx_comp,
# y_idx_prot: Ytr_idx_prot,
# y_val: Ytr_val,
# tb_ratio: 1.0,
# bsize: Ytrain.shape[0]
# })
# beta_l2 = np.sqrt(sess.run(tf.nn.l2_loss(beta.mean)))
# beta_std_min = np.sqrt(sess.run(tf.reduce_min(beta.var)))
# beta_prec = sess.run(beta.prec)
# V_prec = sess.run(V.prec)
# V_l2 = np.sqrt(sess.run(tf.nn.l2_loss(V.mean)))
# Z_prec = sess.run(Z.prec)
# #W2_l2 = sess.run(tf.nn.l2_loss(W2))
# test_rmse = np.sqrt( test_sse / Yte_val.shape[0])
# train_rmse = np.sqrt( train_sse / Ytr_val.shape[0])
if epoch % 20 == 0:
print("Epoch\tRMSE(te, tr)\t\t|")
print("%3d.\t%.5f %.5f\t|" % (epoch, test_rmse, train_rmse))
if extra_info:
#print("beta: [%s]" % beta.summarize(sess))
#print("Z: [%s]" % Z.summarize(sess))
print("V: [%s]" % V.summarize(sess))
| jaak-s/chemblnet | models/vaffl.py | Python | mit | 7,837 |
import os.path
import subprocess
import pkg_resources
import setuptools # pylint: disable=unused-import
def get_package_revision(package_name):
# type: (str) -> str
"""Determine the Git commit hash for the Shopify package.
If the package is installed in "develop" mode the SHA is retrieved using Git. Otherwise it will be retrieved from
the package's Egg metadata. Returns an empty string if the package is not installed or does not contain revision
information.
"""
egg_info = pkg_resources.working_set.find(pkg_resources.Requirement.parse(package_name))
if egg_info is None:
return ''
if os.path.exists(os.path.join(egg_info.location, '.git')):
return str(subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=egg_info.location).decode()).strip()
if egg_info.has_metadata('git_sha.txt'):
return egg_info.get_metadata('git_sha.txt')
return ''
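# Usage sketch (the package name is illustrative; an empty string means the package is
# not installed or carries no revision information):
#
#   sha = get_package_revision('shopify_python')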
def write_package_revision(cmd, _, filename):
# type: (setuptools.Command, str, str) -> None
"""Write the Git commit hash for the package that is currently being built.
If the build is not occurring from a Git checkout the current revision must be stored in a text file named
"REVISION".
This function should not be called except via setuptools, by specifying an 'egg_info.writers' entrypoint as follows:
setuptools.setup(
name='test_packaging',
...
install_requires=[
'shopify_python'
],
...
entry_points={
'egg_info.writers': [
'git_sha.txt = shopify_python.packaging:write_package_revision',
],
}
...
)
"""
git_sha = None
if os.path.exists('.git'):
git_sha = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
elif os.path.exists('REVISION'):
with open('REVISION') as revision_file:
git_sha = revision_file.read().strip()
if git_sha is not None:
cmd.write_or_delete_file("Git SHA", filename, git_sha)
| Shopify/shopify_python | shopify_python/packaging.py | Python | mit | 2,108 |
#!/usr/bin/env python3
from setuptools import setup
setup(
name='SecFS',
version='0.1.0',
description='6.858 final project --- an encrypted and authenticated file system',
long_description= open('README.md', 'r').read(),
author='Jon Gjengset',
author_email='[email protected]',
maintainer='MIT PDOS',
maintainer_email='[email protected]',
url='https://github.com/mit-pdos/6.858-secfs',
packages=['secfs', 'secfs.store'],
install_requires=['llfuse', 'Pyro4', 'serpent', 'cryptography'],
scripts=['bin/secfs-server', 'bin/secfs-fuse'],
license='MIT',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Topic :: Education",
"Topic :: Security",
"Topic :: System :: Filesystems",
]
)
| mit-pdos/secfs-skeleton | setup.py | Python | mit | 883 |
from django.conf.urls import url
from blog import views
urlpatterns = [
url(r'^view$', views.archive),
url(r'^$', views.welcome),
url(r'^create', views.create_blogpost),
]
| kalicodextu/djangoblog | blog/urls.py | Python | mit | 186 |
"""Helper to handle a set of topics to subscribe to."""
from __future__ import annotations
from collections import deque
from collections.abc import Callable
import datetime as dt
from functools import wraps
from typing import Any
import attr
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.util import dt as dt_util
from .const import ATTR_DISCOVERY_PAYLOAD, ATTR_DISCOVERY_TOPIC
from .models import MessageCallbackType, PublishPayloadType
DATA_MQTT_DEBUG_INFO = "mqtt_debug_info"
STORED_MESSAGES = 10
def initialize(hass: HomeAssistant):
"""Initialize MQTT debug info."""
hass.data[DATA_MQTT_DEBUG_INFO] = {"entities": {}, "triggers": {}}
def log_messages(
hass: HomeAssistant, entity_id: str
) -> Callable[[MessageCallbackType], MessageCallbackType]:
"""Wrap an MQTT message callback to support message logging."""
def _log_message(msg):
"""Log message."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
messages = debug_info["entities"][entity_id]["subscriptions"][
msg.subscribed_topic
]["messages"]
if msg not in messages:
messages.append(msg)
def _decorator(msg_callback: MessageCallbackType) -> MessageCallbackType:
@wraps(msg_callback)
def wrapper(msg: Any) -> None:
"""Log message."""
_log_message(msg)
msg_callback(msg)
setattr(wrapper, "__entity_id", entity_id)
return wrapper
return _decorator
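# Sketch of how log_messages is typically applied (entity id and callback are illustrative):
#
#   wrapped = log_messages(hass, "sensor.example")(message_received)
#
# The wrapper stores each received message in the per-entity "subscriptions" deque
# before delegating to the original callback.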
@attr.s(slots=True, frozen=True)
class TimestampedPublishMessage:
"""MQTT Message."""
topic: str = attr.ib()
payload: PublishPayloadType = attr.ib()
qos: int = attr.ib()
retain: bool = attr.ib()
timestamp: dt.datetime = attr.ib(default=None)
def log_message(
hass: HomeAssistant,
entity_id: str,
topic: str,
payload: PublishPayloadType,
qos: int,
retain: bool,
) -> None:
"""Log an outgoing MQTT message."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
if topic not in entity_info["transmitted"]:
entity_info["transmitted"][topic] = {
"messages": deque([], STORED_MESSAGES),
}
msg = TimestampedPublishMessage(
topic, payload, qos, retain, timestamp=dt_util.utcnow()
)
entity_info["transmitted"][topic]["messages"].append(msg)
def add_subscription(hass, message_callback, subscription):
"""Prepare debug data for subscription."""
if entity_id := getattr(message_callback, "__entity_id", None):
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
if subscription not in entity_info["subscriptions"]:
entity_info["subscriptions"][subscription] = {
"count": 0,
"messages": deque([], STORED_MESSAGES),
}
entity_info["subscriptions"][subscription]["count"] += 1
def remove_subscription(hass, message_callback, subscription):
"""Remove debug data for subscription if it exists."""
entity_id = getattr(message_callback, "__entity_id", None)
if entity_id and entity_id in hass.data[DATA_MQTT_DEBUG_INFO]["entities"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"] -= 1
if not hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"].pop(
subscription
)
def add_entity_discovery_data(hass, discovery_data, entity_id):
"""Add discovery data."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}, "transmitted": {}}
)
entity_info["discovery_data"] = discovery_data
def update_entity_discovery_data(hass, discovery_payload, entity_id):
"""Update discovery data."""
entity_info = hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]
entity_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_entity_data(hass, entity_id):
"""Remove discovery data."""
if entity_id in hass.data[DATA_MQTT_DEBUG_INFO]["entities"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"].pop(entity_id)
def add_trigger_discovery_data(hass, discovery_hash, discovery_data, device_id):
"""Add discovery data."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
debug_info["triggers"][discovery_hash] = {
"device_id": device_id,
"discovery_data": discovery_data,
}
def update_trigger_discovery_data(hass, discovery_hash, discovery_payload):
"""Update discovery data."""
trigger_info = hass.data[DATA_MQTT_DEBUG_INFO]["triggers"][discovery_hash]
trigger_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_trigger_discovery_data(hass, discovery_hash):
"""Remove discovery data."""
hass.data[DATA_MQTT_DEBUG_INFO]["triggers"].pop(discovery_hash)
def _info_for_entity(hass: HomeAssistant, entity_id: str) -> dict[str, Any]:
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
entity_info = mqtt_debug_info["entities"][entity_id]
subscriptions = [
{
"topic": topic,
"messages": [
{
"payload": str(msg.payload),
"qos": msg.qos,
"retain": msg.retain,
"time": msg.timestamp,
"topic": msg.topic,
}
for msg in subscription["messages"]
],
}
for topic, subscription in entity_info["subscriptions"].items()
]
transmitted = [
{
"topic": topic,
"messages": [
{
"payload": str(msg.payload),
"qos": msg.qos,
"retain": msg.retain,
"time": msg.timestamp,
"topic": msg.topic,
}
for msg in subscription["messages"]
],
}
for topic, subscription in entity_info["transmitted"].items()
]
discovery_data = {
"topic": entity_info["discovery_data"].get(ATTR_DISCOVERY_TOPIC, ""),
"payload": entity_info["discovery_data"].get(ATTR_DISCOVERY_PAYLOAD, ""),
}
return {
"entity_id": entity_id,
"subscriptions": subscriptions,
"discovery_data": discovery_data,
"transmitted": transmitted,
}
def _info_for_trigger(hass: HomeAssistant, trigger_key: str) -> dict[str, Any]:
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
trigger = mqtt_debug_info["triggers"][trigger_key]
discovery_data = None
if trigger["discovery_data"] is not None:
discovery_data = {
"topic": trigger["discovery_data"][ATTR_DISCOVERY_TOPIC],
"payload": trigger["discovery_data"][ATTR_DISCOVERY_PAYLOAD],
}
return {"discovery_data": discovery_data, "trigger_key": trigger_key}
def info_for_config_entry(hass):
"""Get debug info for all entities and triggers."""
mqtt_info = {"entities": [], "triggers": []}
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
for entity_id in mqtt_debug_info["entities"]:
mqtt_info["entities"].append(_info_for_entity(hass, entity_id))
for trigger_key in mqtt_debug_info["triggers"]:
mqtt_info["triggers"].append(_info_for_trigger(hass, trigger_key))
return mqtt_info
def info_for_device(hass, device_id):
"""Get debug info for a device."""
mqtt_info = {"entities": [], "triggers": []}
entity_registry = er.async_get(hass)
entries = er.async_entries_for_device(
entity_registry, device_id, include_disabled_entities=True
)
mqtt_debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
for entry in entries:
if entry.entity_id not in mqtt_debug_info["entities"]:
continue
mqtt_info["entities"].append(_info_for_entity(hass, entry.entity_id))
for trigger_key, trigger in mqtt_debug_info["triggers"].items():
if trigger["device_id"] != device_id:
continue
mqtt_info["triggers"].append(_info_for_trigger(hass, trigger_key))
return mqtt_info
| rohitranjan1991/home-assistant | homeassistant/components/mqtt/debug_info.py | Python | mit | 8,644 |
def is_palindrome(data):
if isinstance(data, list):
data = ''.join(c.lower() for c in ''.join(data) if c.isalpha())
if isinstance(data, str):
return "Palindrome" if data == data[::-1] else "Not a palindrome"
else:
return "Invalid input"
if __name__ == "__main__":
with open("input/input4.txt", "r") as file:
num, *lines = file.read().splitlines()
print(is_palindrome(lines)) | marcardioid/DailyProgrammer | solutions/232_Easy/solution.py | Python | mit | 427 |
from typing import Any, Dict, List, Sequence, Tuple
from gym3.types import ValType
class Env:
"""
An interface for reinforcement learning environments.
:param ob_space: ValType representing the valid observations generated by the environment
:param ac_space: ValType representing the valid actions that an agent can take in the environment
:param num: number of simultaneous episodes
"""
def __init__(self, ob_space: ValType, ac_space: ValType, num: int):
self.ob_space = ob_space
self.ac_space = ac_space
self.num = num
def observe(self) -> Tuple[Any, Any, Any]:
"""
Structured data that's accessible to the agent
Can be called zero or more times per act() call; it is idempotent and does
not affect the environment. Returns the observation after the last act()
call has affected the environment or the initial state if no act()
call has been made.
The following snippet shows what the start of a rollout looks like, and
the usual convention for indexing the values:
env = Env()
# initial reward is ignored; didn't depend on actions
_reward, ob_0, first_0 = env.observe()
env.act(ac_0)
reward_0, ob_1, first_1 = env.observe()
env.act(ac_1)
Note that the initial reward `_reward` is ignored by algorithms, since it
didn't depend on the actions. And that first_0 is always True since the
environment was just created.
:returns:
reward: (float array) last reward, shaped (num,)
ob: observation matching ob_space, with (num,) prepended to the shape of each leaf
first: (bool array) whether new episode just started, shaped (num,)
"""
raise NotImplementedError
def get_info(self) -> List[Dict]:
"""
Return unstructured diagnostics that aren't accessible to the agent
Per-episode stats, rendered images, etc.
Corresponds to same timestep as the observation from observe().
:returns: a list of dictionaries with length `self.num`
"""
return [{} for _ in range(self.num)]
def act(self, ac: Any) -> None:
"""
Apply an action
:param ac: action matching ac_space, with (num,) prepended to the shape of each leaf
"""
raise NotImplementedError
def callmethod(
self, method: str, *args: Sequence[Any], **kwargs: Sequence[Any]
) -> List[Any]:
"""
Call a method on the underlying python object that offers the Gym3 interface.
By default, this is the Env instance (self), but this can be overridden if you are
encapsulating another object or if your wrapper wishes to handle the method call.
:param method: name of method to call on the base object
:param *args, **kwargs: the value of each argument must be a list with length equal to `self.num`
:returns: A list of objects, with length equal to `self.num`
"""
return getattr(self, method)(*args, **kwargs)
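# Rollout sketch (illustrative; `env` is any concrete Env implementation and `agent`
# is a hypothetical policy object):
#
#   _reward, ob, first = env.observe()
#   while True:
#       ac = agent.act(ob, first)
#       env.act(ac)
#       reward, ob, first = env.observe()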
| openai/gym3 | gym3/env.py | Python | mit | 3,105 |
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1440369075.543512
_enable_loop = True
_template_filename = u'themes/monospace/templates/index.tmpl'
_template_uri = u'index.tmpl'
_source_encoding = 'utf-8'
_exports = [u'content']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace(u'comments', context._clean_inheritance_tokens(), templateuri=u'comments_helper.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, u'comments')] = ns
ns = runtime.TemplateNamespace(u'helper', context._clean_inheritance_tokens(), templateuri=u'index_helper.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, u'helper')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'base.tmpl', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
date_format = context.get('date_format', UNDEFINED)
helper = _mako_get_namespace(context, 'helper')
messages = context.get('messages', UNDEFINED)
posts = context.get('posts', UNDEFINED)
_link = context.get('_link', UNDEFINED)
def content():
return render_content(context._locals(__M_locals))
comments = _mako_get_namespace(context, 'comments')
index_teasers = context.get('index_teasers', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n')
__M_writer(u'\n')
if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
context['self'].content(**pageargs)
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_content(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
date_format = context.get('date_format', UNDEFINED)
helper = _mako_get_namespace(context, 'helper')
messages = context.get('messages', UNDEFINED)
posts = context.get('posts', UNDEFINED)
_link = context.get('_link', UNDEFINED)
def content():
return render_content(context)
comments = _mako_get_namespace(context, 'comments')
index_teasers = context.get('index_teasers', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n')
for post in posts:
__M_writer(u' <div class="postbox">\n <h1><a href="')
__M_writer(unicode(post.permalink()))
__M_writer(u'">')
__M_writer(unicode(post.title()))
__M_writer(u'</a></h1>\n <div class="meta" style="background-color: rgb(234, 234, 234); ">\n <span class="authordate">\n ')
__M_writer(unicode(messages("Posted:")))
__M_writer(u' <time class="published" datetime="')
__M_writer(unicode(post.date.isoformat()))
__M_writer(u'">')
__M_writer(unicode(post.formatted_date(date_format)))
__M_writer(u'</time>\n </span>\n <br>\n <span class="tags">Tags: \n')
if post.tags:
for tag in post.tags:
__M_writer(u' <a class="tag" href="')
__M_writer(unicode(_link('tag', tag)))
__M_writer(u'"><span>')
__M_writer(unicode(tag))
__M_writer(u'</span></a>\n')
__M_writer(u' </span>\n </div>\n ')
__M_writer(unicode(post.text(teaser_only=index_teasers)))
__M_writer(u'\n')
if not post.meta('nocomments'):
__M_writer(u' ')
__M_writer(unicode(comments.comment_link(post.permalink(), post.base_path)))
__M_writer(u'\n')
__M_writer(u' </div>\n')
__M_writer(u' ')
__M_writer(unicode(helper.html_pager()))
__M_writer(u'\n ')
__M_writer(unicode(comments.comment_link_script()))
__M_writer(u'\n\t')
__M_writer(unicode(helper.mathjax_script(posts)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"22": 3, "25": 2, "31": 0, "45": 2, "46": 3, "47": 4, "52": 31, "58": 5, "71": 5, "72": 6, "73": 7, "74": 8, "75": 8, "76": 8, "77": 8, "78": 11, "79": 11, "80": 11, "81": 11, "82": 11, "83": 11, "84": 15, "85": 16, "86": 17, "87": 17, "88": 17, "89": 17, "90": 17, "91": 20, "92": 22, "93": 22, "94": 23, "95": 24, "96": 24, "97": 24, "98": 26, "99": 28, "100": 28, "101": 28, "102": 29, "103": 29, "104": 30, "105": 30, "111": 105}, "uri": "index.tmpl", "filename": "themes/monospace/templates/index.tmpl"}
__M_END_METADATA
"""
| wcmckee/brobeurdotcom | cache/.mako.tmp/index.tmpl.py | Python | mit | 5,371 |
import csv
import os
#### make sure these file names are the same as the ones on your system
baseline_csv = r"baseline.csv"
new_csv = r"cleaned_csv.csv"
########### do not edit below this line #################
baseline_as_rows = []
new_as_rows = []
if not os.path.exists(baseline_csv):
quit("The baseline log csv file is not found - please check your filename '{}'".format(baseline_csv))
if not os.path.exists(new_csv):
quit("Your local log csv file is not found - please check your filename '{}'".format(new_csv))
with open(baseline_csv) as data:
baseline_as_csv = csv.reader(data)
for row in baseline_as_csv:
baseline_as_rows.append(row)
with open(new_csv) as new_data:
new_rows = csv.reader(new_data)
for row in new_rows:
new_as_rows.append(row)
if len(baseline_as_rows) != len(new_as_rows):
quit("Your csv log file '{}' does not have the same number of rows as the baseline log '{}'.".format(new_csv, baseline_csv))
else:
print "Your csv log file '{}' has the same number of rows as the baseline log '{}'.".format(new_csv, baseline_csv)
print
for i, row in enumerate(baseline_as_rows):
if row != new_as_rows[i]:
print "Different row data detected in row #{}".format(i+1)
print "Baseline: \t{}".format(row)
print "New: \t\t{}".format(new_as_rows[i])
print
print "Comparison complete. If you do not see any rows indicated as 'different' your log file is the same as the baseline. Congrats!\nIf you see rows indicated as 'different' check your DROID settings and try again\n\n"
| jayGattusoNLNZ/DROID_comparer | DROID_export_comparer.py | Python | mit | 1,579 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# feeluown documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 2 20:55:54 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'feeluown'
copyright = '2015, cosven'
author = 'cosven'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'cn'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'feeluowndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'feeluown.tex', 'feeluown Documentation',
'cosven', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'feeluown', 'feeluown Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'feeluown', 'feeluown Documentation',
author, 'feeluown', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| JanlizWorldlet/FeelUOwn | sphinx_doc/source/conf.py | Python | mit | 9,220 |
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.forms.models import modelformset_factory
from django.forms.formsets import formset_factory
from formsettesthelpers import *
from formsettesthelpers.test_app.forms import (
UserFormSet,
PersonFormSet,
PersonForm,
)
class UsageTest(TestCase):
def test_demonstration(self):
from django.forms.models import modelformset_factory
# The following formset is something one could use in a view.
FormSet = modelformset_factory(User, fields=('username', 'email'))
# To test such view, we'd need to generate a formset data dict
# to POST to that view.
formset_helper = ModelFormSetHelper(FormSet)
data = formset_helper.generate([
{'username': 'admin', 'email': '[email protected]'},
{'username': 'user1', 'email': '[email protected]'},
], total_forms=2)
# `data` now contains the formset data, something like
# """{u'form-INITIAL_FORMS': 0, u'form-MAX_NUM_FORMS': 1000,
# u'form-1-username': 'user1', u'form-1-email':
# '[email protected]',...}"""
self.assertEquals(data['form-1-username'], 'user1')
# The `test_app` application just happens to have such view, so lets
# use that.
self.client.post(reverse('modelformset'), data)
self.assertEquals(User.objects.count(), 2)
self.assertEquals(User.objects.get(username='admin').email,
'[email protected]')
self.assertEquals(User.objects.get(username='user1').email,
'[email protected]')
class BasicFormsetTestSkeleton(object):
def setUp(self):
self.fh = self.helper_class(self.formset_class)
def test_valid(self):
data = self.fh.generate(self.two_forms_data, total_forms=2)
response = self.client.post(reverse(self.view_name), data)
self.assertEquals(response.content, 'Is valid')
def test_to_dict(self):
data = self.fh.generate(self.single_list_data, total_forms=1)
response = self.client.post(reverse(self.view_name), data)
self.assertEquals(response.content, 'Is valid')
def test_prefixed(self):
fh = self.helper_class(self.formset_class, prefix='humans')
data = fh.generate(self.two_forms_data, total_forms=2)
response = self.client.post(
reverse('prefixed_%s' % self.view_name), data)
self.assertEquals(response.content, 'Is valid')
def test_extra_is_zero(self):
fh = self.helper_class(self.formset_class_zero_extra)
data = fh.generate(self.two_forms_data, total_forms=2)
response = self.client.post(reverse(self.view_name), data)
self.assertEquals(response.content, 'Is valid')
class TestModelFormSet(BasicFormsetTestSkeleton, TestCase):
helper_class = ModelFormSetHelper
formset_class = UserFormSet
formset_class_zero_extra = modelformset_factory(
User, fields=('username', 'email', 'is_staff'), extra=0)
two_forms_data = [
{'username': 'user1', 'email': '[email protected]'},
{'username': 'user2', 'email': '[email protected]'},
]
single_list_data = [['justin', '[email protected]']]
view_name = 'modelformset'
def test_valid(self):
super(TestModelFormSet, self).test_valid()
self.assertEquals(User.objects.count(), 2)
def test_to_dict(self):
super(TestModelFormSet, self).test_to_dict()
self.assertEquals(User.objects.count(), 1)
def test_prefixed(self):
super(TestModelFormSet, self).test_prefixed()
self.assertEquals(User.objects.count(), 2)
def test_extra_is_zero(self):
super(TestModelFormSet, self).test_extra_is_zero()
self.assertEquals(User.objects.count(), 2)
class TestFormSet(BasicFormsetTestSkeleton, TestCase):
helper_class = FormSetHelper
formset_class = PersonFormSet
formset_class_zero_extra = formset_factory(PersonForm, extra=0)
two_forms_data = [
{'name': 'Janelle', 'slug': 'j1', 'age': 24},
{'name': 'Joe', 'slug': 'j2', 'age': 25},
]
single_list_data = [['Max', 'max', 42]]
view_name = 'formset'
| Raekkeri/django-formsettesthelpers | src/formsettesthelpers/tests.py | Python | mit | 4,321 |
# Copyright (C) MetaCarta, Incorporated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# Port of nilsimsa-20050414.rb from Ruby to Python
#
# Ported by Michael Itz at MetaCarta
#
# Original comments from Ruby version:
# ---------------------------------------------------------
# Nilsimsa hash (build 20050414)
# Ruby port (C) 2005 Martin Pirker
# released under GNU GPL V2 license
#
# inspired by Digest::Nilsimsa-0.06 from Perl CPAN and
# the original C nilsimsa-0.2.4 implementation by cmeclax
# http://ixazon.dynip.com/~cmeclax/nilsimsa.html
# ---------------------------------------------------------
"""
Computes and compares nilsimsa codes.
A nilsimsa code is something like a hash, but unlike hashes, a small
change in the message results in a small change in the nilsimsa
code. Such a function is called a locality-sensitive hash.
Python port of ruby version that was inspired by a perl version:
http://ixazon.dynip.com/~cmeclax/nilsimsa.html
"""
import sys
if sys.version_info[0] >= 3:
PY3 = True
text_type = str
else:
PY3 = False
text_type = unicode
def is_iterable_non_string(obj):
return hasattr(obj, '__iter__') and not isinstance(obj, (bytes, text_type))
# $ Id: $
# table used in computing trigram statistics
# TRAN[x] is the accumulator that should be incremented when x
# is the value observed from hashing a triplet of recently
# seen characters (done in Nilsimsa.tran3(a, b, c, n))
TRAN = [ord(x) for x in
"\x02\xD6\x9E\x6F\xF9\x1D\x04\xAB\xD0\x22\x16\x1F\xD8\x73\xA1\xAC"\
"\x3B\x70\x62\x96\x1E\x6E\x8F\x39\x9D\x05\x14\x4A\xA6\xBE\xAE\x0E"\
"\xCF\xB9\x9C\x9A\xC7\x68\x13\xE1\x2D\xA4\xEB\x51\x8D\x64\x6B\x50"\
"\x23\x80\x03\x41\xEC\xBB\x71\xCC\x7A\x86\x7F\x98\xF2\x36\x5E\xEE"\
"\x8E\xCE\x4F\xB8\x32\xB6\x5F\x59\xDC\x1B\x31\x4C\x7B\xF0\x63\x01"\
"\x6C\xBA\x07\xE8\x12\x77\x49\x3C\xDA\x46\xFE\x2F\x79\x1C\x9B\x30"\
"\xE3\x00\x06\x7E\x2E\x0F\x38\x33\x21\xAD\xA5\x54\xCA\xA7\x29\xFC"\
"\x5A\x47\x69\x7D\xC5\x95\xB5\xF4\x0B\x90\xA3\x81\x6D\x25\x55\x35"\
"\xF5\x75\x74\x0A\x26\xBF\x19\x5C\x1A\xC6\xFF\x99\x5D\x84\xAA\x66"\
"\x3E\xAF\x78\xB3\x20\x43\xC1\xED\x24\xEA\xE6\x3F\x18\xF3\xA0\x42"\
"\x57\x08\x53\x60\xC3\xC0\x83\x40\x82\xD7\x09\xBD\x44\x2A\x67\xA8"\
"\x93\xE0\xC2\x56\x9F\xD9\xDD\x85\x15\xB4\x8A\x27\x28\x92\x76\xDE"\
"\xEF\xF8\xB2\xB7\xC9\x3D\x45\x94\x4B\x11\x0D\x65\xD5\x34\x8B\x91"\
"\x0C\xFA\x87\xE9\x7C\x5B\xB1\x4D\xE5\xD4\xCB\x10\xA2\x17\x89\xBC"\
"\xDB\xB0\xE2\x97\x88\x52\xF7\x48\xD3\x61\x2C\x3A\x2B\xD1\x8C\xFB"\
"\xF1\xCD\xE4\x6A\xE7\xA9\xFD\xC4\x37\xC8\xD2\xF6\xDF\x58\x72\x4E"]
# table used in comparing bit differences between digests
# POPC[x] = <number of 1 bits in x>
# so...
# POPC[a^b] = <number of bits different between a and b>
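# e.g. a = 0b1010, b = 0b0110 -> a ^ b = 0b1100 and POPC[0b1100] = 2 differing bits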
POPC = [ord(x) for x in
"\x00\x01\x01\x02\x01\x02\x02\x03\x01\x02\x02\x03\x02\x03\x03\x04"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x01\x02\x02\x03\x02\x03\x03\x04\x02\x03\x03\x04\x03\x04\x04\x05"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x02\x03\x03\x04\x03\x04\x04\x05\x03\x04\x04\x05\x04\x05\x05\x06"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x03\x04\x04\x05\x04\x05\x05\x06\x04\x05\x05\x06\x05\x06\x06\x07"\
"\x04\x05\x05\x06\x05\x06\x06\x07\x05\x06\x06\x07\x06\x07\x07\x08"]
class Nilsimsa(object):
"""Nilsimsa code calculator."""
def __init__(self, data=None):
"""Nilsimsa calculator, w/optional list of initial data chunks."""
self.count = 0 # num characters seen
self.acc = [0]*256 # accumulators for computing digest
self.lastch = [-1]*4 # last four seen characters (-1 until set)
if data:
if is_iterable_non_string(data):
for chunk in data:
self.update(chunk)
elif isinstance(data, (bytes, text_type)):
self.update(data)
else:
raise TypeError("Excpected string, iterable or None, got {}"
.format(type(data)))
def tran3(self, a, b, c, n):
"""Get accumulator for a transition n between chars a, b, c."""
return (((TRAN[(a+n)&255]^TRAN[b]*(n+n+1))+TRAN[(c)^TRAN[n]])&255)
def update(self, data):
"""Add data to running digest, increasing the accumulators for 0-8
triplets formed by this char and the previous 0-3 chars."""
for character in data:
if PY3:
ch = character
else:
ch = ord(character)
self.count += 1
# incr accumulators for triplets
if self.lastch[1] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[1], 0)] +=1
if self.lastch[2] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[2], 1)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[2], 2)] +=1
if self.lastch[3] > -1:
self.acc[self.tran3(ch, self.lastch[0], self.lastch[3], 3)] +=1
self.acc[self.tran3(ch, self.lastch[1], self.lastch[3], 4)] +=1
self.acc[self.tran3(ch, self.lastch[2], self.lastch[3], 5)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[0], ch, 6)] +=1
self.acc[self.tran3(self.lastch[3], self.lastch[2], ch, 7)] +=1
# adjust last seen chars
self.lastch = [ch] + self.lastch[:3]
def digest(self):
"""Get digest of data seen thus far as a list of bytes."""
total = 0 # number of triplets seen
if self.count == 3: # 3 chars = 1 triplet
total = 1
elif self.count == 4: # 4 chars = 4 triplets
total = 4
elif self.count > 4: # otherwise 8 triplets/char less
total = 8 * self.count - 28 # 28 'missed' during 'ramp-up'
threshold = total / 256 # threshold for accumulators, using the mean
code = [0]*32 # start with all zero bits
for i in range(256): # for all 256 accumulators
if self.acc[i] > threshold: # if it meets the threshold
code[i >> 3] += 1 << (i&7) # set corresponding digest bit, equivalent to i/8, 2 ** (i % 8)
return code[::-1] # reverse the byte order in result
def hexdigest(self):
"""Get digest of data seen this far as a 64-char hex string."""
return ("%02x" * 32) % tuple(self.digest())
def __str__(self):
"""Show digest for convenience."""
return self.hexdigest()
def from_file(self, filename):
"""Update running digest with content of named file."""
f = open(filename, 'rb')
while True:
data = f.read(10480)
if not data:
break
self.update(data)
f.close()
def compare(self, otherdigest, ishex=False):
"""Compute difference in bits between own digest and another.
returns -127 to 128; 128 is the same, -127 is different"""
bits = 0
myd = self.digest()
if ishex:
            # convert to 32-tuple of unsigned two-byte INTs
otherdigest = tuple([int(otherdigest[i:i+2],16) for i in range(0,63,2)])
for i in range(32):
bits += POPC[255 & myd[i] ^ otherdigest[i]]
return 128 - bits
def compare_hexdigests( digest1, digest2 ):
"""Compute difference in bits between digest1 and digest2
returns -127 to 128; 128 is the same, -127 is different"""
    # convert to 32-tuple of unsigned two-byte INTs
digest1 = tuple([int(digest1[i:i+2],16) for i in range(0,63,2)])
digest2 = tuple([int(digest2[i:i+2],16) for i in range(0,63,2)])
bits = 0
for i in range(32):
bits += POPC[255 & digest1[i] ^ digest2[i]]
return 128 - bits
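# Illustrative usage sketch (added for clarity, not part of the original module):
# demonstrates the locality-sensitive property described in the module docstring.
# The helper name and the sample byte strings below are hypothetical.
def _similarity_example():
    """Nearly identical inputs should compare close to 128, while unrelated
    text typically scores far lower."""
    base = Nilsimsa(b"the quick brown fox jumps over the lazy dog")
    near = base.compare(Nilsimsa(b"the quick brown fox jumps over the lazy cat").digest())
    far = base.compare(Nilsimsa(b"completely unrelated words about something else").digest())
    # expect near to be much larger than far (both lie in -127..128)
    return near, far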
def selftest( name=None, opt=None, value=None, parser=None ):
print("running selftest...")
n1 = Nilsimsa()
n1.update("abcdefgh")
n2 = Nilsimsa(["abcd", "efgh"])
print("abcdefgh:\t%s" % str(n1.hexdigest()==\
'14c8118000000000030800000004042004189020001308014088003280000078'))
print("abcd efgh:\t%s" % str(n2.hexdigest()==\
'14c8118000000000030800000004042004189020001308014088003280000078'))
print("digest:\t\t%s" % str(n1.digest() == n2.digest()))
n1.update("ijk")
print("update(ijk):\t%s" % str(n1.hexdigest()==\
'14c811840010000c0328200108040630041890200217582d4098103280000078'))
print("compare:\t%s" % str(n1.compare(n2.digest())==109))
print("compare:\t%s" % str(n1.compare(n2.hexdigest(), ishex=True)==109))
| diffeo/py-nilsimsa | nilsimsa/deprecated/_deprecated_nilsimsa.py | Python | mit | 9,977 |
"""
graph.py
-------------
Deal with graph operations. Primarily deal with graphs in (n, 2)
edge list form, and abstract the backend graph library being used.
Currently uses networkx or scipy.sparse.csgraph backend.
"""
import numpy as np
import collections
from . import util
from . import grouping
from . import exceptions
from .constants import log, tol
from .geometry import faces_to_edges
try:
from scipy.sparse import csgraph, coo_matrix
except BaseException as E:
# re-raise exception when used
csgraph = exceptions.ExceptionModule(E)
coo_matrix = exceptions.closure(E)
try:
import networkx as nx
except BaseException as E:
# create a dummy module which will raise the ImportError
# or other exception only when someone tries to use networkx
nx = exceptions.ExceptionModule(E)
def face_adjacency(faces=None,
mesh=None,
return_edges=False):
"""
Returns an (n, 2) list of face indices.
Each pair of faces in the list shares an edge, making them adjacent.
Parameters
-----------
faces : (n, 3) int, or None
Vertex indices representing triangles
mesh : Trimesh object
      If passed will use cached edges
instead of generating from faces
return_edges : bool
Return the edges shared by adjacent faces
Returns
----------
adjacency : (m, 2) int
Indexes of faces that are adjacent
edges: (m, 2) int
Only returned if return_edges is True
Indexes of vertices which make up the
edges shared by the adjacent faces
Examples
----------
This is useful for lots of things such as finding
face- connected components:
>>> graph = nx.Graph()
>>> graph.add_edges_from(mesh.face_adjacency)
    >>> groups = nx.connected_components(graph)
"""
if mesh is None:
# first generate the list of edges for the current faces
# also return the index for which face the edge is from
edges, edges_face = faces_to_edges(faces, return_index=True)
# make sure edge rows are sorted
edges.sort(axis=1)
else:
        # if passed a mesh, use the cached values
edges = mesh.edges_sorted
edges_face = mesh.edges_face
# this will return the indices for duplicate edges
# every edge appears twice in a well constructed mesh
# so for every row in edge_idx:
# edges[edge_idx[*][0]] == edges[edge_idx[*][1]]
# in this call to group rows we discard edges which
# don't occur twice
edge_groups = grouping.group_rows(edges, require_count=2)
if len(edge_groups) == 0:
log.warning('No adjacent faces detected! Did you merge vertices?')
# the pairs of all adjacent faces
# so for every row in face_idx, self.faces[face_idx[*][0]] and
# self.faces[face_idx[*][1]] will share an edge
adjacency = edges_face[edge_groups]
# degenerate faces may appear in adjacency as the same value
nondegenerate = adjacency[:, 0] != adjacency[:, 1]
adjacency = adjacency[nondegenerate]
# sort pairs in-place so we can search for indexes with ordered pairs
adjacency.sort(axis=1)
if return_edges:
adjacency_edges = edges[edge_groups[:, 0][nondegenerate]]
assert len(adjacency_edges) == len(adjacency)
return adjacency, adjacency_edges
return adjacency
def face_adjacency_unshared(mesh):
"""
Return the vertex index of the two vertices not in the shared
edge between two adjacent faces
Parameters
----------
mesh : Trimesh object
Input mesh
Returns
-----------
vid_unshared : (len(mesh.face_adjacency), 2) int
Indexes of mesh.vertices
for degenerate faces without exactly
one unshared vertex per face it will be -1
"""
# the non- shared vertex index is the same shape
# as face_adjacency holding vertex indices vs face indices
vid_unshared = np.zeros_like(mesh.face_adjacency,
dtype=np.int64) - 1
# get the shared edges between adjacent faces
edges = mesh.face_adjacency_edges
# loop through the two columns of face adjacency
for i, fid in enumerate(mesh.face_adjacency.T):
# faces from the current column of face adjacency
faces = mesh.faces[fid]
# should have one True per row of (3,)
# index of vertex not included in shared edge
unshared = np.logical_not(np.logical_or(
faces == edges[:, 0].reshape((-1, 1)),
faces == edges[:, 1].reshape((-1, 1))))
        # each row should have exactly one uncontained vertex
row_ok = unshared.sum(axis=1) == 1
# any degenerate row should be ignored
unshared[~row_ok, :] = False
        # set the unshared vertex index for the valid rows
vid_unshared[row_ok, i] = faces[unshared]
return vid_unshared
def face_adjacency_radius(mesh):
"""
Compute an approximate radius between adjacent faces.
Parameters
--------------
mesh : trimesh.Trimesh
Returns
-------------
radii : (len(self.face_adjacency),) float
Approximate radius between faces
Parallel faces will have a value of np.inf
span : (len(self.face_adjacency),) float
Perpendicular projection distance of two
unshared vertices onto the shared edge
"""
# solve for the radius of the adjacent faces
# distance
# R = ------------------
# 2 * sin(theta / 2)
nonzero = mesh.face_adjacency_angles > np.radians(.01)
denominator = np.abs(
2.0 * np.sin(mesh.face_adjacency_angles[nonzero] / 1.0))
# consider the distance between the non- shared vertices of the
# face adjacency pair as the key distance
point_pairs = mesh.vertices[mesh.face_adjacency_unshared]
vectors = np.diff(point_pairs,
axis=1).reshape((-1, 3))
    # the vertex indices of the shared edge for the adjacency pair
edges = mesh.face_adjacency_edges
    # unit vector along the shared edge
edges_vec = util.unitize(np.diff(mesh.vertices[edges],
axis=1).reshape((-1, 3)))
# the vector of the perpendicular projection to the shared edge
perp = np.subtract(
vectors, (util.diagonal_dot(
vectors, edges_vec).reshape(
(-1, 1)) * edges_vec))
# the length of the perpendicular projection
span = util.row_norm(perp)
# complete the values for non- infinite radii
radii = np.ones(len(mesh.face_adjacency)) * np.inf
radii[nonzero] = span[nonzero] / denominator
return radii, span
def vertex_adjacency_graph(mesh):
"""
Returns a networkx graph representing the vertices and
their connections in the mesh.
Parameters
----------
mesh : Trimesh object
Returns
---------
graph : networkx.Graph
Graph representing vertices and edges between
them where vertices are nodes and edges are edges
Examples
----------
This is useful for getting nearby vertices for a given vertex,
potentially for some simple smoothing techniques.
>>> graph = mesh.vertex_adjacency_graph
>>> graph.neighbors(0)
> [1, 3, 4]
"""
g = nx.Graph()
g.add_edges_from(mesh.edges_unique)
return g
def shared_edges(faces_a, faces_b):
"""
Given two sets of faces, find the edges which are in both sets.
Parameters
---------
faces_a : (n, 3) int
Array of faces
faces_b : (m, 3) int
Array of faces
Returns
---------
shared : (p, 2) int
Edges shared between faces
"""
e_a = np.sort(faces_to_edges(faces_a), axis=1)
e_b = np.sort(faces_to_edges(faces_b), axis=1)
shared = grouping.boolean_rows(
e_a, e_b, operation=np.intersect1d)
return shared
def facets(mesh, engine=None):
"""
Find the list of parallel adjacent faces.
Parameters
-----------
mesh : trimesh.Trimesh
engine : str
Which graph engine to use:
('scipy', 'networkx')
Returns
---------
facets : sequence of (n,) int
Groups of face indexes of
parallel adjacent faces.
"""
# what is the radius of a circle that passes through the perpendicular
# projection of the vector between the two non- shared vertices
# onto the shared edge, with the face normal from the two adjacent faces
radii = mesh.face_adjacency_radius
# what is the span perpendicular to the shared edge
span = mesh.face_adjacency_span
# a very arbitrary formula for declaring two adjacent faces
# parallel in a way that is hopefully (and anecdotally) robust
# to numeric error
# a common failure mode is two faces that are very narrow with a slight
# angle between them, so here we divide by the perpendicular span
# to penalize very narrow faces, and then square it just for fun
parallel = np.ones(len(radii), dtype=np.bool)
# if span is zero we know faces are small/parallel
nonzero = np.abs(span) > tol.zero
# faces with a radii/span ratio larger than a threshold pass
parallel[nonzero] = (radii[nonzero] /
span[nonzero]) ** 2 > tol.facet_threshold
# run connected components on the parallel faces to group them
components = connected_components(
mesh.face_adjacency[parallel],
nodes=np.arange(len(mesh.faces)),
min_len=2,
engine=engine)
return components
def split(mesh, only_watertight=True, adjacency=None, engine=None, **kwargs):
"""
Split a mesh into multiple meshes from face
connectivity.
If only_watertight is true it will only return
watertight meshes and will attempt to repair
single triangle or quad holes.
Parameters
----------
mesh : trimesh.Trimesh
only_watertight: bool
Only return watertight components
adjacency : (n, 2) int
Face adjacency to override full mesh
engine : str or None
Which graph engine to use
Returns
----------
meshes : (m,) trimesh.Trimesh
Results of splitting
"""
if adjacency is None:
adjacency = mesh.face_adjacency
    # if only watertight the smallest watertight piece has 4 triangles (a tetrahedron)
if only_watertight:
min_len = 4
else:
min_len = 1
components = connected_components(
edges=adjacency,
nodes=np.arange(len(mesh.faces)),
min_len=min_len,
engine=engine)
meshes = mesh.submesh(
components, only_watertight=only_watertight, **kwargs)
return meshes
def connected_components(edges,
min_len=1,
nodes=None,
engine=None):
"""
Find groups of connected nodes from an edge list.
Parameters
-----------
edges : (n, 2) int
Edges between nodes
nodes : (m, ) int or None
List of nodes that exist
min_len : int
Minimum length of a component group to return
engine : str or None
Which graph engine to use (None for automatic):
(None, 'networkx', 'scipy')
Returns
-----------
components : (n,) sequence of (*,) int
Nodes which are connected
"""
def components_networkx():
"""
Find connected components using networkx
"""
graph = nx.from_edgelist(edges)
# make sure every face has a node, so single triangles
# aren't discarded (as they aren't adjacent to anything)
if min_len <= 1:
graph.add_nodes_from(nodes)
iterable = nx.connected_components(graph)
# newer versions of networkx return sets rather than lists
components = np.array(
[np.array(list(i), dtype=np.int64)
for i in iterable if len(i) >= min_len])
return components
def components_csgraph():
"""
Find connected components using scipy.sparse.csgraph
"""
# label each node
labels = connected_component_labels(edges,
node_count=node_count)
# we have to remove results that contain nodes outside
# of the specified node set and reindex
contained = np.zeros(node_count, dtype=np.bool)
contained[nodes] = True
index = np.arange(node_count, dtype=np.int64)[contained]
components = grouping.group(labels[contained], min_len=min_len)
components = np.array([index[c] for c in components])
return components
# check input edges
edges = np.asanyarray(edges, dtype=np.int64)
# if no nodes were specified just use unique
if nodes is None:
nodes = np.unique(edges)
# exit early if we have no nodes
if len(nodes) == 0:
return np.array([])
elif len(edges) == 0:
if min_len <= 1:
return np.reshape(nodes, (-1, 1))
else:
return np.array([])
if not util.is_shape(edges, (-1, 2)):
raise ValueError('edges must be (n, 2)!')
# find the maximum index referenced in either nodes or edges
counts = [0]
if len(edges) > 0:
counts.append(edges.max())
if len(nodes) > 0:
counts.append(nodes.max())
node_count = np.max(counts) + 1
# remove edges that don't have both nodes in the node set
mask = np.zeros(node_count, dtype=np.bool)
mask[nodes] = True
edges_ok = mask[edges].all(axis=1)
edges = edges[edges_ok]
# networkx is pure python and is usually 5-10x slower than scipy
engines = collections.OrderedDict((
('scipy', components_csgraph),
('networkx', components_networkx)))
# if a graph engine has explicitly been requested use it
if engine in engines:
return engines[engine]()
# otherwise, go through our ordered list of graph engines
# until we get to one that has actually been installed
for function in engines.values():
try:
return function()
# will be raised if the library didn't import correctly above
except NameError:
continue
raise ImportError('No connected component engines available!')
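# Minimal usage sketch (added for illustration; the helper name and the toy
# edge list are hypothetical, everything else is defined in this module):
def _connected_components_example():
    """Two chains 0-1-2 and 3-4 plus an isolated node 5; with min_len=1 the
    isolated node comes back as its own component."""
    edges = np.array([[0, 1], [1, 2], [3, 4]])
    groups = connected_components(edges, nodes=np.arange(6), min_len=1)
    # expected groups (order may vary): [0, 1, 2], [3, 4], [5]
    return groups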
def connected_component_labels(edges, node_count=None):
"""
Label graph nodes from an edge list, using scipy.sparse.csgraph
Parameters
-----------
edges : (n, 2) int
Edges of a graph
node_count : int, or None
The largest node in the graph.
Returns
----------
labels : (node_count,) int
Component labels for each node
"""
matrix = edges_to_coo(edges, node_count)
body_count, labels = csgraph.connected_components(
matrix, directed=False)
if node_count is not None:
assert len(labels) == node_count
return labels
def split_traversal(traversal,
edges,
edges_hash=None):
"""
Given a traversal as a list of nodes, split the traversal
if a sequential index pair is not in the given edges.
Parameters
--------------
edges : (n, 2) int
Graph edge indexes
traversal : (m,) int
Traversal through edges
    edges_hash : (n,)
Edges sorted on axis=1 and
passed to grouping.hashable_rows
Returns
---------------
split : sequence of (p,) int
"""
traversal = np.asanyarray(traversal,
dtype=np.int64)
# hash edge rows for contains checks
if edges_hash is None:
edges_hash = grouping.hashable_rows(
np.sort(edges, axis=1))
# turn the (n,) traversal into (n-1, 2) edges
trav_edge = np.column_stack((traversal[:-1],
traversal[1:]))
# hash each edge so we can compare to edge set
trav_hash = grouping.hashable_rows(
np.sort(trav_edge, axis=1))
# check if each edge is contained in edge set
contained = np.in1d(trav_hash, edges_hash)
# exit early if every edge of traversal exists
if contained.all():
# just reshape one traversal
split = [traversal]
else:
# find contiguous groups of contained edges
blocks = grouping.blocks(contained,
min_len=1,
only_nonzero=True)
# turn edges back in to sequence of traversals
split = [np.append(trav_edge[b][:, 0],
trav_edge[b[-1]][1])
for b in blocks]
# close traversals if necessary
for i, t in enumerate(split):
# make sure elements of sequence are numpy arrays
split[i] = np.asanyarray(split[i], dtype=np.int64)
        # don't close if it's a single edge
if len(t) <= 2:
continue
# make sure it's not already closed
edge = np.sort([t[0], t[-1]])
if edge.ptp() == 0:
continue
close = grouping.hashable_rows(edge.reshape((1, 2)))[0]
# if we need the edge add it
if close in edges_hash:
split[i] = np.append(t, t[0]).astype(np.int64)
result = np.array(split)
return result
def fill_traversals(traversals, edges, edges_hash=None):
"""
Convert a traversal of a list of edges into a sequence of
traversals where every pair of consecutive node indexes
is an edge in a passed edge list
Parameters
-------------
traversals : sequence of (m,) int
Node indexes of traversals of a graph
edges : (n, 2) int
Pairs of connected node indexes
edges_hash : None, or (n,) int
Edges sorted along axis 1 then hashed
using grouping.hashable_rows
Returns
--------------
splits : sequence of (p,) int
Node indexes of connected traversals
"""
# make sure edges are correct type
edges = np.asanyarray(edges, dtype=np.int64)
# make sure edges are sorted
edges.sort(axis=1)
# if there are no traversals just return edges
if len(traversals) == 0:
return edges.copy()
# hash edges for contains checks
if edges_hash is None:
edges_hash = grouping.hashable_rows(edges)
splits = []
for nodes in traversals:
# split traversals to remove edges
# that don't actually exist
splits.extend(split_traversal(
traversal=nodes,
edges=edges,
edges_hash=edges_hash))
# turn the split traversals back into (n, 2) edges
included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))
for i in splits])
if len(included) > 0:
# sort included edges in place
included.sort(axis=1)
# make sure any edges not included in split traversals
# are just added as a length 2 traversal
splits.extend(grouping.boolean_rows(
edges,
included,
operation=np.setdiff1d))
else:
# no edges were included, so our filled traversal
# is just the original edges copied over
splits = edges.copy()
return splits
def traversals(edges, mode='bfs'):
"""
    Given an edge list generate a sequence of ordered breadth-first
    or depth-first traversals using scipy.sparse.csgraph routines.
Parameters
------------
edges : (n, 2) int
Undirected edges of a graph
mode : str
Traversal type, 'bfs' or 'dfs'
Returns
-----------
traversals : (m,) sequence of (p,) int
Ordered DFS or BFS traversals of the graph.
"""
edges = np.array(edges, dtype=np.int64)
if len(edges) == 0:
return []
elif not util.is_shape(edges, (-1, 2)):
raise ValueError('edges are not (n, 2)!')
# pick the traversal method
mode = str(mode).lower().strip()
if mode == 'bfs':
func = csgraph.breadth_first_order
elif mode == 'dfs':
func = csgraph.depth_first_order
else:
raise ValueError('traversal mode must be either dfs or bfs')
# make sure edges are sorted so we can query
# an ordered pair later
edges.sort(axis=1)
# set of nodes to make sure we get every node
nodes = set(edges.reshape(-1))
# coo_matrix for csgraph routines
graph = edges_to_coo(edges)
# we're going to make a sequence of traversals
traversals = []
while len(nodes) > 0:
# starting at any node
start = nodes.pop()
# get an (n,) ordered traversal
ordered = func(graph,
i_start=start,
return_predecessors=False,
directed=False).astype(np.int64)
traversals.append(ordered)
# remove the nodes we've consumed
nodes.difference_update(ordered)
return traversals
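# Minimal usage sketch (added for illustration; the helper name and the toy
# edge list are hypothetical):
def _traversals_example():
    """BFS orderings of a graph with two pieces: a chain 0-1-2 and an edge 3-4."""
    order = traversals([[0, 1], [1, 2], [3, 4]], mode='bfs')
    # expected: one ordering covering {0, 1, 2} and one covering {3, 4},
    # e.g. [array([0, 1, 2]), array([3, 4])] (the start node may vary)
    return order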
def edges_to_coo(edges, count=None, data=None):
"""
Given an edge list, return a boolean scipy.sparse.coo_matrix
representing the edges in matrix form.
Parameters
------------
edges : (n, 2) int
Edges of a graph
count : int
The total number of nodes in the graph
if None: count = edges.max() + 1
data : (n,) any
Assign data to each edge, if None will
be bool True for each specified edge
Returns
------------
matrix: (count, count) scipy.sparse.coo_matrix
Sparse COO
"""
edges = np.asanyarray(edges, dtype=np.int64)
if not (len(edges) == 0 or
util.is_shape(edges, (-1, 2))):
raise ValueError('edges must be (n, 2)!')
# if count isn't specified just set it to largest
# value referenced in edges
if count is None:
count = edges.max() + 1
count = int(count)
# if no data is specified set every specified edge
# to True
if data is None:
data = np.ones(len(edges), dtype=np.bool)
matrix = coo_matrix((data, edges.T),
dtype=data.dtype,
shape=(count, count))
return matrix
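# Minimal usage sketch (added for illustration; the helper name and the toy
# edge list are hypothetical):
def _edges_to_coo_example():
    """Three directed edges among four nodes give a (4, 4) COO matrix with
    three stored True entries."""
    matrix = edges_to_coo([[0, 1], [1, 2], [2, 3]])
    # matrix.shape == (4, 4) and matrix.nnz == 3
    return matrix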
def neighbors(edges, max_index=None, directed=False):
"""
Find the neighbors for each node in an edgelist graph.
TODO : re-write this with sparse matrix operations
Parameters
------------
edges : (n, 2) int
Connected nodes
directed : bool
If True, only connect edges in one direction
Returns
---------
neighbors : sequence
Vertex index corresponds to set of other vertex indices
"""
neighbors = collections.defaultdict(set)
if directed:
[neighbors[edge[0]].add(edge[1])
for edge in edges]
else:
[(neighbors[edge[0]].add(edge[1]),
neighbors[edge[1]].add(edge[0]))
for edge in edges]
if max_index is None:
max_index = edges.max() + 1
array = [list(neighbors[i]) for i in range(max_index)]
return array
def smoothed(mesh, angle=None, facet_minarea=15):
"""
Return a non- watertight version of the mesh which
will render nicely with smooth shading by
disconnecting faces at sharp angles to each other.
Parameters
-----------
mesh : trimesh.Trimesh
Source geometry
angle : float or None
      Angle in radians; face pairs with angles
      smaller than this will appear smoothed
facet_minarea : float or None
Minimum area fraction to consider
      IE for `facet_minarea=25` only facets larger
than `mesh.area / 25` will be considered.
Returns
---------
smooth : trimesh.Trimesh
Geometry with disconnected face patches
"""
if angle is None:
angle = np.radians(30)
# if the mesh has no adjacent faces return a copy
if len(mesh.face_adjacency) == 0:
return mesh.copy()
# face pairs below angle threshold
angle_ok = mesh.face_adjacency_angles <= angle
# subset of face adjacency
adjacency = mesh.face_adjacency[angle_ok]
# coplanar groups of faces
facets = []
nodes = None
# collect coplanar regions for smoothing
if facet_minarea is not None:
areas = mesh.area_faces
min_area = mesh.area / facet_minarea
try:
# we can survive not knowing facets
# exclude facets with few faces
facets = [f for f in mesh.facets
if areas[f].sum() > min_area]
if len(facets) > 0:
# mask for removing adjacency pairs where
# one of the faces is contained in a facet
mask = np.ones(len(mesh.faces),
dtype=np.bool)
mask[np.hstack(facets)] = False
# apply the mask to adjacency
adjacency = adjacency[
mask[adjacency].all(axis=1)]
                # nodes are no longer every face
nodes = np.unique(adjacency)
except BaseException:
log.warning('failed to calculate facets',
exc_info=True)
# run connected components on facet adjacency
components = connected_components(
adjacency,
min_len=1,
nodes=nodes).tolist()
# add back coplanar groups if any exist
if len(facets) > 0:
components.extend(facets)
if len(components) == 0:
# if no components for some reason
# just return a copy of the original mesh
return mesh.copy()
# add back any faces that were missed
unique = np.unique(np.hstack(components))
if len(unique) != len(mesh.faces):
# things like single loose faces
# or groups below facet_minlen
broke = np.setdiff1d(
np.arange(len(mesh.faces)), unique)
components.extend(broke.reshape((-1, 1)))
# get a submesh as a single appended Trimesh
smooth = mesh.submesh(components,
only_watertight=False,
append=True)
# store face indices from original mesh
smooth.metadata['original_components'] = components
# smoothed should have exactly the same number of faces
if len(smooth.faces) != len(mesh.faces):
log.warning('face count in smooth wrong!')
return smooth
def is_watertight(edges, edges_sorted=None):
"""
Parameters
-----------
edges : (n, 2) int
List of vertex indices
edges_sorted : (n, 2) int
Pass vertex indices sorted on axis 1 as a speedup
Returns
---------
watertight : boolean
      Whether every edge is shared by exactly
      two faces
winding : boolean
Whether every shared edge is reversed
"""
# passing edges_sorted is a speedup only
if edges_sorted is None:
edges_sorted = np.sort(edges, axis=1)
# group sorted edges
groups = grouping.group_rows(
edges_sorted, require_count=2)
watertight = bool((len(groups) * 2) == len(edges))
# are opposing edges reversed
opposing = edges[groups].reshape((-1, 4))[:, 1:3].T
# wrap the weird numpy bool
winding = bool(np.equal(*opposing).all())
return watertight, winding
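# Minimal usage sketch (added for illustration; the helper name and the toy
# tetrahedron faces are hypothetical):
def _is_watertight_example():
    """Edges of a consistently wound tetrahedron: every edge appears exactly
    twice and in opposite directions, so both checks should be True."""
    faces = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 1], [1, 3, 2]])
    edges = faces_to_edges(faces)
    watertight, winding = is_watertight(edges)
    return watertight, winding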
def graph_to_svg(graph):
"""
Turn a networkx graph into an SVG string
using graphviz `dot`.
Parameters
----------
graph: networkx graph
Returns
---------
    svg: string, pictorial layout in SVG format
"""
import tempfile
import subprocess
with tempfile.NamedTemporaryFile() as dot_file:
nx.drawing.nx_agraph.write_dot(graph, dot_file.name)
svg = subprocess.check_output(['dot', dot_file.name, '-Tsvg'])
return svg
def multigraph_paths(G, source, cutoff=None):
"""
For a networkx MultiDiGraph, find all paths from a source node
to leaf nodes. This function returns edge instance numbers
in addition to nodes, unlike networkx.all_simple_paths.
Parameters
---------------
G : networkx.MultiDiGraph
Graph to evaluate
source : hashable
Node to start traversal at
cutoff : int
Number of nodes to visit
If None will visit all nodes
Returns
----------
traversals : (n,) list of [(node, edge instance index), ] paths
Traversals of the multigraph
"""
if cutoff is None:
cutoff = (len(G.edges()) * len(G.nodes())) + 1
# the path starts at the node specified
current = [(source, 0)]
# traversals we need to go back and do
queue = []
# completed paths
traversals = []
for i in range(cutoff):
# paths are stored as (node, instance) so
# get the node of the last place visited
current_node = current[-1][0]
# get all the children of the current node
child = G[current_node]
if len(child) == 0:
# we have no children, so we are at the end of this path
# save the path as a completed traversal
traversals.append(current)
# if there is nothing on the queue, we are done
if len(queue) == 0:
break
# otherwise continue traversing with the next path
# on the queue
current = queue.pop()
else:
# oh no, we have multiple edges from current -> child
start = True
# iterate through child nodes and edge instances
for node in child.keys():
for instance in child[node].keys():
if start:
# if this is the first edge, keep it on the
# current traversal and save the others for later
current.append((node, instance))
start = False
else:
# this child has multiple instances
# so we will need to traverse them multiple times
# we appended a node to current, so only take the
# first n-1 visits
queue.append(current[:-1] + [(node, instance)])
return traversals
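# Minimal usage sketch (added for illustration; the helper name and the toy
# graph are hypothetical), assuming networkx is importable:
def _multigraph_paths_example():
    """Two parallel edges from 'a' to 'b' give two traversals that differ
    only in the edge instance index."""
    G = nx.MultiDiGraph()
    G.add_edge('a', 'b')
    G.add_edge('a', 'b')
    paths = multigraph_paths(G, source='a')
    # expected: [[('a', 0), ('b', 0)], [('a', 0), ('b', 1)]]
    return paths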
def multigraph_collect(G, traversal, attrib=None):
"""
Given a MultiDiGraph traversal, collect attributes along it.
Parameters
-------------
G: networkx.MultiDiGraph
traversal: (n) list of (node, instance) tuples
attrib: dict key, name to collect. If None, will return all
Returns
-------------
collected: (len(traversal) - 1) list of attributes
"""
collected = []
for u, v in util.pairwise(traversal):
attribs = G[u[0]][v[0]][v[1]]
if attrib is None:
collected.append(attribs)
else:
collected.append(attribs[attrib])
return collected
| dajusc/trimesh | trimesh/graph.py | Python | mit | 30,522 |
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: datalake_samples_access_control_recursive_async.py
DESCRIPTION:
This sample demonstrates recursive set/get access control on directories.
USAGE:
python datalake_samples_access_control_recursive_async.py
Set the environment variables with your own values before running the sample:
1) STORAGE_ACCOUNT_NAME - the storage account name
2) STORAGE_ACCOUNT_KEY - the storage account key
"""
import os
import random
import uuid
import asyncio
from azure.core.exceptions import AzureError
from azure.storage.filedatalake.aio import (
DataLakeServiceClient,
)
# TODO: rerun after test account is fixed
async def recursive_access_control_sample(filesystem_client):
# create a parent directory
dir_name = "testdir"
print("Creating a directory named '{}'.".format(dir_name))
directory_client = await filesystem_client.create_directory(dir_name)
# populate the directory with some child files
await create_child_files(directory_client, 35)
# get and display the permissions of the parent directory
acl_props = await directory_client.get_access_control()
print("Permissions of directory '{}' are {}.".format(dir_name, acl_props['permissions']))
# set the permissions of the entire directory tree recursively
# update/remove acl operations are performed the same way
acl = 'user::rwx,group::r-x,other::rwx'
failed_entries = []
# the progress callback is invoked each time a batch is completed
async def progress_callback(acl_changes):
print(("In this batch: {} directories and {} files were processed successfully, {} failures were counted. " +
"In total, {} directories and {} files were processed successfully, {} failures were counted.")
.format(acl_changes.batch_counters.directories_successful, acl_changes.batch_counters.files_successful,
acl_changes.batch_counters.failure_count, acl_changes.aggregate_counters.directories_successful,
acl_changes.aggregate_counters.files_successful, acl_changes.aggregate_counters.failure_count))
# keep track of failed entries if there are any
failed_entries.append(acl_changes.batch_failures)
# illustrate the operation by using a small batch_size
try:
acl_change_result = await directory_client.set_access_control_recursive(acl=acl,
progress_hook=progress_callback,
batch_size=5)
except AzureError as error:
# if the error has continuation_token, you can restart the operation using that continuation_token
if error.continuation_token:
acl_change_result = \
await directory_client.set_access_control_recursive(acl=acl,
continuation_token=error.continuation_token,
progress_hook=progress_callback,
batch_size=5)
print("Summary: {} directories and {} files were updated successfully, {} failures were counted."
.format(acl_change_result.counters.directories_successful, acl_change_result.counters.files_successful,
acl_change_result.counters.failure_count))
# if an error was encountered, a continuation token would be returned if the operation can be resumed
if acl_change_result.continuation is not None:
print("The operation can be resumed by passing the continuation token {} again into the access control method."
.format(acl_change_result.continuation))
# get and display the permissions of the parent directory again
acl_props = await directory_client.get_access_control()
print("New permissions of directory '{}' and its children are {}.".format(dir_name, acl_props['permissions']))
async def create_child_files(directory_client, num_child_files):
import itertools
async def create_file():
# generate a random name
file_name = str(uuid.uuid4()).replace('-', '')
file_client = directory_client.get_file_client(file_name)
await file_client.create_file()
futures = [asyncio.ensure_future(create_file()) for _ in itertools.repeat(None, num_child_files)]
await asyncio.wait(futures)
print("Created {} files under the directory '{}'.".format(num_child_files, directory_client.path_name))
async def run():
account_name = os.getenv('STORAGE_ACCOUNT_NAME', "")
account_key = os.getenv('STORAGE_ACCOUNT_KEY', "")
# set up the service client with the credentials from the environment variables
service_client = DataLakeServiceClient(account_url="{}://{}.dfs.core.windows.net".format(
"https",
account_name
), credential=account_key)
async with service_client:
# generate a random name for testing purpose
fs_name = "testfs{}".format(random.randint(1, 1000))
print("Generating a test filesystem named '{}'.".format(fs_name))
# create the filesystem
filesystem_client = await service_client.create_file_system(file_system=fs_name)
# invoke the sample code
try:
await recursive_access_control_sample(filesystem_client)
finally:
# clean up the demo filesystem
await filesystem_client.delete_file_system()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
| Azure/azure-sdk-for-python | sdk/storage/azure-storage-file-datalake/samples/datalake_samples_access_control_recursive_async.py | Python | mit | 5,962 |
#------------- Daniel Han-Chen 2017
#------------- https://github.com/danielhanchen/sciblox
#------------- SciBlox v0.02
#-------------
maxcats = 15
import warnings
warnings.filterwarnings("ignore")
true = True; TRUE = True
false = False; FALSE = False
import pip
def install(package): pip.main(['install', package])
#-----------------------------
try:
import pandas as pd, numpy as np, scipy, sklearn as sk, seaborn as sb
from copy import copy
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
jtplot.style()
except:
try:
print("Installing packages... Please wait...")
if __name__ == '__main__':
install('pandas'); install('numpy'); install('scipy'); install('scikit-learn');
install('matplotlib'); install('seaborn'); install('lightgbm');
try: install('jupyterthemes');
except: pass;
try: install('sympy');
except: pass;
try:
install('libpython'); install('theano'); install('fancyimpute');
except: pass;
except: pass;
import pandas as pd, numpy as np, scipy, sklearn as sk, seaborn as sb
from copy import copy
import matplotlib.pyplot as plt
try:
from jupyterthemes import jtplot;
jtplot.style()
except: pass;
#-----------------------------
np.set_printoptions(suppress = True)
pd.set_option('display.max_rows', 10)
pd_colour = '#302f2f'
#-------------
#-------------
#-------------
#-------------
#------------------------------------ DATAFRAME METHODS ------------------------------------#
#-------------------- Display options and pandas methods --------------------#
def maxrows(x = 10): pd.set_option('display.max_rows', x)
def maxcat(x = 15): global maxcats; maxcats = x
def tabcolour(x = '#302f2f'): global pd_colour; pd_colour = x
#-----------------------------
def percent(x):
if x <= 1: return x
else: return x/100
#-----------------------------
def table(x):
try: return pd.DataFrame(x)
except: return pd.DataFrame(list(x.items()))
def series(x):
try: return pd.Series(x)
except:
first = pd.Series(x[0])
if len(first)!=len(x): return pd.Series(T(x)[0])
else: return first
#-----------------------------
def istable(x): return (type(x) in [pd.DataFrame,pd.Series])*1
def isarray(x): return (type(x) in [np.array,np.ndarray,np.matrix])*1
#-----------------------------
def shape(x):
try: return x.shape
except: return len(x)
#-----------------------------
def head(x, n = 5):
if istable(x)==1: return x.head(n)
else:
if len(x) > n: return x[:n]
else: return x
def tail(x, n = 5):
if istable(x)==1: return x.tail(n)
else:
if len(x) > n: return x[-n:]
else: return x
#-----------------------------
def sample(x, n = 5, ordered = False):
if n > len(x): g = len(x)
else: g = n
if istable(x)==1:
if ordered == False: return x.sample(g)
else: return x.iloc[[int(y*(len(x)/g)) for y in range(g)]]
else:
if ordered == False: return np.random.choice(x, g)
else: return np.array(x)[[int(y*(len(x)/g)) for y in range(g)]]
#-----------------------------
def columns(x):
try: return x.columns.tolist()
except: pass;
def index(x):
try: return x.index.tolist()
except: pass;
#-----------------------------
def reset(x, index = True, column = False, string = False, drop = False):
if index == True and column == False:
if drop == False: return x.reset_index()
else: return x.reset_index()[columns(x)]
else:
y = copy(x)
if type(x)==pd.Series: ss = 0
else: ss = shape(x)[1]
if string == True: y.columns = ["col"+str(y) for y in range(ss)]
else: y.columns = [y for y in range(ss)]
return y
#-----------------------------
def hcat(*args):
a = args[0]
if type(a)==pd.Series: a = table(a)
for b in args[1:]:
if type(a)==list:
if type(b)!=list: b = list(b)
a = a + b
elif isarray(a)==1:
if isarray(b)==0: b = array(b)
a = np.hstack((a,b))
else:
if type(b)!=pd.DataFrame: b = table(b)
a = pd.concat([a,b],1)
del b
return a
def vcat(*args):
a = args[0]
if type(a)==pd.Series: a = table(a)
elif type(a)==list: a = array(a)
for b in args[1:]:
if isarray(a)==1:
if isarray(b)==0: b = array(b)
a = np.vstack((a,b))
else:
if type(b)!=pd.DataFrame: b = table(b)
a = pd.concat([a,b],0)
del b
return a
#-----------------------------
def dtypes(x):
if type(x)==pd.Series:
types = x.dtype
        if types in ('O', "string", "unicode"): return 'obj'
        elif types in ("int64", "uint8", "uint16", "uint32", "uint64", "int8", "int32", "int16"): return 'int'
        elif types in ('float64', 'float16', 'float32', 'float128'): return 'float'
elif types=='bool': return 'bool'
else: return 'date'
else:
dfs = x.dtypes
for f in (dfs.index.tolist()):
dfs[f] = str(dfs[f])
if "int" in dfs[f]: dfs[f] = 'int'
elif "float" in dfs[f]: dfs[f] = "float"
elif "bool" in dfs[f]: dfs[f] = "bool"
elif "O" in dfs[f] or "obj" in dfs[f]: dfs[f] = "obj"
elif "date" in dfs[f]: dfs[f] = "date"
else: dfs[f] = "obj"
return dfs
def dtype(x): return dtypes(x)
def contcol(x):
try: return ((dtypes(x)=="int")|(dtypes(x)=="float")).index[(dtypes(x)=="int")|(dtypes(x)=="float")].tolist()
except: return np.nan
def conts(x):
if type(x) == pd.Series:
if dtype(x) in ["int","float"]: return x
else: return np.nan
else: return x[contcol(x)]
def objcol(x):
try: return (dtypes(x)=="obj").index[dtypes(x)=="obj"].tolist()
except: return np.nan
def objects(x):
if type(x) == pd.Series:
if dtype(x) == "obj": return x
else: return np.nan
else: return x[objcol(x)]
def objs(x): return objects(x)
def notobj(x): return exc(x, objcol(x))
def catcol(x):
if type(x) == pd.Series:
if iscat(x) == True: return x
else: return np.nan
else: return (iscat(x).index[iscat(x)]).tolist()
def classcol(x): return cats(x)
def cats(x): return x[catcol(x)]
def classes(x): return x[catcol(x)]
def iscat(x, cat = maxcats):
return ((dtypes(x)!='float')|(dtypes(x)!='int'))&(nunique(x)<=cat)
#-----------------------------
def nullcol(x): return (count(x)!=len(x)).index[count(x)!=len(x)].tolist()
def nacol(x): return nullcol(x)
def missingcol(x): return nullcol(x)
def notnull(x, row = 1, keep = None, col = 0):
if row!=1: axis = 1
elif col!=0: axis = 0
else: axis = 0
if keep is None:
try: return x.dropna(axis = axis)
except: return x.dropna()
else:
if keep < 1:
if axis==1: keep = len(x)*keep
else: keep = shape(x)[1]*keep
return x.dropna(axis = axis, thresh = keep)
def isnull(x, row = 1, keep = None, col = 0):
if row!=1 or col!=0: axis = 0
else: axis = 1
if keep is None: miss = missing(x, row = axis)!=0
else:
if axis == 1:
if keep < 1: miss = missing(x, row = axis)<=shape(x)[1]*keep
else: miss = missing(x, row = axis)<=keep
else:
if keep < 1: miss = missing(x, row = axis)<=len(x)*keep
else: miss = missing(x, row = axis)<=keep
try: return x.iloc[miss.index[miss]]
except: return x[pd.isnull(x)==True]
def dropna(x, col = None):
if col is None: return x.dropna()
else:
if type(col)!=list: col = list(col)
return x.dropna(subset = col)
#-----------------------------
def diff(want, rem):
    # keep the elements of want that are not in rem
    return [j for j in want if j not in rem]
def exc(x, l):
if type(l) == str: l = [l]
return x[diff(columns(x),l)]
def drop(x, l): return exc(x, l), x[l]
def pop(x, l): return exc(x, l), x[l]
def append(l, r):
g = copy(l);
if type(g)!= list: g = [g]
if type(r) == list:
for a in r: g.append(a)
else: g.append(r)
return g
#-------------
#-------------
#-------------
#-------------
#------------------------------------ OTHER ANALYTICAL METHODS ------------------------------------#
#-------------------- Uniques and counting and sorting --------------------#
def count(x):
try: return x.count()
except: return len(x)
def missing(x, row = 0, col = 1):
if row!=0 or col!=1: x = x.T
try: return (pd.isnull(x)).sum()
except: return (np.isnan(x)).sum()
#-----------------------------
def unique(x, dropna = False):
if dropna == True: x = notnull(x)
if type(x) == pd.Series: return list(x.unique())
elif type(x) == pd.DataFrame: return {col:list(x[col].unique()) for col in columns(x)}
else:
u = []
for a in x:
if dropna == True:
if a not in u and a!=np.nan: u.append(a)
else:
if a not in u: u.append(a)
del a
return u
def nunique(x, dropna = False):
if istable(x)==True: return x.nunique()
else:
u = []; n = 0
for a in x:
if dropna == True:
if a not in u and a!=np.nan: u.append(a); n += 1
else:
if a not in u: u.append(a); n += 1
del u,a
return n
def cunique(x, dropna = False):
if type(x) == pd.Series: return x.value_counts(dropna = dropna)
elif type(x) == pd.DataFrame: return {col:x[col].value_counts() for col in columns(x)}
else:
u = {}
for a in x:
if dropna == True:
if a not in u and a!=np.nan: u[a]=1
else: u[a]+=1
else:
if a not in u: u[a]=1
else: u[a]+=1
del a
return u
def punique(x, dropna = False):
return round(nunique(x, dropna = dropna)/(count(x)+missing(x)*(dropna==False)*1)*100,4)
#-----------------------------
def reverse(x):
if type(x) == pd.Series and dtype(x) == 'bool': return x == False
elif istable(x)==1: return x.iloc[::-1]
elif type(x) == list: return x[::-1]
elif type(x) == dict: return {i[1]:i[0] for i in x.items()}
#-----------------------------
def sort(x, by = None, asc = True, ascending = True, des = False, descending = False):
if type(x) == list:
if asc == ascending == True and des == descending == False: return sorted(x)
else: return reverse(sorted(x))
else:
if type(x) == pd.Series:
if asc == ascending == True and des == descending == False: return x.sort_values(ascending = True)
else: return x.sort_values(ascending = False)
else:
if by is None: col = columns(x)
else: col = by
if asc == ascending == True and des == descending == False: return x.sort_values(ascending = True, by = col)
else: return x.sort_values(ascending = False, by = col)
def fsort(x, by = None, keep = False, asc = True, ascending = True, des = False, descending = False):
if type(x)==pd.Series: x = table(x); x = reset(x, column = True, string = True); by = columns(x)[0];
if type(by)==list: by = by[0]
if type(x) == list:
from collections import Counter
c = copy(x)
if asc == ascending == True and des == descending == False: c.sort(key=Counter(sort(c, asc = True)).get, reverse = True); return c
else: c.sort(key=Counter(sort(c, asc = False)).get, reverse = False); return c
elif by is None: print("Please specify column to sort by: fsort(x, by = 'Name')")
else:
f = by; fg = reset(table(x[f].value_counts()))
ff = f+"_Freq"; fg.columns = [f,ff]
del ff
try: fg[f+"_Length"] = fg[f].str.len()
except: fg[f+"_Length"] = fg[f]
df = x.merge(fg, how = "outer")
if asc == ascending == True and des == descending == False: df = sort(df, [f+"_Freq",f+"_Length"], asc = True)
else: df = sort(df, [f+"_Freq",f+"_Length"], asc = False)
if keep == True: return df
else: l = columns(df); l.remove(f+"_Freq"); l.remove(f+"_Length")
return df[l]
#-------------
#-------------
#-------------
#-------------
#------------------------------------ BASIC ANALYSIS METHODS ------------------------------------#
#-------------------- Ratios and detections --------------------#
def freqratio(x):
counted = cunique(x)
if type(x) == pd.Series:
try: return counted[0]/counted[1]
except: return 1
else:
empty = []
for col in columns(x):
try: empty.append(counted[col].iloc[0]/counted[col].iloc[1])
except: empty.append(1)
tab = table(empty); tab.index = columns(x); return tab[0]
def isid(x):
for col in columns(x):
if (nunique(x[col]) == len(x)) or "id" in col.lower() or "index" in col.lower(): return col
    return ''
def pzero(x): return sum(x==0, axis = 0)/count(x)*100
#-------------
#-------------
#-------------
#-------------
#------------------------------------ MATHEMATICAL METHODS ------------------------------------#
#-------------------- Statistical methods --------------------#
def var(x, axis = 0, dof = 1):
try: return x.var(axis = axis, ddof = dof)
except: return np.nanvar(x, axis = axis, ddof = dof)
def std(x, axis = 0, dof = 1):
try: return x.std(axis = axis, ddof = dof)
except: return np.nanstd(x, axis = axis, ddof = dof)
#-------------
def mean(x, axis = 0):
try: return x.mean(axis = axis)
except: return np.nanmean(x, axis = axis)
def median(x, axis = 0):
try: return x.median(axis = axis)
except: return np.nanmedian(x, axis = axis)
def mode(x, axis = 0):
try: return series(x).mode()[0]
except: return x.mode(axis = axis).iloc[0]
def rng(x, axis = 0):
try: return conts(x).max(axis = axis) - conts(x).min(axis = axis)
except:
try: return max(x)-min(x)
except: return np.nan
#-------------
def percentile(x, p, axis = 0):
if p > 1: p = p/100
try: return x.quantile(p, axis = axis)
except: return np.nanpercentile(x, p, axis = axis)
def iqr(x, axis = 0):
return percentile(x, 0.75, axis = axis) - percentile(x, 0.25, axis = axis)
#-------------
def skewness(x, axis = 0):
try: return x.skew(axis = axis)
except: return scipy.stats.skew(x, axis = axis, nan_policy='omit')
def skew(x, axis = 0): return skewness(x, axis)
def kurtosis(x, axis = 0):
try: return scipy.stats.kurtosis(x, axis = axis, nan_policy='omit')
except: return x.kurt(axis = axis)
def kurt(x, axis = 0): return kurtosis(x, axis)
#-------------
def pnorm(p, mean = 0, var = 1):
if p > 1: p = p/100
return scipy.stats.norm.cdf(p, loc=mean, scale=var)
def qnorm(q, mean = 0, var = 1):
if q > 1: q = q/100
return scipy.stats.norm.ppf(q, loc=mean, scale=var)
def CI(q, data, method = "mean",U = True, L = True):
if q > 1: q = q/100
norms = qnorm(q+(1-q)/2)*(std(data) / sqrt(len(data)) )
if method == "mean": u = mean(data) + norms; l = mean(data) - norms
if U == L == True: return (l,u)
elif U == True: return u
else: return l
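# Illustrative usage sketch (added for clarity; the helper name and the sample
# data are hypothetical): a 95% normal-approximation confidence interval.
def _ci_example():
    lower_bound, upper_bound = CI(95, [1, 2, 3, 4, 5])
    # roughly (1.61, 4.39): mean 3 +/- 1.96 * sd / sqrt(n)
    return lower_bound, upper_bound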
#-------------
#-------------
#-------------
#-------------
#------------------------------------ TYPES METHODS ------------------------------------#
#-------------------- Changing case --------------------#
def lower(x):
j = copy(x)
if type(x) == list:
for k in range(len(j)):
try: j[k] = j[k].lower()
except: pass;
return j
def upper(x):
j = copy(x)
if type(x) == list:
for k in range(len(j)):
try: j[k] = j[k].upper()
except: pass;
return j
#-------------------- Other types and conversions --------------------#
def int(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = x[y].astype("int64")
except:
try: x[y] = np.floor(x[y])
except: pass
return x
else:
try: return np.int64(df)
except:
try: return np.floor(df)
except: return df
def float(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = x[y].astype("float64")
except: pass
return x
else:
try: return np.float64(df)
except: return df
#-------------
def max(x, axis = 0):
if istable(x)==1: return conts(x).max()
else:
if shape(matrix(x))[0] == 1: return np.amax(x,axis=axis)
else: return np.amax(x)
def min(x, axis = 0):
if istable(x)==1: return conts(x).min()
else:
if shape(matrix(x))[0] == 1: return np.amin(x)
else: return np.amin(x,axis=axis)
#-------------
def argcheck(df, args):
if len(args)==0: col = columns(df)
elif type(args[0])!=list: col = list(args)
else: col = args[0]
return copy(df), col
#-------------
def abs(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.abs(x[y])
except: pass
return x
else:
try: return np.abs(df)
except: return df
#-------------
def log(df, *args, shift = 0):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.log(x[y]+shift)
except: pass;
return x
else:
try: return np.log(df+shift)
except: return df
#-------------
def exp(df, *args, shift = 0):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.exp(x[y])+shift
except: pass;
return x
else:
try: return np.exp(df)+shift
except: return df
#-------------
def sin(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.sin(x[y])
except: pass;
return x
else:
try: return np.sin(df)
except: return df
#-------------
def cos(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.cos(x[y])
except: pass;
return x
else:
try: return np.cos(df)
except: return df
#-------------
def sqrt(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.sqrt(x[y])
except: pass;
return x
else:
try: return np.sqrt(df)
except: return df
#-------------
def floor(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.floor(x[y])
except: pass;
return x
else:
try: return np.floor(df)
except: return df
#-------------
def ceiling(df, *args):
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
try: x[y] = np.ceil(x[y])
except: pass;
return x
else:
try: return np.ceil(df)
except: return df
def ceil(df, *args): return ceiling(df, *args)
#-------------
def sum(x, axis = 1):
try: return x.sum(axis = axis)
except: return np.nansum(x, axis = 0)
#-------------
#-------------
#-------------
#-------------
#------------------------------------ MATHEMATICAL METHODS ------------------------------------#
#-------------------- Linear Algebra --------------------#
from numpy import dot, multiply, multiply as mult
def array(*args):
if len(args)==1:
arrs = np.array(args[0])
try:
if shape(arrs)[1]==1: arrs = arrs.T[0]
except: pass;
return arrs
else:
try: return np.array(args)
except: return np.array([args])
def matrix(*args): return np.matrix(array(args))
def T(x):
if type(x)==np.array: return matrix(x).T
else:
try: return x.T
except: return array(x).T
def inv(x):
try: return np.linalg.inv(x)
except: print("Either det(x)=0 or not square matrix")
def det(x):
try: return np.linalg.det(x)
except: print("Not square matrix")
#-------------
def eye(x): return np.eye(x)
def I(x): return np.eye(x)
#-------------
def ones(x, s = 1):
if s == 1: return np.ones((x,x))
else: return np.ones(x)
def J(x, s = 1): return ones(x, s)
#-------------
def zeros(x, s = 1):
if s == 1: return np.zeros((x,x))
else: return np.zeros(x)
def zeroes(x, s = 1): return zeros(x, s)
def Z(x, s = 1): return zeros(x, s)
#-------------
def triu(matrix): return np.triu(matrix)
def tril(matrix): return np.tril(matrix)
#-------------
def trace(A): return np.trace(A)
def tr(A): return trace(A)
def diag(A): return np.diagonal(A)
#-------------
def repmat(A, *args):
if len(args) == 2: return np.tile(A, (args[0],args[1]))
elif len(args) == 1: return np.tile(A, args[0])
else: print("Error")
def tile(A, *args): return repmat(A, *args)
#-------------
#-------------
#-------------
#-------------
#------------------------------------ TABLE METHODS ------------------------------------#
#-------------------- Opening and editing --------------------#
def read(x):
if type(x) == list:
for y in x:
if "csv" in y: return clean(pd.read_csv(y))
else:
if "csv" in x: return clean(pd.read_csv(x))
#-------------
def string(dfx, *args):
x = copy(dfx); df = copy(dfx)
if type(df) == pd.DataFrame:
x, col = argcheck(df, args)
for y in col:
x[y] = x[y].astype("str")+"*"
return x
elif type(df) == pd.Series:
df = df.astype("str")+"*"
return df
else: return str(df)
#-------------
def clean(x, *args):
def cleancol(x):
if dtypes(x) == 'obj':
c = x.str.replace(",","").str.replace(" ","").str.replace("-","").str.replace("%","").str.replace("#","")
else: c = x
try:
if ((sum(int(c)) - sum(float(c)) == 0) or sum(int(c)-float(c))==0) and count(c) == len(c): return int(c)
else: return float(c)
except:
return x
x = x.replace(np.inf, np.nan).replace(-np.inf, np.nan).replace("NaN",np.nan)
df = copy(x)
if type(x) == pd.Series: return cleancol(x)
else:
if len(args)==0: col = columns(df)
elif type(args[0]) != list: col = list(args)
else: col = args[0]
for y in col: df[y] = cleancol(df[y])
return df
#-------------
#-------------
#-------------
#-------------
#------------------------------------ DATA ANALYTICS ------------------------------------#
#-------------------- Analyse --------------------#
def analyse(c, y = None, extra = ["skew"], dec = 2, colour = True, limit = True, graph = True):
x = copy(c)
if y is not None:
if type(y) == str: x, y = drop(x, y)
first = describe(x, extra = extra, clean = False); cols = columns(first)
df = hcat(guess_importance(x,y), first)
df.columns = append("Importance", cols)
df = round(sort(df, by = ["Importance","FreqRatio","%Unique"], des = True),dec)
if limit == True: df = df[df["Importance"]>0]
if graph == True: plot(x = index(df)[0], y = index(df)[1], z = index(df)[2], hue = y, data = c)
if colour == True: df = df.style.bar(align='mid', color=pd_colour, width = 80).set_properties(**{'max-width': '90px'})
return df
def describe(x, extra = ["skew"], clean = True):
normal = hcat(mean(x), median(x), rng(x), freqratio(x), mode(x), punique(x))
normal.columns = ["Mean","Median","Range", "FreqRatio", "Mode","%Unique"]
if type(extra)!=list: extra = [extra];extra = lower(extra);
for j in extra:
before = columns(normal)
if "skew" in j: normal = hcat(normal, skew(x)); normal.columns = append(before, "Skewness")
elif "cat" in j: normal = hcat(normal, iscat(x)); normal.columns = append(before, "IsCategorical")
elif "iqr" in j: normal = hcat(normal, iqr(x)); normal.columns = append(before, "InterQuartileRng")
elif "var" in j: normal = hcat(normal, var(x)); normal.columns = append(before, "Variance")
elif "std" in j or "sd" in j: normal = hcat(normal, std(x)); normal.columns = append(before, "SD")
elif "min" in j: normal = hcat(normal, np.min(x)); normal.columns = append(before, "Min")
elif "kurt" in j: normal = hcat(normal, kurtosis(x)); normal.columns = append(before, "Kurt")
elif "max" in j: normal = hcat(normal, np.max(x)); normal.columns = append(before, "Max")
elif "punq" in j: normal = hcat(normal, punique(x)); normal.columns = append(before, "%Unique")
elif "nunq" in j: normal = hcat(normal, nunique(x)); normal.columns = append(before, "No.Unique")
df = sort(normal, by = "FreqRatio")
if clean == True: return df.replace(np.nan,"")
else: return df
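# Hedged examples for the two analytics entry points above (`df` and "target" are placeholders):
# >>> describe(df, extra = ["skew", "iqr"])      # per-column summary, sorted by FreqRatio
# >>> analyse(df, y = "target")                  # same summary plus a guessed Importance column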
#-------------------- Var-Check and FreqRatio Check --------------------#
def varcheck(x, freq = "mean", unq = 0.1, colour = True, limit = True, output = False):
freqs = freqratio(x); unqs = punique(x)
if freq == "mean": fd = (freqs>=CI(q=0.99,data =freqs,L=False))*1
else: fd = (freqs>freq)*1
df = hcat(freqs,fd,unqs,(unqs<=unq)*1,var(x))
df.columns = ["FreqRatio","BadFreq?","%Unique","BadUnq?","Var"]
df["BadVar?"] = (df["Var"].fillna(1000)<=0.1)*1
df["BAD?"] = (df["BadFreq?"]+df["BadUnq?"]+df["BadVar?"])>0
df = round(sort(df, by =["BAD?","BadVar?","BadFreq?","BadUnq?","FreqRatio","%Unique","Var"], des = True),2)
if limit == True: df = T(T(df)[((df["BAD?"]==True).index[df["BAD?"]==True]).tolist()])
if colour == True:
df = df.style.bar(align='zero', color=pd_colour, width = 80, subset=["FreqRatio","%Unique","Var"])
df = df.apply(highlight_one, subset = ["BadFreq?","BadUnq?","BadVar?"]).apply(highlight_true, subset=["BAD?"])
df = df.set_properties(**{'max-width': '90px'})
if output == True: return exc(x, index(df))
else: return df
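# Sketch: flag near-constant / low-variance columns, or drop them outright with output=True.
# >>> report  = varcheck(df)                 # styled report of suspicious columns
# >>> reduced = varcheck(df, output = True)  # DataFrame with the flagged columns removed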
#-------------------- Correlations --------------------#
def corr(x, table = False, limit = 20):
if table == False:
corrs = round(x.corr()*100)
sortby = sort(sum(abs(corrs)-100),des=False)
corrs = corrs[index(sortby)]
corrs = T(T(corrs)[index(sortby)])
if shape(corrs)[0]>limit: corrs = T(T(corrs.iloc[0:limit]).iloc[0:limit])
corrs = T(reverse(T(reverse(corrs))))
cmap = sb.light_palette("black", as_cmap=True)
show = abs(corrs).style.background_gradient(cmap).set_properties(**{'max-width': '50px', 'font-size': '8pt'
,'color':'black'})
return show
else:
try: return conts(x).corr()
except: print("Error. No continuous data")
def correlation(x, table = False): return corr(x, table)
def correlation_matrix(x, table = False): return corr(x, table)
def cor(x, table = False): return corr(x, table)
#-------------------- Feature Importance --------------------#
def guess_importance(df, y):
x = copy(df)
if type(y) == str:
try: y = x[y]
except:
print("No column for y")
x = dummies(x)
x_train, x_test, y_train, y_test = holdout(x, y, info = False);
def lightmodel(x_train, x_test, y_train, y_test, reg, seed = 1234):
try: import lightgbm as lgb
except: print("Cannot install"); raise
x_train = array(x_train); y_train = array(y_train); x_test = array(x_test); y_test = array(y_test)
if reg == True:
model = lgb.LGBMRegressor(objective='regression', num_leaves = 5, learning_rate = 0.1, n_estimators = 100, seed = seed)
model.fit(x_train, y_train, early_stopping_rounds = 10, eval_metric='l2', eval_set=[(x_test, y_test)],verbose=False)
return model
imps = lightmodel(x_train, x_test, y_train, y_test, reg = True).feature_importances_
tab = table(imps); tab.index = columns(x)
imps = dict(tab)[0]*100; cols = columns(df)
imp = {k:0 for k in cols}
for j in imps.keys():
for c in cols:
if c in j: imp[c] += imps[j]
return series(imp)
def guess_imp(df, y): return guess_importance(df, y)
#-------------------- Data Reduction --------------------#
## https://stackoverflow.com/questions/29294983/how-to-calculate-correlation-between-all-columns-and-remove-highly-correlated-on
def remcor(x, limit = 0.9):
dataset = copy(x)
col_corr = set(); corr_matrix = dataset.corr()
for i in range(len(corr_matrix.columns)):
for j in range(i):
if corr_matrix.iloc[i, j] >= limit:
colname = corr_matrix.columns[i]
col_corr.add(colname)
if colname in dataset.columns: del dataset[colname]
return dataset
def remcorr(x,limit=0.9): return remcor(x,limit)
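# Example (assumes a purely numeric DataFrame): drop one column from every pair whose pairwise
# correlation reaches the limit, keeping the first-seen column of each pair.
# >>> slim = remcor(df, limit = 0.95)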
#-------------
#https://stackoverflow.com/questions/28816627/how-to-find-linearly-independent-rows-from-a-matrix
def independent(A):
try: import sympy
except:
print("Cannot install"); raise
_, inds = sympy.Matrix(A).T.rref()
print("Lin Indp rows are: "+str(inds))
return A[list(inds)]
#-------------
#-------------
#-------------
#-------------
#------------------------------------ DUMMIFICATION ------------------------------------#
#-------------------- Dummies method --------------------#
def dummies(x, dummies = True, codes = False, freq = True, na = "inplace", nanew = True, col = None, ascending = True, cat = True, drop = True,
ids = False):
    try:
        if dtypes(x)[0] in ('int', 'float') and type(x)==pd.Series: return x
    except:
        if dtypes(x) in ('int', 'float') and type(x)==pd.Series: return x
if type(x)!=pd.DataFrame: x = table(x)
df = copy(x)
if ids == False: df = exc(df, isid(df))
if col is None:
if cat == True: col = catcol(df)
else: col = objcol(df)
elif type(col)!=list: col = [col]
if dummies == True:
if "in" in na:
for j in col:
dummified = pd.get_dummies(x[j], dummy_na = nanew)
dummified.columns = [str(j)+"_"+str(c) for c in columns(dummified)]
if j in nacol(x): dummified.iloc[isnull(x[j]).index]=np.nan
df = hcat(df, dummified)
else: df = pd.get_dummies(x, dummy_na = nanew, columns = col)
if drop == True: return notobj(zerodrop(df))
else: return zerodrop(df)
else:
if freq == True:
code = {}
for j in col:
part = {};
try: i = min(df[j]);
except: i = 0;
            if dtype(df[j]) not in ('int', 'float'): d = fsort(df, by = j)[j]
else: d = sort(df, by = j)[j]
for k in d:
if pd.isnull(k)==False:
try: part[k]
except: part[k] = i; i+=1
code[j] = part
df[j]=df[j].replace(part)
del part,i,d,k
else:
code = {}
for j in col:
code[j] = reverse(dict(enumerate(df[j].astype("category").cat.categories)))
df[j]=df[j].replace(code[j])
if drop == True: df = notobj(df)
if shape(df)[1]==1: df = df[columns(df)[0]]
if codes == True: return df,code
else: return df
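# Hedged usage sketch for `dummies` (placeholder frame `df` with categorical columns):
# >>> wide = dummies(df)                                            # one-hot encoding
# >>> coded, mapping = dummies(df, dummies = False, codes = True)   # ordinal codes + lookup dict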
#-------------------- Quantile conversion --------------------#
def discretise(x, n = 4, smooth = True, codes = False):
if codes == False: codes = None
else: codes = False
if smooth == True:
try: return pd.qcut(x, q = n, duplicates = 'drop', labels = codes)
        except: return pd.cut(x, bins = n, labels = codes)
else:
return pd.cut(x, bins = n, labels = codes)
def qcut(x, n = 4, smooth = True, codes = False): return discretise(x, n, smooth, codes)
#-------------
#-------------
#-------------
#-------------
#------------------------------------ ADVANCED DATA ANALYTICS ------------------------------------#
#-------------------- Skewness Analysis --------------------#
def topositive(y, info = False):
x = copy(y); d = conts(x)
notgood = ((np.min(d)<=0).index[np.min(d)<=0]).tolist()
add = np.abs(np.min(d[notgood]))+1
d[notgood] = d[notgood]+add
x[columns(d)] = d
if info == False: return x
else: return x,add
#-------------
def boxcox(x):
if type(x) == pd.Series:
k = (conts(x)+abs(min(conts(x)))+1)
lm = scipy.stats.boxcox(k)[1]
if lm == 0: return log(x), lm
else: return ((k**lm)-1)/lm, lm
else:
df = []; lms = []
for col in contcol(x):
k = (x[col]+abs(min(x[col]))+1)
lm = scipy.stats.boxcox(k)[1]
if lm == 0: df.append(log(x[col])); lms.append(lm)
else: df.append(((k**lm)-1)/lm); lms.append(lm)
return T(table(df)), array(lms)
#-------------
def unskew(x, info = False):
def show(q, df):
if q == 0: return (df, "normal")
elif q == 1: return (sqrt(df), "sqrt")
else: return (boxcox(df)[0], "boxcox")
original = copy(x)
df = topositive(conts(x))
skews = np.abs(skew(df))
sqrted = sqrt(df)
boxcoxed = boxcox(df)[0]
comp = hcat(skew(df),skew(sqrted),skew(boxcoxed)); comp.columns = ["norm","sqrt","box"]
res = np.abs(comp.T)
r = []; out = []
for col in res:
p = 0
for i in res[col]:
if i == np.min(res[col]):
f = show(p, df[col]); r.append(f[1]); out.append(f[0]); break
else: p += 1
first = out[0]
for c in out[1:]: first = hcat(first, c)
del c, out, res, comp, sqrted, skews, boxcoxed, show
original[columns(first)] = first
res = table(r); res.index = columns(first)
if info == True: return original, res[0]
else: return original
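# Sketch: `unskew` picks, per continuous column, whichever of identity / sqrt / boxcox leaves the
# smallest absolute skew; info=True also returns which transform was chosen for each column.
# >>> transformed, chosen = unskew(df, info = True)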
#-------------
def outlier(df, method = "forest", poutlier = 0.025, sd = 3.5, iqr = 1.5, indicate = True, n_estimators = 100):
x = copy(df)
if "for" in method or "tree" in method:
from sklearn.ensemble import IsolationForest
        df = dummies(x, na = "clear"); df = df.fillna(df.median())  # fill remaining NaNs with column medians
model = IsolationForest(n_estimators = n_estimators, n_jobs=-1, bootstrap = True, contamination = poutlier)
model.fit(df); preds = model.predict(df)
res = x.iloc[np.where(preds==-1)[0]]
else:
f = dummies(x, na = "clear"); df = topositive(f.fillna(f.median()))
if "std" in method or "sd" in method:
#https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data
if len(shape(df)) == 1: df = df[:,None]
df = unskew(df)
meds = median(df, axis=0)
diff = sum((df - meds)**2, axis=1)
diff = sqrt(diff); mad = median(diff)
z = 0.6745 * diff / mad
out = (z>sd)==True
where = out.index[out].tolist()
res = x.iloc[where]
elif "iqr" in method:
first = percentile(df, p = 0.25)
last = percentile(df, p = 0.75)
            iqrred = last-first
            where = sum((df>(last+iqr*iqrred))|(df<(first-iqr*iqrred)))!=0
res = x.iloc[where.index[where].tolist()]
print("No. outliers = "+str(len(res)))
if indicate == True:
x["IsOutlier"] = 0
try: x["IsOutlier"].iloc[[res.index.tolist()]] = 1
except: pass;
return x
else: return res
def isoutlier(df, method = "forest", poutlier = 0.025, sd = 3.5, iqr = 1.5, indicate = False, n_estimators = 100):
d = outlier(df, method = method, poutlier = poutlier, sd = sd, iqr = iqr, indicate = True, n_estimators = n_estimators)
if indicate == False: return exc(d.iloc[(d["IsOutlier"]==1).index[d["IsOutlier"]==1]], "IsOutlier")
else: return d.iloc[(d["IsOutlier"]==1).index[d["IsOutlier"]==1]]
def notoutlier(df, method = "forest", poutlier = 0.025, sd = 3.5, iqr = 1.5, indicate = False, n_estimators = 100):
d = outlier(df, method = method, poutlier = poutlier, sd = sd, iqr = iqr, indicate = True, n_estimators = n_estimators)
if indicate == False: return exc(d.iloc[(d["IsOutlier"]==0).index[d["IsOutlier"]==0]], "IsOutlier")
else: return d.iloc[(d["IsOutlier"]==0).index[d["IsOutlier"]==0]]
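# Hedged examples for the outlier helpers (isolation forest by default; `df` is a placeholder):
# >>> flagged = outlier(df, indicate = True)     # original rows plus a 0/1 "IsOutlier" column
# >>> kept    = notoutlier(df, method = "iqr")   # only the rows that survive the IQR rule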
#-------------
def zerodrop(x): return exc(x, (pzero(x)==100).index[pzero(x)==100].tolist())
#-------------
#-------------
#-------------
#-------------
#------------------------------------ DATA CLEANING AND CONVERSION ------------------------------------#
#-------------------- Normal statistic filling --------------------#
def fillobj(x, method):
data = copy(clean(x))
missed = nacol(data[objcol(data)]); missdf = data[missed]
if method in ["mode","freq","frequency"]: data[missed] = data[missed].fillna(mode(missdf))
elif method in ["zero","missing","none"]: data[missed] = data[missed].fillna("Missing_Data")
elif method in ["mix","half","halved"]:
ins = (count(x)<0.75*len(x)).index[count(x)<0.75*len(x)]
data[ins] = data[ins].fillna("Missing_Data")
other = diff(columns(x), ins)
data[other] = data[other].fillna(mode(x[other]))
return data
#-------------
def fillcont(x, method):
data = copy(clean(x))
missed = nacol(conts(data)); missdf = data[missed]
if method in ["mean","avg","average"]: data[missed] = data[missed].fillna(mean(missdf))
elif method in ["median"]: data[missed] = data[missed].fillna(median(missdf))
elif method in ["mode","freq","frequency"]: data[missed] = data[missed].fillna(mode(missdf))
return data
#-------------------- Full methods --------------------#
def complete(df, method = None, objects = None, continuous = None, knn = 5, max_unique = 20, epoch = 100, mice = "forest", ids = False):
x = copy(df); imputation = ["bpca","pca","knn","mice","svd"]; imped = 0
if ids == False: x = exc(x, isid(x))
if method is not None: meth = method.lower()
else: meth = "a"
if method is None and objects is None and continuous is None: meth = 'knn'
if meth in imputation or objects in imputation or continuous in imputation:
imped = 1
try: import fancyimpute
except:
print("Cannot import"); raise
def matching(method, objects, continuous, thingo):
if method is not None:
if thingo in method: return 1
else: return 0
else:
if thingo in objects or thingo in continuous: return 1
else: return 0
res,codes = dummies(x, codes = True, dummies = False)
intcols = (dtypes(res)=='int').index[dtypes(res)=='int'].tolist()
if matching(meth, objects, continuous, "knn") == 1: dfilled = fancyimpute.KNN(k=knn, verbose = 0).complete(res)
elif matching(meth, objects, continuous, "svd") == 1: dfilled = fancyimpute.SoftImpute(verbose = 0).complete(res)
elif matching(meth, objects, continuous, "mice") == 1:
print("Please wait...")
dfilled = mice_complete(res, epochs = int(epoch/10), impute_method = mice, strings = objcol(x))
print("Done")
else:
print("Please wait...")
dfilled = bpca_complete(res, epochs = epoch)
print("Done")
dfilled = table(dfilled); dfilled.columns = columns(res)
for col in codes: x[col] = squeeze(series(int(round(dfilled[col],0))), upper = len(codes[col])-1, lower = 0).replace(reverse(codes[col]))
for col in contcol(x): x[col] = dfilled[col]
for col in contcol(x): x[col] = squeeze(x[col], lower = np.min(df[col]), upper = np.max(df[col]))
if (missingcol(x) != [] and objects in imputation) or meth in imputation: x = fillobj(x, "mix")
elif objects is not None: x[objcol(x)] = fillobj(df[objcol(df)], objects)
if continuous not in imputation and continuous is not None: x[contcol(x)] = fillcont(df[contcol(df)], continuous)
x = round(x, 4)
x[intcols] = int(round(x[intcols]))
return x
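# Usage sketch for the full imputer (assumes `df` contains missing values):
# >>> filled = complete(df, method = "knn", knn = 3)
# >>> filled = complete(df, method = "mice", mice = "boost", epoch = 50)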
#-------------------- BPCA --------------------#
#http://ishiilab.jp/member/oba/tools/BPCAFill.html
def bpca_complete(x, epochs = 100):
decimals = 4
y = copy(x); cols = y.columns.tolist()
maximum = np.int(np.max(y.max())*999)
means = round(y.mean(),decimals); sd = round(y.std(),decimals); y = round((y-means)/sd,decimals)
y[missingcol(y)] = y[missingcol(y)].fillna(maximum)
mat = float(np.matrix(y))
N,d = mat.shape; q = d-1
yest = np.copy(mat); yest[yest==maximum]=0
missidx = {}; bad = np.where(mat==maximum)
for a in bad[0]: missidx[a] = []
for a in range(len(bad[0])): missidx[bad[0][a]].append(bad[1][a])
nomissidx = {}; good = np.where(mat!=maximum)
for a in good[0]: nomissidx[a] = []
for a in range(len(good[0])): nomissidx[good[0][a]].append(good[1][a])
gmiss = list(set(bad[0]))
gnomiss = list(set(good[0]))
covy = np.cov(yest.T)
U, S, V = np.linalg.svd(np.matrix(covy))
U = (U.T[0:q]).T; S = S[0:q]*np.eye(q); V = (V.T[0:q]).T
mu = np.copy(mat); mu[mu==maximum]=np.nan; mu = np.nanmean(mu, 0)
W = U*np.sqrt(S); tau = 1/ (np.trace(covy)-np.trace(S)); taumax = 1e20; taumin = 1e-20; tau = np.amax([np.amin([tau,taumax]),taumin])
galpha0 = 1e-10; balpha0 = 1; alpha = (2*galpha0 + d)/(tau*np.diag(W.T*W)+2*galpha0/balpha0)
    gmu0 = 0.001; btau0 = 1; gtau0 = 1e-10; SigW = np.eye(q)
tauold = 1000
for epoch in range(epochs):
Rx = np.eye(q)+tau*W.T*W+SigW; Rxinv = np.linalg.inv(Rx)
idx = gnomiss; n = len(idx)
dy = mat[idx,:] - np.tile(mu,(n,1)); x = tau * Rxinv * W.T * dy.T
Td = dy.T*x.T; trS = np.sum(np.multiply(dy,dy))
for n in range(len(gmiss)):
i = gmiss[n]
dyo = np.copy(mat)[i,nomissidx[i]] - mu[nomissidx[i]]
Wm = W[missidx[i],:]; Wo = W[nomissidx[i],:]
Rxinv = np.linalg.inv( Rx - tau*Wm.T*Wm ); ex = tau * Wo.T * np.matrix(dyo).T; x = Rxinv * ex
dym = Wm * x; dy = np.copy(mat)[i,:]
dy[nomissidx[i]] = dyo; dy[missidx[i]] = dym.T
yest[i,:] = dy + mu
Td = Td + np.matrix(dy).T*x.T; Td[missidx[i],:] = Td[missidx[i],:] + Wm * Rxinv
trS = trS + dy*np.matrix(dy).T + len(missidx[i])/tau + np.trace( Wm * Rxinv * Wm.T )
Td = Td/N; trS = trS/N; Rxinv = np.linalg.inv(Rx);
Dw = Rxinv + tau*Td.T*W*Rxinv + np.diag(alpha)/N; Dwinv = np.linalg.inv(Dw);
W = Td * Dwinv;
tau = (d+2*gtau0/N)/(trS-np.trace(Td.T*W) + (mu*np.matrix(mu).T*gmu0+2*gtau0/btau0)/N)[0,0];
SigW = Dwinv*(d/N);
alpha = (2*galpha0 + d)/ (tau*np.diag(W.T*W)+np.diag(SigW)+2*galpha0/balpha0).T
if np.abs(np.log10(tau)-np.log10(tauold)) < 1e-4: break;
tauold = tau
out = table(yest)
out.columns = cols
out = (out*sd)+means
return out
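# (Note on the loop above: it is the variational-Bayes EM iteration from the BPCAFill reference —
#  the E-step reconstructs missing entries from the current W/mu/tau, the M-step updates W, tau and
#  the ARD terms, and iteration stops once log10(tau) moves by less than 1e-4.)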
#-------------------- MICE --------------------#
#https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3074241/
def mice_complete(res, strings, epochs = 10, impute_method = "forest"):
x = copy(clean(res)); original = copy(x)
filled = fillcont(original, method = "median")
    from sklearn.model_selection import train_test_split
for epoch in range(epochs):
for missing_col in missingcol(original):
null_data = isnull(original[missing_col]).index
not_null = filled.iloc[notnull(original[missing_col]).index]
y = not_null.pop(missing_col)
if "forest" in impute_method or "tree" in impute_method or "bag" in impute_method:
from sklearn.ensemble import RandomForestRegressor as rfr
from sklearn.ensemble import RandomForestClassifier as rfc
if missing_col in strings: model = rfc(n_jobs = -1, n_estimators=epochs*4)
else: model = rfr(n_jobs = -1, n_estimators=epochs*4)
elif "linear" in impute_method or "log" in impute_method:
from sklearn.linear_model import LinearRegression as linreg
from sklearn.linear_model import LogisticRegression as logreg
if missing_col in strings: model = logreg(n_jobs = -1, solver = 'sag', multi_class = "multinomial")
else: model = linreg(n_jobs = -1)
elif "boost" in impute_method:
from lightgbm import LGBMRegressor as xgbr
from lightgbm import LGBMClassifier as xgbc
if missing_col in strings: model = xgbc(learning_rate = 10/epochs, n_estimators=epochs*4, nthread =-1)
else: model = xgbr(learning_rate = 10/epochs, n_estimators=epochs*4, nthread=-1)
train_x, test_x, train_y, test_y = train_test_split(not_null, y, test_size=0.33, random_state=42)
model.fit(train_x, train_y)
filled[missing_col].iloc[null_data] = model.predict(exc(filled.iloc[null_data], missing_col))
return filled
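# (Each MICE sweep refits one model per missing column on the currently imputed data and overwrites
#  only the originally-missing cells, so successive epochs let the imputations stabilise.)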
#-------------------- Squeeze or round functions --------------------#
def squeeze(df, lower = 0, upper = 1):
x = copy(df)
x[x<lower] = lower; x[x>upper] = upper
return x
#-------------
#-------------
#-------------
#-------------
#------------------------------------ MACHINE LEARNING ------------------------------------#
#-------------------- Boosting --------------------#
def lightgbm(x_train, x_test, y_train, y_test, noclass = None, lr = 0.05, method = "dart", gpu = False, trees = 100, metric = None,
depth = -1, splits=2, leaves=31.123, min_weight=20.123, features=1, bins=5.123, impurity=1e-3+0.000001, jobs=-1, state=None, bagging = 0.1,
stop = 10, l1 = 0, l2 = 1, dropout = 0.1, skipdrop = 0.5, verbose = False, info = True):
if noclass is None:
try: noclass = nunique(array(hcat(y_train,y_test)))
except: noclass = nunique(array(vcat(y_train,y_test)))
if gpu == True: gpu = "gpu"
else: gpu = "cpu"
if min_weight <1: min_weight = int(min_weight*(len(vcat(x_train,y_train))))
if bagging != False: bagged = 1;
else: bagged = 0;
if verbose == True: verbose = 1;
else: verbose = 0;
leaves = int(leaves); min_weight = int(min_weight); bins = int(bins)
try: import lightgbm as lgb
except:
print("Cannot import"); raise
x_train = array(x_train); y_train = array(y_train); x_test = array(x_test); y_test = array(y_test)
train_data = lgb.Dataset(x_train,label=y_train)
mets = metrics(noclass,"lightgbm")
param = {'num_leaves':leaves, 'application':mets[0],'max_depth':depth,'learning_rate':lr,'num_iterations':trees, 'device':gpu,
             'metric':mets[1],'min_sum_hessian_in_leaf':impurity,'feature_fraction':features,
'min_data_in_bin':bins,'bagging_fraction':bagging,'bagging_freq':bagged,'early_stopping_round':stop,'lambda_l1':l1,
'lambda_l2':l2,'verbose':verbose,'nthread':jobs}
if method == "dart": param['drop_rate'] = dropout; param['skip_drop'] = skipdrop
elif mets[1] == 'multiclass': param['num_class'] = noclass
print("--------------------------------\nLightGBM: Training...")
modeller=lgb.train(param,train_data,trees)
print("Finished")
if info == True:
        if mets[0] in ('binary', 'multiclass'): preds = toclasses(modeller.predict(x_test), unique(hcat(y_train,y_test)))
else: preds = modeller.predict(x_test)
for k in list(mets[2].keys()):
if k != 'rmse': print("Score = "+str(k)+" = "+str(mets[2][k](y_test, preds)))
else: print("Score = "+str(k)+" = "+str(mets[2][k](y_test, preds)**0.5))
return modeller
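# Hedged example: train a DART-boosted model on a holdout split (`x`, `y` are placeholder data):
# >>> x_train, x_test, y_train, y_test = holdout(x, y)
# >>> model = lightgbm(x_train, x_test, y_train, y_test, trees = 200, lr = 0.05)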
#-------------------- RF --------------------#
def randomforest(x_train, x_test, y_train, y_test, noclass = None, lr = 0.05, method = "dart", gpu = False, trees = 100, metric = None,
depth = -1, splits=2, leaves=31.123, min_weight=20, features=1, bins=5.123, impurity=1e-3+0.000001, jobs=-1, state=None, bagging = 0.1,
stop = 10, l1 = 0, l2 = 1, dropout = 0.1, skipdrop = 0.5, verbose = False, info = True, addon = False):
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
if noclass is None:
try: noclass = nunique(array(hcat(y_train,y_test)))
except: noclass = nunique(array(vcat(y_train,y_test)))
if depth == -1: depth = None;
if method not in ["gini","entropy"]: method = "gini";
if features == 1: features = "auto";
if impurity == (1e-3+0.000001): impurity = 1e-07;
if leaves == 31.123: leaves = None;
if min_weight == 20.123: min_weight = 0;
if bins == 5.123: bins = 1;
    if leaves is not None: leaves = int(leaves)
    bins = int(bins)
x_train = array(x_train); y_train = array(y_train); x_test = array(x_test); y_test = array(y_test)
mets = metrics(noclass,"randomforest")
if mets[0] != 'regression':
modeller = RandomForestClassifier(n_estimators=trees, criterion=method, max_depth=depth, min_samples_split=splits, min_samples_leaf=bins,
min_weight_fraction_leaf=0.0, max_features=features, max_leaf_nodes=leaves, min_impurity_split=impurity,
bootstrap=True, oob_score=info, n_jobs=jobs, random_state=state, verbose=verbose, warm_start=addon)
else:
modeller = RandomForestRegressor(n_estimators=trees, criterion="mse", max_depth=depth, min_samples_split=splits, min_samples_leaf=bins,
min_weight_fraction_leaf=0.0, max_features=features, max_leaf_nodes=leaves, min_impurity_split=impurity,
bootstrap=True, oob_score=info, n_jobs=jobs, random_state=state, verbose=verbose, warm_start=addon)
print("--------------------------------\nRandomForest: Training...")
modeller.fit(x_train,y_train)
print("Finished")
if info == True:
preds = modeller.predict(x_test)
for k in list(mets[1].keys()):
if k != 'rmse': print("Score = "+str(k)+" = "+str(mets[1][k](y_test, preds)))
else: print("Score = "+str(k)+" = "+str(mets[1][k](y_test, preds)**0.5))
print("Score = "+"OOB"+" = "+str(modeller.oob_score_))
return modeller
#-------------
#-------------
#-------------
#-------------
#------------------------------------ SCALING AND NORMALISING ------------------------------------#
#-------------------- Standardise --------------------#
def standardise(data, output = True, method = "robust"):
if method == "robust": from sklearn.preprocessing import RobustScaler as scaler
elif method == "standard": from sklearn.preprocessing import StandardScaler as scaler
elif "min" in method or "max" in method: from sklearn.preprocessing import MinMaxScaler as scaler
elif "abs" in method: from sklearn.preprocessing import MaxAbsScaler as scaler
if type(data)==pd.DataFrame: cols = columns(data)
scaler = scaler()
res = scaler.fit(data)
res = scaler.transform(data)
if type(data)==pd.DataFrame:
res = table(res)
res.columns = cols
if output == True: return res, scaler
else: return res
#-------------------- Normalise --------------------#
def normalise(data, output = True, method = "l2"):
from sklearn.preprocessing import Normalizer
if type(data)==pd.DataFrame: cols = columns(data)
scaler = Normalizer(norm=method).fit(data)
res = scaler.transform(data)
if type(data)==pd.DataFrame:
res = table(res)
res.columns = cols
if output == True: return res, scaler
else: return res
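# Sketch: both scalers return (transformed_frame, fitted_scaler) so the identical transform can be
# replayed on unseen data later (this is what `prefit` further below relies on).
# >>> scaled, scaler = standardise(df, method = "minmax")
# >>> new_scaled = scaler.transform(new_df)     # `new_df` is a placeholder for later data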
#-------------
#-------------
#-------------
#-------------
#------------------------------------ PREPROCESS FUNCTION ------------------------------------#
#-------------------- :) --------------------#
def preprocess(train, target, hold = 0.2, dummy = True, impute = "bpca", mice = "boost",remove_outlier = 0, scale = "robust", transform = 0,
norm = False, output = True):
processor = {'dummies':-1, 'impute':-1, 'scale':-1, 'transform':-1, 'norm':-1, 'columns':-1}
if remove_outlier == 1: train = notoutlier(train)
if type(target)==str: x = exc(train, target); y = train[target]
if nunique(y)<=15: processor['target'] = unique(y)
else: processor['target'] = -1
x = complete(x, method = impute, mice = mice)
    if transform in (1, True): x, unskewer = unskew(x, info = True)
if dummy == False: x, codes = dummies(x, dummies = dummy, codes = True, ids = True)
else: x = dummies(x, dummies = dummy, ids = True); codes = -2
x = conts(x)
if scale is not None and scale != False:
if scale == True: x, scaler = standardise(x, method = "robust")
else: x, scaler = standardise(x, method = scale)
if norm is not None and norm != False:
if norm == True: x, normer = normalise(x, method = "l2")
else: x, normer = normalise(x, method = norm)
    if hold not in (0, False) and hold is not None: x_train, x_test, y_train, y_test = holdout(x, y = y)
print("Processing finished :)")
if output == True:
try: processor['dummies'] = codes
except: pass;
try: processor['impute'] = [impute,train,mice]
except: pass;
try: processor['scale'] = scaler
except: pass;
try: processor['norm'] = normer
except: pass;
try: processor['transform'] = unskewer
except: pass;
processor['columns'] = columns(x_train)
return x_train, x_test, y_train, y_test, processor
else: return x_train, x_test, y_train, y_test
#-------------------- :) Transform the test data --------------------#
def prefit(test, processor):
alldf = reset(vcat(processor['impute'][1],test), drop = True)
df = complete(alldf, method = processor['impute'][0], ids = True, mice = processor['impute'][2])
test = df[len(processor['impute'][1]):]
if processor['dummies'] == -2: test = dummies(test, dummies = True, ids = True)
a = set(processor['columns'])
b = set(columns(test))
matching = set.intersection(a,b)
not_matching = a.symmetric_difference(matching)
test = test[list(matching)]
if processor['dummies'] == -2:
try:
tabs = int(table(np.zeros((len(test),len(not_matching)))))
tabs.columns = list(not_matching)
test[columns(tabs)] = tabs
except: pass;
test = test[processor['columns']]
else:
for key in list(processor['dummies'].keys()):
try: test[key] = test[key].replace(processor['dummies'][key])
except: pass;
test = conts(test)
if processor['scale']!=-1: test = processor['scale'].transform(test)
if processor['norm']!=-1: test = processor['norm'].transform(test)
df = table(test)
df.columns = processor['columns']
return df
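# End-to-end sketch of the preprocess -> model -> prefit -> predict pipeline
# (`train`, `test` and the "target" column name are placeholders):
# >>> x_train, x_test, y_train, y_test, proc = preprocess(train, "target")
# >>> model = lightgbm(x_train, x_test, y_train, y_test)
# >>> preds = predict(prefit(test, proc), model, proc)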
#-------------
#-------------
#-------------
#-------------
#------------------------------------ METRICS AND HOLDOUT ------------------------------------#
def holdout(x, y, test = 0.2, seed = 1234, info = True):
from sklearn.model_selection import train_test_split
if info == True: print("--------------------------------\nx_train, x_test, y_train, y_test")
return train_test_split(x, y, test_size = test, random_state = seed)
#--------------------
def metrics(noclass, model = "lightgbm"):
from sklearn.metrics import mean_squared_error, cohen_kappa_score, r2_score
if model == "lightgbm":
if noclass == 2: return ['binary', ['binary_logloss','auc'], {'kappa':cohen_kappa_score,'rmse':mean_squared_error}]
elif noclass < 15: return ['multiclass', ['multi_logloss','multi_error'], {'kappa':cohen_kappa_score,'rmse':mean_squared_error}]
else: return ['regression_l2', ['l2_root'], {'r2':r2_score,'rmse':mean_squared_error}]
elif model == "randomforest":
if noclass == 2: return ['binary', {'kappa':cohen_kappa_score,'rmse':mean_squared_error}]
elif noclass < 15: return ['multiclass', {'kappa':cohen_kappa_score,'rmse':mean_squared_error}]
else: return ['regression', {'r2':r2_score,'rmse':mean_squared_error}]
#--------------------
def toclasses(preds, classes):
preds = np.round(preds)
preds = int(squeeze(preds, lower = min(classes), upper = max(classes)))
return preds
#--------------------
def predict(test, model, processor):
preds = model.predict(array(test))
if processor['target'] != -1: return toclasses(preds, classes = processor['target'])
else: return preds
#-------------
#-------------
#-------------
#-------------
#------------------------------------ GRAPHING ------------------------------------#
def plot(x = None, y = None, z = None, hue = None, size = 8, data = None, color = 'afmhot', smooth = True, n = 4):
dfdf = copy(data)
import matplotlib.pyplot as plt
if data is None and x is not None: print("Need to specify data"); return
try:
if type(x)==str: xlabel = x; x = dfdf[xlabel]; x = dummies(x, dummies = False)
except: pass;
try:
if type(y)==str: ylabel = y; y = dfdf[ylabel]; y = dummies(y, dummies = False)
except: pass;
try:
if type(z)==str: zlabel = z; z = dfdf[zlabel]; z = dummies(z, dummies = False)
except: pass;
try:
if type(hue)==str: huelabel = hue; hue = dfdf[huelabel]; hue = dummies(hue, dummies = False)
except: pass;
try:
xlabel = columns(x)[0];
if xlabel is None: xlabel = "X"
except: pass;
try:
ylabel = columns(y)[0];
if ylabel is None: ylabel = "Y"
except: pass;
try:
zlabel = columns(z)[0];
if zlabel is None: zlabel = "Z"
except: pass;
try:
huelabel = columns(hue)[0];
if huelabel is None: huelabel = "Hue"
except: pass;
if x is not None and y is not None and z is not None:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
fig = plt.figure(figsize=(size,size))
ax = Axes3D(fig)
if hue is not None:
cm = plt.get_cmap(color)
try: cNorm = matplotlib.colors.Normalize(vmin=np.min(hue)[0], vmax=np.max(hue)[0])
except: cNorm = matplotlib.colors.Normalize(vmin=np.min(hue), vmax=np.max(hue))
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cm)
ax.scatter(array(x),array(y),array(z),c=scalarMap.to_rgba(array(hue)),s=size*5)
ax.set_xlabel(xlabel); ax.set_ylabel(ylabel); ax.set_zlabel(zlabel)
scalarMap.set_array(hue)
            fig.colorbar(scalarMap, pad=0, orientation = "horizontal", shrink = .8)
plt.show()
else:
import matplotlib
ax.scatter(x,y,z,s=size*5)
ax.set_xlabel(xlabel); ax.set_ylabel(ylabel); ax.set_zlabel(zlabel)
plt.show()
else:
import seaborn as sb
try:
if check_type(dfdf[xlabel]) == 'cat': dfdf[xlabel] = string(dfdf[xlabel])
except: pass;
try:
if check_type(dfdf[ylabel]) == 'cat': dfdf[ylabel] = string(dfdf[ylabel])
except: pass;
try:
if check_type(dfdf[huelabel]) == 'cat': dfdf[huelabel] = string(dfdf[huelabel])
except: pass;
if y is None and hue is None:
fig = plt.figure(figsize=(size,size))
if check_type(dfdf[xlabel]) == 'cont':
fig = sb.kdeplot(data = dfdf[xlabel], linewidth = 3,clip = [min(dfdf[xlabel]),max(dfdf[xlabel])])
mean_line(dfdf[xlabel])
plt.ylabel("Frequency"); plt.xlabel(xlabel); plt.title("Kernel Density graph"); plt.show()
elif check_type(dfdf[xlabel]) == 'cat':
fig = sb.countplot(dfdf[xlabel].fillna("Missing"))
plt.title("Count graph for "+xlabel); plt.show()
elif y is None:
if check_type(dfdf[xlabel]) == 'cont': sort_by = xlabel
else: sort_by = huelabel
if dtypes(dfdf[huelabel])[0] != 'obj':
df = sort(dfdf, by = sort_by)
dfdf[sort_by+"_Q"] = qcut(dfdf[sort_by], smooth = smooth, n = n)
dfdf[sort_by+"_Q"] = string(dfdf[sort_by+"_Q"])
fig = plt.figure(figsize=(size,size))
if check_type(dfdf[xlabel]) == 'cont':
if check_type(dfdf[huelabel]) == "cont":
fig = sb.violinplot(x=xlabel+"_Q", y=huelabel, bw='scott' ,scale="width",
cut=min(dfdf[huelabel]), inner = None, linewidth =4, data = dfdf)
plt.setp(fig.get_xticklabels(), rotation=45); plt.title("Violin graph for "+xlabel+" & "+huelabel)
plt.show()
elif check_type(dfdf[huelabel]) == 'cat':
fig = sb.countplot(x = xlabel+"_Q", hue = huelabel, data = dfdf)
plt.title("Count graph for "+xlabel+" & "+huelabel); plt.setp(fig.get_xticklabels(), rotation=45)
plt.show()
elif check_type(dfdf[xlabel]) == 'cat':
if check_type(dfdf[huelabel]) == "cont":
fig = sb.countplot(x = xlabel, hue = huelabel+"_Q", data = dfdf)
plt.title("Count graph for "+xlabel+" & "+huelabel); plt.setp(fig.get_xticklabels(), rotation=45)
plt.show()
if check_type(dfdf[huelabel]) == "cat":
fig = sb.countplot(x = xlabel, hue = huelabel, data = dfdf)
plt.title("Count graph for "+xlabel+" & "+huelabel); plt.setp(fig.get_xticklabels(), rotation=45)
plt.show()
elif hue is None:
if check_type(dfdf[xlabel]) == 'cont':
if check_type(dfdf[ylabel]) == 'cont':
fig = plt.figure(figsize=(size,size))
dfdf = notnull(dfdf)
dfdf[xlabel+"_Q"] = qcut(dfdf[xlabel], n = 30, smooth = True)
dfdf = (dfdf.groupby(by = xlabel+"_Q").median()+dfdf.groupby(by = xlabel+"_Q").mean())/2
sb.regplot(x = xlabel, y = ylabel, data = dfdf, ci = None, truncate=True, order=2, color = 'black')
plt.title("Regression graph for "+xlabel+" & "+ylabel); plt.show()
elif check_type(dfdf[ylabel]) == 'cat':
fig, (ax1,ax2) = plt.subplots(1,2, sharey = True, figsize = (size*1.5,size))
sb.boxplot(x = xlabel, y = ylabel, data = dfdf, palette="Set3", linewidth = 3, whis = 1, ax = ax1)
sb.pointplot(x = xlabel, y = ylabel, data = dfdf, lw=5, ax = ax2, ci = 50, capsize = .1, palette = 'Set1')
plt.title("Mean PointPlot graph for "+xlabel+" & "+ylabel); plt.show()
elif check_type(dfdf[xlabel]) == 'cat':
if check_type(dfdf[ylabel]) == 'cont':
fig, (ax1,ax2) = plt.subplots(1,2, sharey = False, figsize = (size*1.5,size))
sb.boxplot(x = xlabel, y = ylabel, data = dfdf, palette="Set3", linewidth = 3, whis = 1, ax = ax1)
plt.setp(ax1.get_xticklabels(), rotation=45)
plt.setp(ax2.get_xticklabels(), rotation=45)
sb.pointplot(x = xlabel, y = ylabel, data = dfdf, lw=5, ax = ax2, ci = 50, capsize = .1, palette = 'Set1')
plt.title("Mean PointPlot graph for "+xlabel+" & "+ylabel); plt.show()
elif check_type(dfdf[ylabel]) == 'cat':
fig = sb.factorplot(x = xlabel, col = ylabel, data = dfdf, size = 5, palette="Set2", col_wrap = 4, kind = "count")
plt.show()
else:
if check_type(dfdf[huelabel]) == 'cont':
dfdf = notnull(sort(dfdf, by = huelabel))
dfdf[huelabel] = string(qcut(dfdf[huelabel], smooth = False, n = 4))
elif check_type(dfdf[huelabel]) == 'cat':
dfdf = notnull(dfdf)
if check_type(dfdf[xlabel]) == 'cat':
if check_type(dfdf[ylabel]) == 'cont':
try:
fig = plt.figure(figsize=(size,size))
fig = sb.barplot(x = xlabel, y = ylabel, hue = huelabel, data = dfdf)
plt.setp(fig.get_xticklabels(), rotation=45)
plt.show()
except:
fig = sb.factorplot(x = xlabel, y = ylabel, data = dfdf, col = huelabel, size = 5, capsize=.1, palette="Set2", ci = 70)
plt.show()
elif check_type(dfdf[ylabel]) == 'cat':
fig = sb.factorplot(x = xlabel, hue = ylabel, data = dfdf, col = huelabel, kind = "count", size = 5)
plt.show()
elif check_type(dfdf[xlabel]) == 'cont':
if check_type(dfdf[ylabel]) == 'cont':
fig = plt.figure(figsize=(size,size))
fig = sb.lmplot(x = xlabel, y = ylabel, hue = huelabel, data = dfdf,robust = True, n_boot = 50, scatter = False, ci = None)
plt.show()
elif check_type(dfdf[ylabel]) == 'cat':
fig = sb.factorplot(x = xlabel, y = ylabel, col = huelabel, data = dfdf, palette = "Set3", dodge=True, ci = 70,
estimator = special_statistic, capsize=.2, n_boot = 100, size = 5)
plt.show()
def highlight_larger(s):
is_max = s > CI(99,s,L=False); return ['background-color: '+pd_colour if v else '' for v in is_max]
def highlight_smaller(s):
is_min = s < CI(99,s,U=False); return ['background-color: '+pd_colour if v else '' for v in is_min]
def highlight_one(s):
is_true = s == 1; return ['background-color: '+pd_colour if v else '' for v in is_true]
def highlight_true(s):
is_true = s == True; return ['background-color: '+pd_colour if v else '' for v in is_true]
#-------------
def mean_line(x, **kwargs):
ls = {"0":"--"}
plt.axvline(mean(x), linestyle =ls[kwargs.get("label","0")],
color = kwargs.get("color", "brown"), linewidth=2)
txkw = dict(size=12, color = kwargs.get("color", "brown"))
plt.text(mean(x),0.03, "MEAN", **txkw)
#-------------
def special_statistic(x): return (2*np.nanmedian(x)+np.nanmean(x))/3
#-------------
def check_type(x):
ctd = nunique(x); parts = (((ctd<=15)&(len(x)>15))|((ctd<len(x)*0.01)&(ctd<=20)&(dtypes(x)=='int'))|((dtypes(x)=='str')&(ctd<=15)))
if dtypes(x) != 'str':
if parts == True: return 'cat'
else: return 'cont'
else:
if parts == False: return 'str'
else: return 'cat'
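# (`check_type` is the heuristic the plotting code above leans on: few distinct values => 'cat',
#  otherwise 'cont' for numeric columns and 'str' for free text.)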
#-------------
#-------------
#-------------
#-------------
#------------------------------------ DATA MINING AND NLP ------------------------------------#
#-------------------- String extracting --------------------#
def getfunction(c, args, now):
if "split" in c:
if "ex" in c: expanding = True;
else: expanding = False
if "letter" in args[c].lower() or "word" in args[c].lower() or "digit" in args[c].lower() or "number" in args[c].lower():
how = ''
for j in args[c].split(","):
if "letter" in j: how = how+"([a-z])"
elif "Letter" in j: how = how+"([a-zA-Z])"
elif "LETTER" in j: how = how+"([A-Z])"
elif "word" in j: how = how+"([a-z]+)"
elif "Word" in j: how = how+"([a-zA-Z]+)"
elif "WORD" in j: how = how+"([A-Z]+)"
elif "digit" in j.lower(): how = how+"([0-9])"
elif "number" in j.lower(): how = how+"([0-9]+)"
elif "symbol" in j.lower(): how+'[^\w]+'
now = now.str.extract(how, expand = expanding)
else: now = now.str.split(args[c], expand = expanding)
elif "col" in c or "loc" in c:
try:
if "le" in args[c]: now = now.str[0:-1]
elif "ri" in args[c]: now = now.str[-1:1]
except:
if type(now) == pd.Series: now = now.str[args[c]]
else: now = now[args[c]]
elif "not" in c: now = now.str.contains(args[c]); now = reverse(now)
elif "has" in c: now = now.str.contains(args[c])
elif "rep" in c:
if "symbol" in args[c]: now = now.replace(r'[^\w]',args[c][1])
else: now = now.str.replace(args[c][0], args[c][1])
elif "rem" in c or "strip" in c:
if "all" in args[c]:
for j in [".",",","+","=","-","_","(",")","[","]","*","$","?","<",">",'"',"'","/","<",">","%"]:
now = now.str.replace(j,"")
elif "symbol" in args[c]: now = now.replace(r'[^\w]','')
else: now = now.str.replace(args[c][0], "")
elif "len" in c:
if args[c] == 1: now = now.str.len()
elif "low" in c:
if args[c] == 1: now = now.str.lower()
elif "up" in c:
if args[c] == 1: now = now.str.upper()
elif "count" in c:
if args[c] == ".": now = now.str.count(r"(\.)")
elif args[c] == "(": now = now.str.count(r"(\()")
elif args[c] == ")": now = now.str.count(r"(\))")
elif args[c] == "[": now = now.str.count(r"(\[)")
elif args[c] == "]": now = now.str.count(r"(\])")
elif args[c] == "{": now = now.str.count(r"(\{)")
elif args[c] == "}": now = now.str.count(r"(\})")
elif 'symbol' in args[c]: now = now.str.count(r'[^\w]')
elif 'sym' in args[c]: now = now.str.count(r'[\w]')
elif 'num' in args[c] or 'dig' in args[c]: now = now.str.count(r'[\d]')
else: now = now.str.count(args[c])
elif "df" in c or "table" in c or "series" in c: now = now.apply(pd.Series)
return now
def get(x, **args):
import re
now = copy(x)
for c in args:
now = getfunction(c, args, now)
return now
def extract(x, **args): return get(x, **args)
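# Hedged examples of the keyword-driven string miner (`df["name"]` is a placeholder text column):
# >>> get(df["name"], lower = True, rem = "all")   # lowercase, then strip punctuation
# >>> get(df["name"], count = "symbol")            # count non-word characters per row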
#-------------------- Word Frequency --------------------#
def flatten(y, split = " ", dropna = True, symbols = False, lower = True):
def col_split(x,split,dropna,symbols,lower):
if split is not None:
if symbols == False:
if lower == True: f = list(get(x, lower = True, rem = "all", splitex = split).fillna(np.nan).values.flatten())
else: f = list(get(x, rem = "all", splitex = split).fillna(np.nan).values.flatten())
else: f = list(get(x, splitex = split).fillna(np.nan).values.flatten())
else: f = list(x.fillna(np.nan).values.flatten())
return f
if type(y)==pd.Series: flattened = col_split(y,split,dropna,symbols,lower)
else:
flattened = []
for col in strs(y):
flattened += col_split(y[col],split,dropna,symbols,lower)
if dropna == True: return list(array(flattened)[array(flattened)!='nan'])
else: return flattened
#-------------
def wordfreq(x, hist = True, first = 15, separate = True):
if separate == False or type(x) == pd.Series:
df = reset(table(cunique(flatten(x))))[0:first]
df.columns = ["Word","Count"]
else:
first = int(first/len(strs(x)))
df = reset(table(cunique(flatten(x[strs(x)[0]]))))[0:first]
df.columns = ["Word","Count"]
df["Column"] = objcol(x)[0]
for col in objcol(x)[1:]:
dfx = reset(table(cunique(flatten(x[col]))))[0:first]
dfx.columns = ["Word","Count"]
dfx["Column"] = col
df = vcat(df,dfx)
if hist == True:
if separate == True and type(x) != pd.Series:
k = first*1.25
if k < 10: k = 8
fig = plt.figure(figsize=(k,k))
fig = sb.barplot(x = "Word", y = "Count", hue = "Column", data = df)
plt.setp(fig.get_xticklabels(), rotation=45, size = 16)
else:
fig = plt.figure(figsize=(first*0.5,first*0.35))
fig = sb.barplot(x = "Word", y = "Count", data = df)
plt.setp(fig.get_xticklabels(), rotation=45, size = 16)
plt.show()
else:
return df
#-------------
def getwords(y, first = 10):
x = copy(y)
df = wordfreq(x, first = first, hist = False)
for col in objcol(x):
cols = get(x[col], lower = True, rem = "all", table = True)
for j in df[df["Column"]==col]["Word"]:
x["Count="+str(j)] = get(cols[0], count = j)
return x
#-------------
#------------- Daniel Han-Chen 2017
#------------- https://github.com/danielhanchen/sciblox
#------------- SciBlox v0.02
#------------- | danielhanchen/sciblox | sciblox (v1)/sciblox.py | Python | mit | 74,277 |
# -*- coding: utf-8 -*-
'''
Creates a Gene Wiki protein box template around a gene specified by
the first argument passed to it on the command line.
Usage: `python create_template.py <entrez_id>`
'''
import sys
from genewiki.mygeneinfo import parse
if len(sys.argv) > 1:
entrez = sys.argv[1]
try: int(entrez)
except ValueError:
sys.stderr.write("Entrez ids must contain only digits.")
sys.exit(1)
sys.stdout.write(str(parse(entrez)))
| SuLab/genewiki | old-assets/scripts/create_template.py | Python | mit | 480 |
"""pytest_needle.driver
.. codeauthor:: John Lane <[email protected]>
"""
import base64
from errno import EEXIST
import math
import os
import re
import sys
import pytest
from needle.cases import import_from_string
from needle.engines.pil_engine import ImageDiff
from PIL import Image, ImageDraw, ImageColor
from selenium.webdriver.remote.webdriver import WebElement
from pytest_needle.exceptions import ImageMismatchException, MissingBaselineException, MissingEngineException
if sys.version_info >= (3, 0):
from io import BytesIO as IOClass
    # Ignoring since basestring is not redefined if running on python3
basestring = str # pylint: disable=W0622,C0103
else:
try:
from cStringIO import StringIO as IOClass
except ImportError:
from StringIO import StringIO as IOClass
DEFAULT_BASELINE_DIR = os.path.realpath(os.path.join(os.getcwd(), 'screenshots', 'baseline'))
DEFAULT_OUTPUT_DIR = os.path.realpath(os.path.join(os.getcwd(), 'screenshots'))
DEFAULT_ENGINE = 'needle.engines.pil_engine.Engine'
DEFAULT_VIEWPORT_SIZE = '1024x768'
class NeedleDriver(object): # pylint: disable=R0205
"""NeedleDriver instance
"""
ENGINES = {
'pil': DEFAULT_ENGINE,
'imagemagick': 'needle.engines.imagemagick_engine.Engine',
'perceptualdiff': 'needle.engines.perceptualdiff_engine.Engine'
}
def __init__(self, driver, **kwargs):
self.options = kwargs
self.driver = driver
# Set viewport position, size
self.driver.set_window_position(0, 0)
self.set_viewport()
@staticmethod
def _create_dir(directory):
"""Recursively create a directory
.. note:: From needle
https://github.com/python-needle/needle/blob/master/needle/cases.py#L125
:param str directory: Directory path to create
:return:
"""
try:
os.makedirs(directory)
except OSError as err:
if err.errno == EEXIST and os.path.isdir(directory):
return
raise err
def _find_element(self, element_or_selector=None):
"""Returns an element
:param element_or_selector: WebElement or tuple containing selector ex. ('id', 'mainPage')
:return:
"""
if isinstance(element_or_selector, tuple): # pylint: disable=R1705
elements = self.driver.find_elements(*element_or_selector)
return elements[0] if elements else None
elif isinstance(element_or_selector, WebElement):
return element_or_selector
raise ValueError("element_or_selector must be a WebElement or tuple selector")
@staticmethod
def _get_element_dimensions(element):
"""Returns an element's position and size
:param WebElement element: Element to get dimensions for
:return:
"""
if isinstance(element, WebElement):
# Get dimensions of element
location = element.location
size = element.size
return {
'top': int(location['y']),
'left': int(location['x']),
'width': int(size['width']),
'height': int(size['height'])
}
raise ValueError("element must be a WebElement")
def _get_element_rect(self, element):
"""Returns the two points that define the rectangle
:param WebElement element: Element to get points for
:return:
"""
dimensions = self._get_element_dimensions(element)
if dimensions:
return (
dimensions['left'],
dimensions['top'],
(dimensions['left'] + dimensions['width']),
(dimensions['top'] + dimensions['height'])
)
return ()
@staticmethod
def _get_ratio(image_size, window_size):
return max((
math.ceil(image_size[0] / float(window_size[0])),
math.ceil(image_size[1] / float(window_size[1]))
))
def _get_window_size(self):
window_size = self.driver.get_window_size()
return window_size['width'], window_size['height']
@property
def baseline_dir(self):
"""Return baseline image path
:return:
:rtype: str
"""
return self.options.get('baseline_dir', DEFAULT_BASELINE_DIR)
@baseline_dir.setter
def baseline_dir(self, value):
"""Set baseline image directory
:param str value: File path
:return:
"""
assert isinstance(value, basestring)
self.options['baseline_dir'] = value
@property
def cleanup_on_success(self):
"""Returns True, if cleanup on success flag is set
:return:
:rtype: bool
"""
return self.options.get('cleanup_on_success', False)
@cleanup_on_success.setter
def cleanup_on_success(self, value):
"""Set cleanup on success flag
:param bool value: Cleanup on success flag
:return:
"""
self.options['cleanup_on_success'] = bool(value)
@property
def engine(self):
"""Return image processing engine
:return:
"""
return import_from_string(self.engine_class)()
@property
def engine_class(self):
"""Return image processing engine name
:return:
:rtype: str
"""
return self.ENGINES.get(self.options.get('needle_engine', 'pil').lower(), DEFAULT_ENGINE)
@engine_class.setter
def engine_class(self, value):
"""Set image processing engine name
:param str value: Image processing engine name (pil, imagemagick, perceptualdiff)
:return:
"""
assert value.lower() in self.ENGINES
self.options['needle_engine'] = value.lower()
def get_screenshot(self, element=None):
"""Returns screenshot image
:param WebElement element: Crop image to element (Optional)
:return:
"""
stream = IOClass(base64.b64decode(self.driver.get_screenshot_as_base64().encode('ascii')))
image = Image.open(stream).convert('RGB')
if isinstance(element, WebElement):
window_size = self._get_window_size()
image_size = image.size
# Get dimensions of element
dimensions = self._get_element_dimensions(element)
if not image_size == (dimensions['width'], dimensions['height']):
ratio = self._get_ratio(image_size, window_size)
return image.crop([point * ratio for point in self._get_element_rect(element)])
return image
def get_screenshot_as_image(self, element=None, exclude=None):
"""
:param WebElement element: Crop image to element (Optional)
:param list exclude: Elements to exclude
:return:
"""
image = self.get_screenshot(element)
# Mask elements in exclude if element is not included
if isinstance(exclude, (list, tuple)) and exclude and not element:
# Gather all elements to exclude
elements = [self._find_element(element) for element in exclude]
elements = [element for element in elements if element]
canvas = ImageDraw.Draw(image)
window_size = self._get_window_size()
image_size = image.size
ratio = self._get_ratio(image_size, window_size)
for ele in elements:
canvas.rectangle([point * ratio for point in self._get_element_rect(ele)],
fill=ImageColor.getrgb('black'))
del canvas
return image
def assert_screenshot(self, file_path, element_or_selector=None, threshold=0, exclude=None):
"""Fail if new fresh image is too dissimilar from the baseline image
.. note:: From needle
https://github.com/python-needle/needle/blob/master/needle/cases.py#L161
:param str file_path: File name for baseline image
:param element_or_selector: WebElement or tuple containing selector ex. ('id', 'mainPage')
:param threshold: Distance threshold
:param list exclude: Elements or element selectors for areas to exclude
:return:
"""
element = self._find_element(element_or_selector) if element_or_selector else None
# Get baseline screenshot
self._create_dir(self.baseline_dir)
baseline_image = os.path.join(self.baseline_dir, '%s.png' % file_path) \
if isinstance(file_path, basestring) else Image.open(file_path).convert('RGB')
# Take screenshot and exit if in baseline saving mode
if self.save_baseline:
self.get_screenshot_as_image(element, exclude=exclude).save(baseline_image)
return
# Get fresh screenshot
self._create_dir(self.output_dir)
fresh_image = self.get_screenshot_as_image(element, exclude=exclude)
fresh_image_file = os.path.join(self.output_dir, '%s.png' % file_path)
fresh_image.save(fresh_image_file)
# Error if there is not a baseline image to compare
if not self.save_baseline and not isinstance(file_path, basestring) and not os.path.exists(baseline_image):
raise IOError('The baseline screenshot %s does not exist. You might want to '
're-run this test in baseline-saving mode.' % baseline_image)
# Compare images
if isinstance(baseline_image, basestring):
try:
self.engine.assertSameFiles(fresh_image_file, baseline_image, threshold)
except AssertionError as err:
msg = getattr(err, 'message', err.args[0] if err.args else "")
args = err.args[1:] if len(err.args) > 1 else []
raise ImageMismatchException(msg, baseline_image, fresh_image_file, args)
except EnvironmentError:
msg = "Missing baseline '{}'. Please run again with --needle-save-baseline".format(baseline_image)
raise MissingBaselineException(msg)
except ValueError as err:
if self.options['needle_engine'] == 'imagemagick':
msg = "It appears {0} is not installed. Please verify {0} is installed or choose a different engine"
raise MissingEngineException(msg.format(self.options['needle_engine']))
raise err
finally:
if self.cleanup_on_success:
os.remove(fresh_image_file)
else:
diff = ImageDiff(fresh_image, baseline_image)
distance = abs(diff.get_distance())
if distance > threshold:
pytest.fail('Fail: New screenshot did not match the baseline (by a distance of %.2f)' % distance)
@property
def output_dir(self):
"""Return output image path
:return:
:rtype: str
"""
return self.options.get('output_dir', DEFAULT_OUTPUT_DIR)
@output_dir.setter
def output_dir(self, value):
"""Set output image directory
:param str value: File path
:return:
"""
assert isinstance(value, basestring)
self.options['output_dir'] = value
@property
def save_baseline(self):
"""Returns True, if save baseline flag is set
:return:
:rtype: bool
"""
return self.options.get('save_baseline', False)
@save_baseline.setter
def save_baseline(self, value):
"""Set save baseline flag
:param bool value: Save baseline flag
:return:
"""
self.options['save_baseline'] = bool(value)
def set_viewport(self):
"""Set viewport width, height based off viewport size
:return:
"""
if self.viewport_size.lower() == 'fullscreen':
self.driver.maximize_window()
return
viewport_size = re.match(r'(?P<width>\d+)\s?[xX]\s?(?P<height>\d+)', self.viewport_size)
viewport_dimensions = (viewport_size.group('width'), viewport_size.group('height')) if viewport_size \
else DEFAULT_VIEWPORT_SIZE.split('x')
self.driver.set_window_size(*[int(dimension) for dimension in viewport_dimensions])
@property
def viewport_size(self):
"""Return setting for browser window size
:return:
:rtype: str
"""
return self.options.get('viewport_size', DEFAULT_VIEWPORT_SIZE)
@viewport_size.setter
def viewport_size(self, value):
"""Set setting for browser window size
:param value: Browser window size, as string or (x,y)
:return:
"""
assert isinstance(value, (basestring, list, tuple))
assert len(value) == 2 and all([isinstance(i, int) for i in value]) \
if isinstance(value, (list, tuple)) else True
self.options['viewport_size'] = value if isinstance(value, basestring) else '{}x{}'.format(*value)
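# Illustrative standalone usage (a sketch — with the pytest plugin the driver is normally created
# for you by the plugin's fixture; the Chrome webdriver below is an assumption):
# >>> driver = NeedleDriver(webdriver.Chrome(), viewport_size = "1280x800", save_baseline = False)
# >>> driver.assert_screenshot('homepage', ('id', 'main'), threshold = 0.1)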
| jlane9/pytest-needle | pytest_needle/driver.py | Python | mit | 13,087 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Facebook OAuth interface."""
# System imports
import json
import logging
try:
from urllib import quote_plus
except ImportError:
from urllib.parse import quote_plus
import oauth2 as oauth
from django.conf import settings
# Project imports
from .base_auth import Base3rdPartyAuth
logger = logging.getLogger(__name__)
FACEBOOK_REQUEST_TOKEN_URL = 'https://www.facebook.com/dialog/oauth'
FACEBOOK_ACCESS_TOKEN_URL = 'https://graph.facebook.com/oauth/access_token'
FACEBOOK_CHECK_AUTH = 'https://graph.facebook.com/me'
consumer = oauth.Consumer(key=settings.FACEBOOK_APP_ID, secret=settings.FACEBOOK_APP_SECRET)
class FacebookOAuth(Base3rdPartyAuth):
PROVIDER = 'facebook'
BACKEND = 'draalcore.auth.backend.FacebookOAuthBackend'
def get_authorize_url(self, request):
"""Request and prepare URL for login using Facebook account."""
base_url = '{}?client_id={}&redirect_uri={}&scope={}'
return base_url.format(FACEBOOK_REQUEST_TOKEN_URL, settings.FACEBOOK_APP_ID,
quote_plus(self.get_callback_url()), 'email')
def set_user(self, response):
return self.get_user({
'username': 'fb-{}'.format(response['id']),
'email': response['email'],
'first_name': response['first_name'],
'last_name': response['last_name'],
})
def authorize(self, request):
base_url = '{}?client_id={}&redirect_uri={}&client_secret={}&code={}'
request_url = base_url.format(FACEBOOK_ACCESS_TOKEN_URL, settings.FACEBOOK_APP_ID,
self.get_callback_url(), settings.FACEBOOK_APP_SECRET,
request.GET.get('code'))
# Get the access token from Facebook
client = oauth.Client(consumer)
response, content = client.request(request_url, 'GET')
if response['status'] == '200':
# Get profile info from Facebook
base_url = '{}?access_token={}&fields=id,first_name,last_name,email'
access_token = json.loads(content)['access_token']
request_url = base_url.format(FACEBOOK_CHECK_AUTH, access_token)
response, content = client.request(request_url, 'GET')
if response['status'] == '200':
user_data = json.loads(content)
# Authenticate user
logger.debug(user_data)
user = self.set_user(user_data)
return self.authenticate(request, user.username)
self.login_failure()
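# (Flow summary: get_authorize_url() sends the browser to Facebook's OAuth dialog; Facebook
#  redirects back to the callback URL with ?code=..., and authorize() exchanges that code for an
#  access token, fetches the profile fields, and signs the matching local user in via the backend.)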
| jojanper/draalcore | draalcore/auth/sites/fb_oauth.py | Python | mit | 2,606 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ShortCodeTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SMS/ShortCodes/SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": null,
"date_updated": null,
"friendly_name": "API_CLUSTER_TEST_SHORT_CODE",
"short_code": "99990",
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "POST",
"sms_fallback_url": null,
"sms_method": "POST",
"sms_url": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SMS/ShortCodes/SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": null,
"date_updated": null,
"friendly_name": "API_CLUSTER_TEST_SHORT_CODE",
"short_code": "99990",
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "POST",
"sms_fallback_url": null,
"sms_method": "POST",
"sms_url": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes("SCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/SMS/ShortCodes.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json?Page=0&PageSize=50",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json?Page=0&PageSize=50",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"short_codes": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"api_version": "2010-04-01",
"date_created": null,
"date_updated": null,
"friendly_name": "API_CLUSTER_TEST_SHORT_CODE",
"short_code": "99990",
"sid": "SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_fallback_method": "POST",
"sms_fallback_url": null,
"sms_method": "POST",
"sms_url": null,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes/SCaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
],
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json?Page=0&PageSize=50",
"last_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json?Page=0&PageSize=50",
"next_page_uri": null,
"num_pages": 1,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"short_codes": [],
"start": 0,
"total": 1,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json"
}
'''
))
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.short_codes.list()
self.assertIsNotNone(actual)
| twilio/twilio-python | tests/integration/api/v2010/account/test_short_code.py | Python | mit | 6,758 |
class GeneticEngine:
genomLength = 10
generationCount = 10
individualCount = 10
selectionType = 10
crossingType = 10
useMutation = 1
mutationPercent = 50
"""constructor"""
def __init__(self, fitnessFunction):
return 0
"""main body"""
def run():
return 0
def generateFirstGeneration():
return 0
def selection():
return 0
def crossing():
return 0
def mutation():
return 0
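# Hedged usage sketch (not part of the original file): how the engine is
# presumably meant to be driven once run() is implemented. The bit-counting
# fitness function and parameter values below are illustrative assumptions.
if __name__ == "__main__":
    def count_ones(genome):
        # Example fitness: number of 1-bits in a binary genome.
        return sum(genome)
    engine = GeneticEngine(count_ones)
    engine.generationCount = 20
    engine.individualCount = 30
    engine.run()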
| GrimRanger/GeneticAlgorithm | GeneticLib/genetic_engine.py | Python | mit | 512 |
import argparse
import logging
from typing import List, Optional
from redis import StrictRedis
from minique.compat import sentry_sdk
from minique.work.worker import Worker
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--redis-url", required=True)
parser.add_argument("-q", "--queues", nargs="+", required=True)
parser.add_argument("--allow-callable", nargs="+", required=True)
parser.add_argument("--single-tick", action="store_true")
return parser
def main(argv: Optional[List[str]] = None) -> None:
parser = get_parser()
args = parser.parse_args(argv)
logging.basicConfig(datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
redis = StrictRedis.from_url(args.redis_url)
worker = Worker.for_queue_names(redis=redis, queue_names=args.queues)
worker.allowed_callable_patterns = set(args.allow_callable)
worker.log.info("Worker initialized")
if sentry_sdk:
try:
sentry_sdk.init()
except Exception as exc:
worker.log.warning("Failed to initialize Sentry: %s", exc, exc_info=True)
else:
hub = sentry_sdk.hub.Hub.current
if hub and hub.client and hub.client.dsn:
worker.log.info("Sentry configured with a valid DSN")
if args.single_tick:
worker.tick()
else:
worker.loop()
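# Hedged usage note (illustrative only; the Redis URL, queue name and callable
# pattern below are assumptions, not values taken from the project):
#
#   main([
#       "--redis-url", "redis://localhost:6379/0",
#       "--queues", "default",
#       "--allow-callable", "myapp.tasks.*",
#       "--single-tick",
#   ])
#
# parses the arguments, connects to Redis, builds a Worker for the named
# queues and, because of --single-tick, processes at most one pending job.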
| valohai/minique | minique/cli.py | Python | mit | 1,399 |
"""
Contains basic interface (abstract base class) for word embeddings.
"""
import os
from abc import ABCMeta, abstractmethod
class IWordEmbedding(object):
"""
Abstract base class for word embeddings
"""
__metaclass__ = ABCMeta
def __init__(self, path, vector_length):
self.model = None
self.path = path
self.vector_length = vector_length
self.already_built = False
@abstractmethod
def _build(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, word):
raise NotImplementedError
def build(self):
""" Loads word embedding from its file """
if not self.already_built:
print("Loading pre-trained word embedding from {0}...".format(self.path))
self._build()
self.already_built = True
print("Pre-trained word embedding from {0} loaded!".format(self.path))
def get_embedding_model_path(self):
""" :return: absolute path to folder containing saved word embedding model """
return os.path.join(os.path.dirname(__file__), '../../../models/word_embeddings', self.path)
@staticmethod
def data_file_to_sentences(data_file_path):
"""
Converts a processed data file to generator of lists of words
:param data_file_path: path to data file
:return: iterator yielding sentences as lists of words
"""
with open(data_file_path, 'r') as f:
for line in f:
sentence = line.split(' ')[1]
yield map(lambda word: word.rstrip(), sentence.split(','))
def __str__(self):
return type(self).__name__
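# Hedged example (not part of the original module): a minimal concrete
# subclass showing how _build() and __getitem__() are expected to be filled
# in. The in-memory dictionary is an illustrative assumption; real subclasses
# would load word2vec/GloVe-style vectors from the file at self.path.
class DummyWordEmbedding(IWordEmbedding):
    def _build(self):
        self.model = {"hello": [0.0] * self.vector_length}
    def __getitem__(self, word):
        return self.model.get(word, [0.0] * self.vector_length)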
| mikolajsacha/tweetsclassification | src/features/word_embeddings/iword_embedding.py | Python | mit | 1,681 |
"""
Views for PubSite app.
"""
from django.conf import settings
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from django.shortcuts import render
import requests
import logging
logger = logging.getLogger(__name__)
def _get_context(page_name):
return {
"pages": settings.PUBLIC_PAGES,
"current_page_name": page_name,
}
# Regular index
# def index(request):
# """
# View for the static index page
# """
# return render(request, 'public/home.html', _get_context('Home'))
def index(request):
"""
View for the static index page
"""
return render(request, "public/home.html", _get_context("Home"))
def about(request):
"""
View for the static chapter history page.
"""
return render(request, "public/about.html", _get_context("About"))
def activities(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/activities.html",
_get_context("Service & Activities"),
)
def rush(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/rush.html",
_get_context("Rush"),
)
def campaign(request):
"""
View for the campaign service page.
"""
    # Override requests Session authentication handling
class NoRebuildAuthSession(requests.Session):
def rebuild_auth(self, prepared_request, response):
"""
No code here means requests will always preserve the Authorization
header when redirected.
Be careful not to leak your credentials to untrusted hosts!
"""
url = "https://api.givebutter.com/v1/transactions/"
headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"}
response = None
# Create custom requests session
session = NoRebuildAuthSession()
# Make GET request to server, timeout in seconds
try:
r = session.get(url, headers=headers, timeout=0.75)
if r.status_code == 200:
response = r.json()
else:
logger.error(f"ERROR in request: {r.status_code}")
except requests.exceptions.Timeout:
logger.warning("Connection to GiveButter API Timed out")
except requests.ConnectionError:
logger.warning("Connection to GiveButter API could not be resolved")
except requests.exceptions.RequestException:
logger.error(
"An unknown issue occurred while trying to retrieve GiveButter Donor List"
)
# Grab context object to use later
ctx = _get_context("Campaign")
# Check for successful response, if so - filter, sort, and format data
if response and "data" in response:
response = response["data"] # Pull data from GET response object
logger.debug(f"GiveButter API Response: {response}")
# Filter by only successful transactions, then sort by amount descending
successful_txs = [tx for tx in response if tx["status"] == "succeeded"]
sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True)
# Clean data to a list of dictionaries & remove unnecessary data
transactions = [
{
"name": tx["giving_space"]["name"],
"amount": tx["giving_space"]["amount"],
"message": tx["giving_space"]["message"],
}
for tx in sorted_txs[:20]
]
# Attach transaction dictionary & length to context object
ctx["transactions"] = transactions
ctx["num_txs"] = len(successful_txs)
return render(
request,
"public/campaign.html",
ctx,
)
def permission_denied(request):
"""
View for 403 (Permission Denied) error.
"""
return render(
request,
"common/403.html",
_get_context("Permission Denied"),
)
def handler404(request, exception):
""" """
return render(request, "common/404.html", _get_context("Page Not Found"))
class ResetPassword(PasswordResetView):
template_name = "password_reset/password_reset_form.html"
class ResetPasswordDone(PasswordResetDoneView):
template_name = "password_reset/password_reset_done.html"
class ResetPasswordConfirm(PasswordResetConfirmView):
template_name = "password_reset/password_reset_confirm.html"
class ResetPasswordComplete(PasswordResetCompleteView):
template_name = "password_reset/password_reset_complete.html"
| sigmapi-gammaiota/sigmapi-web | sigmapiweb/apps/PubSite/views.py | Python | mit | 4,610 |
import os
import re
from amlib import conf, utils, log
'''
Functions for parsing AD automount maps into a common dict format.
Part of ampush. https://github.com/sfu-rcg/ampush
Copyright (C) 2016 Research Computing Group, Simon Fraser University.
'''
# ff = flat file automount map
def get_names():
'''
Return a list of files in ${conf/flat_file_map_dir} with the master map and
(optional) direct map first.
'''
l_names, fs_map_names = [], []
for root, dirs, filenames in os.walk(conf.c['flat_file_map_dir']):
for map_name in filenames:
fs_map_names.append(map_name)
# ensure the master map and direct map (if one exists) are processed first
l_names.append(conf.c['master_map_name'])
try:
fs_map_names.remove(conf.c['master_map_name'])
except ValueError:
log_msg = '{0} does not exist on the filesystem. Terminating.'
log_msg = log_msg.format(conf.c['master_map_name'])
log.m.critical(log_msg)
print(log_msg)
exit(6)
if conf.c['direct_map_name'] in fs_map_names:
l_names.append(conf.c['direct_map_name'])
fs_map_names.remove(conf.c['direct_map_name'])
fs_map_names.sort()
for map_name in fs_map_names:
if re.match(r'^auto\.', map_name):
l_names.append(map_name)
return l_names
def detect_orphans():
'''
Return a list of maps that exist on the filesystem but are not mentioned
in auto.master.
'''
master_entries = parse(conf.c['master_map_name'])
master_mapnames = []
l_orphans = []
for k, v in master_entries.items():
master_mapnames.append(v['map'])
for ff_mapname in get_names():
# auto.master should not be listed in auto.master
if (ff_mapname not in master_mapnames and
ff_mapname != 'auto.master'):
l_orphans.append(ff_mapname)
if len(l_orphans) > 0:
l_orphans.sort()
log_msg = 'Found unused maps listed in {0}: {1}'
log_msg = log_msg.format(conf.c['master_map_name'],
' '.join(l_orphans))
log.m.warning(log_msg)
print(log_msg)
return
def parse_master(map_lines=None, map_name=None):
'''
Ingest master map as a list of strings. Return a nice dict like this:
{'/-': {'map': 'auto.direct', 'options': '-rw,intr,soft,bg'},
'/foo': {'map': 'auto.foo', 'options': '-rw,intr,soft,bg'},
'/bar': {'map': 'auto.bar', 'options': '-rw,intr,soft,bg'},
'/baz': {'map': 'auto.baz',
'options': '-ro,int,soft,bg,fstype=nfs4,port=2049'},}
'''
d_map = {}
for l in map_lines:
chunks = l.split()
am_key = chunks[0]
joined = ' '.join(chunks)
d_map[am_key] = {}
'''
As with submaps the mount options field is optional.
2 fields == automount entry without mount options.
'''
if len(chunks) == 2:
d_map[am_key] = {'map': chunks[1]}
log_msg = 'No mount options for {0} in {1}'
log_msg = log_msg.format(am_key, conf.c['master_map_name'])
log.m.info(log_msg)
# 3 fields? automount directory + mapname + mount options
elif len(chunks) == 3:
d_map[am_key] = {'map': chunks[1],
'options': chunks[2]}
else:
log_msg = (
'Terminating. Bad flat file master map format: '
'unexpected number of fields in ' + joined
)
log.m.critical(log_msg)
print(log_msg)
exit(11)
return d_map
def parse_submap(map_name=None, map_lines=None):
'''
Ingest a list of automount map entries. Return a nice dict like this:
{'yuv': {'options': '-intr,bg,tcp,vers=4',
'server_dir': '/yuv',
'server_hostname': 'nfssrv01.example.com'},
'luma': {'options': '-nosuid,tcp,intr,bg,vers=3,rw',
'server_dir': '/exports/luma',
'server_hostname': 'nfssrv02.example.com'}, ...}
'''
d_map = {}
log_msg = 'Reading {0}/{1}'.format(conf.c['flat_file_map_dir'],
map_name)
log.m.debug(log_msg)
for l in map_lines:
chunks = l.split()
am_key = chunks[0] # automount key
utils.validate_nis_map_entry(in_list=chunks[1:],
map_name=map_name,
am_key=am_key,
map_type='flat file')
d_map[am_key] = {}
'''
Consider these two valid automount entries:
apps -tcp,vers=3 nfs-server1.example.com:/exports/apps
data nfs-server2.example.com:/srv/data
If a third field exists, use it as the NFS path.
Otherwise use the second field as the NFS path.
'''
try: # server:path pair with options
server_hostname = chunks[2].split(':')[0]
server_dir = chunks[2].split(':')[1]
options = chunks[1]
utils.validate_mount_options(opt_str=options,
map_name=map_name,
am_key=am_key)
d_map[am_key] = {'server_hostname': server_hostname,
'server_dir': server_dir,
'options': options}
except IndexError: # without options
server_hostname = chunks[1].split(':')[0]
server_dir = chunks[1].split(':')[1]
d_map[am_key] = {'server_hostname': server_hostname,
'server_dir': server_dir,
'options': None}
return d_map
def parse(map_name=None):
'''
Read flat file automount maps ${ampush.conf/flat_file_map_dir} and
    pass map names to parse_master or parse_submap.
'''
map_pathname = conf.c['flat_file_map_dir'] + '/' + map_name
map_lines = utils.ff_map_to_list(map_pathname)
map_type = 'flat file'
# different map types (master, direct, plain) == different sanity checks
if map_name == conf.c['master_map_name']:
d_map = parse_master(map_name=map_name,
map_lines=map_lines)
utils.master_map_sanity_checks(map_dict=d_map,
map_type=map_type)
else:
d_map = parse_submap(map_name=map_name,
map_lines=map_lines)
utils.submap_sanity_checks(map_dict=d_map,
map_type=map_type)
return d_map
| sfu-rcg/ampush | amlib/file_map.py | Python | mit | 6,519 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-06 12:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.forms.widgets
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='RemoteServer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('server_url', models.CharField(max_length=50)),
('server_name', models.CharField(max_length=20)),
('date_added', models.DateField()),
],
),
migrations.CreateModel(
name='UserData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.IntegerField()),
('user_name', models.CharField(max_length=20)),
('user_password', models.CharField(max_length=20, verbose_name=django.forms.widgets.PasswordInput)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sshcomm.RemoteServer')),
],
),
]
| t-mertz/slurmCompanion | django-web/sshcomm/migrations/0001_initial.py | Python | mit | 1,536 |
from operator import mul
def multiply(n, l):
return map(lambda a: mul(a, n), l)
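# Hedged usage note (illustrative): multiply(3, [1, 2, 3]) yields 3, 6, 9.
# On Python 3 map() returns a lazy iterator, so wrap it in list() when a
# concrete list is needed:
#
#   list(multiply(3, [1, 2, 3]))  # -> [3, 6, 9]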
| the-zebulan/CodeWars | katas/beta/multiply_list_by_integer_with_restrictions.py | Python | mit | 86 |
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import itertools
import logging
import pygame
width = 128
height = 32
log = logging.getLogger("pin.dmd")
class DMD(object):
def __init__(self):
self.renderer = None
self.previous_renderer = None
self.frame = pygame.Surface((width, height))
self.previous_frame = pygame.Surface((width, height))
self.frame_from = pygame.Surface((width, height))
self.frame_to = pygame.Surface((width, height))
self.transition = None
self.override = None
self.stack = []
self.queue = []
def add(self, renderer, transition=None):
if renderer in self.stack:
return
self.add_renderer(self.stack, renderer, transition)
def enqueue(self, renderer, transition=None):
if renderer in self.queue:
return
self.add_renderer(self.queue, renderer, transition)
def interrupt(self, renderer):
self.override = renderer
self.override.render_start()
log.debug("interrupting with {}".format(renderer))
def replace(self, previous, current, transition=None):
trans = "using {}".format(transition) if transition else ""
log.debug("{} replaces {} {}".format(current, previous, trans))
if previous in self.stack:
self.stack[self.stack.index(previous)] = current
elif previous in self.queue:
self.queue[self.queue.index(previous)] = current
else:
transition = None
self.stack += [current]
self.shift_renderer(transition)
def clear(self):
for renderer in self.queue:
renderer.on_render_stop()
self.queue[:] = []
if self.override:
self.override.on_render_stop()
self.override = None
self.shift_renderer()
def reset(self):
if self.renderer:
self.renderer.on_render_stop()
if self.previous_renderer:
self.previous_renderer.on_render_stop()
self.renderer = None
self.previous_renderer = None
self.stack[:] = []
self.clear()
self.transition = None
def add_renderer(self, collection, renderer, transition=None):
trans = "using {}".format(transition) if transition else ""
log.debug("{} added {}".format(renderer, trans))
collection += [renderer]
self.shift_renderer(transition)
def remove(self, renderer):
if renderer == self.override:
self.override.render_stop()
self.override = None
return
if renderer in self.stack:
self.stack.remove(renderer)
if renderer in self.queue:
self.queue.remove(renderer)
self.shift_renderer()
def shift_renderer(self, transition=None):
if len(self.queue) > 0:
renderer = self.queue[0]
elif len(self.stack) > 0:
renderer = self.stack[-1]
else:
renderer = None
if self.previous_renderer in self.stack:
self.previous_renderer.render_suspend()
elif self.previous_renderer:
self.previous_renderer.render_stop()
if self.renderer:
self.renderer.render_stop()
self.previous_renderer = self.renderer
if not renderer:
self.renderer = None
else:
if transition:
transition.reset()
elif self.renderer in self.stack:
self.renderer.render_suspend()
elif self.renderer:
self.renderer.render_stop()
self.renderer = renderer
self.transition = transition
self.renderer.render_start()
def render(self):
self.frame, self.previous_frame = self.previous_frame, self.frame
self.frame.fill(0)
if self.override:
self.override.render(self.frame)
return self.frame
if not self.renderer and (len(self.stack) > 0 or len(self.queue) > 0):
raise ValueError("No Renderer")
elif not self.renderer:
return
if self.transition and self.transition.done:
self.transition = None
if self.renderer != self.previous_renderer:
self.previous_renderer.render_stop()
self.previous_renderer = None
if self.transition:
self.frame_from.fill(0)
self.frame_to.fill(0)
self.renderer.render(self.frame_to)
self.previous_renderer.render(self.frame_from)
self.transition.render(self.frame, self.frame_from, self.frame_to)
else:
self.renderer.render(self.frame)
return self.frame
dmd = DMD()
add = dmd.add
replace = dmd.replace
interrupt = dmd.interrupt
remove = dmd.remove
enqueue = dmd.enqueue
clear = dmd.clear
reset = dmd.reset
render = dmd.render
def create_frame(width=width, height=height, has_alpha=True):
if has_alpha:
return pygame.Surface((width, height), pygame.locals.SRCALPHA)
else:
return pygame.Surface((width, height))
def create_dots(frame):
return pygame.PixelArray(frame)
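# Hedged usage sketch (not part of the original module): a minimal renderer
# satisfying the interface this module appears to expect -- add()/render()
# call render_start() and render(frame), shift_renderer() may call
# render_suspend()/render_stop(), and clear()/reset() call on_render_stop().
# The class and the fill colour below are illustrative assumptions only.
class _ExampleRenderer(object):
    def render_start(self):
        pass
    def render_suspend(self):
        pass
    def render_stop(self):
        pass
    def on_render_stop(self):
        pass
    def render(self, frame):
        frame.fill((255, 255, 255))
# e.g. add(_ExampleRenderer()) followed by render() yields an all-white frame.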
| town-hall-pinball/project-omega | pin/lib/dmd.py | Python | mit | 6,277 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
f = open('data.txt', 'w')
size = f.write('Hello\n')
f.write('World\n')
f.close()
f = open('data.txt')
text = f.read()
print(text)
f.close()
| Furzoom/learnpython | usefull_scripts/write_to_file.py | Python | mit | 187 |
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt', 'os2' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import curdir, pardir, sep, pathsep, defpath, altsep, devnull
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777):
"""makedirs(path [, mode=0o777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned way until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
last_exc = saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error(saved_exc).with_traceback(saved_tb)
raise error(last_exc).with_traceback(tb)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _abcoll import MutableMapping # Can't use collections (bootstrap)
class _Environ(MutableMapping):
def __init__(self, environ, keymap, putenv, unsetenv):
self.keymap = keymap
self.putenv = putenv
self.unsetenv = unsetenv
self.data = data = {}
for key, value in environ.items():
data[keymap(key)] = str(value)
def __getitem__(self, key):
return self.data[self.keymap(key)]
def __setitem__(self, key, value):
value = str(value)
self.putenv(key, value)
self.data[self.keymap(key)] = value
def __delitem__(self, key):
self.unsetenv(key)
del self.data[self.keymap(key)]
def __iter__(self):
for key in self.data:
yield key
def __len__(self):
return len(self.data)
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
__all__.append("unsetenv")
if name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
_keymap = lambda key: str(key.upper())
else: # Where Env Var Names Can Be Mixed Case
_keymap = lambda key: str(key)
environ = _Environ(environ, _keymap, _putenv, _unsetenv)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
import copyreg as _copyreg
def _make_stat_result(tup, dict):
return stat_result(tup, dict)
def _pickle_stat_result(sr):
(type, args) = sr.__reduce__()
return (_make_stat_result, args)
try:
_copyreg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
pass
def _make_statvfs_result(tup, dict):
return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
(type, args) = sr.__reduce__()
return (_make_statvfs_result, args)
try:
_copyreg.pickle(statvfs_result, _pickle_statvfs_result,
_make_statvfs_result)
except NameError: # statvfs_result may not exist
pass
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bs = b""
while len(bs) < n:
bs += read(_urandomfd, n - len(bs))
close(_urandomfd)
return bs
# Supply os.popen()
def popen(cmd, mode="r", buffering=None):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
return self._proc.wait() << 8 # Shift left to match old behavior
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/os.py | Python | mit | 21,471 |
from mabozen.lib.url import parse_rfc1738_args
def parse_url(db_url):
"""parse url"""
components = parse_rfc1738_args(db_url)
return components | mabotech/mabozen | mabozen/lib/parse_url.py | Python | mit | 163 |
"""Unit tests for PyGraphviz interface."""
import os
import tempfile
import pytest
pygraphviz = pytest.importorskip('pygraphviz')
from networkx.testing import assert_edges_equal, assert_nodes_equal, \
assert_graphs_equal
import networkx as nx
class TestAGraph(object):
def build_graph(self, G):
edges = [('A', 'B'), ('A', 'C'), ('A', 'C'), ('B', 'C'), ('A', 'D')]
G.add_edges_from(edges)
G.add_node('E')
G.graph['metal'] = 'bronze'
return G
def assert_equal(self, G1, G2):
assert_nodes_equal(G1.nodes(), G2.nodes())
assert_edges_equal(G1.edges(), G2.edges())
assert G1.graph['metal'] == G2.graph['metal']
def agraph_checks(self, G):
G = self.build_graph(G)
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
self.assert_equal(G, H)
fname = tempfile.mktemp()
nx.drawing.nx_agraph.write_dot(H, fname)
Hin = nx.nx_agraph.read_dot(fname)
os.unlink(fname)
self.assert_equal(H, Hin)
(fd, fname) = tempfile.mkstemp()
with open(fname, 'w') as fh:
nx.drawing.nx_agraph.write_dot(H, fh)
with open(fname, 'r') as fh:
Hin = nx.nx_agraph.read_dot(fh)
os.unlink(fname)
self.assert_equal(H, Hin)
def test_from_agraph_name(self):
G = nx.Graph(name='test')
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
assert G.name == 'test'
def test_undirected(self):
self.agraph_checks(nx.Graph())
def test_directed(self):
self.agraph_checks(nx.DiGraph())
def test_multi_undirected(self):
self.agraph_checks(nx.MultiGraph())
def test_multi_directed(self):
self.agraph_checks(nx.MultiDiGraph())
def test_view_pygraphviz(self):
G = nx.Graph() # "An empty graph cannot be drawn."
pytest.raises(nx.NetworkXException, nx.nx_agraph.view_pygraphviz, G)
G = nx.barbell_graph(4, 6)
nx.nx_agraph.view_pygraphviz(G)
def test_view_pygraphviz_edgelable(self):
G = nx.Graph()
G.add_edge(1, 2, weight=7)
G.add_edge(2, 3, weight=8)
nx.nx_agraph.view_pygraphviz(G, edgelabel='weight')
def test_graph_with_reserved_keywords(self):
# test attribute/keyword clash case for #1582
# node: n
# edges: u,v
G = nx.Graph()
G = self.build_graph(G)
G.nodes['E']['n'] = 'keyword'
G.edges[('A', 'B')]['u'] = 'keyword'
G.edges[('A', 'B')]['v'] = 'keyword'
A = nx.nx_agraph.to_agraph(G)
def test_round_trip(self):
G = nx.Graph()
A = nx.nx_agraph.to_agraph(G)
H = nx.nx_agraph.from_agraph(A)
#assert_graphs_equal(G, H)
AA = nx.nx_agraph.to_agraph(H)
HH = nx.nx_agraph.from_agraph(AA)
assert_graphs_equal(H, HH)
G.graph['graph'] = {}
G.graph['node'] = {}
G.graph['edge'] = {}
assert_graphs_equal(G, HH)
def test_2d_layout(self):
G = nx.Graph()
G = self.build_graph(G)
G.graph["dimen"] = 2
pos = nx.nx_agraph.pygraphviz_layout(G, prog='neato')
pos = list(pos.values())
assert len(pos) == 5
assert len(pos[0]) == 2
def test_3d_layout(self):
G = nx.Graph()
G = self.build_graph(G)
G.graph["dimen"] = 3
pos = nx.nx_agraph.pygraphviz_layout(G, prog='neato')
pos = list(pos.values())
assert len(pos) == 5
assert len(pos[0]) == 3
| sserrot/champion_relationships | venv/Lib/site-packages/networkx/drawing/tests/test_agraph.py | Python | mit | 3,587 |
print("hola me llamo juan")
a = 'ofi'
b = 'chilea'
c = 'Mmm'
d = 'me da igual'
bandera = True
carac = input()
if carac.endswith('?'):
print(a)
elif carac.isupper():
print (b)
elif carac.isalnum():
print(d)
else:
print(c)
| erick84/uip-iiiq2016-prog3 | laboratorio3/Juanelcallado/Juan.py | Python | mit | 284 |
import pygame
import os
from buffalo import utils
from item import Item
# User interface for trading with NPCs
# Similar to the crafting UI, with some minor differences
# The biggest thing is that it only appears when you "talk to" (read click on)
# A trader NPC and disappears when you leave that window, and only contains a
# Limited number of trades
class TradingUI:
BUTTON_SIZE = 32
PADDING = 6
def __init__(self, inventory, tradeSet):
self.tradeSet = tradeSet
self.inventory = inventory
self.surface = utils.empty_surface((228,500))
self.surface.fill((100,100,100,100))
self.pos = (utils.SCREEN_W / 2 + self.surface.get_width() / 2 + 350, utils.SCREEN_H / 2 - 150)
self.tileRects = list()
self.tileTrades = list()
self.updateTradeTable()
def updateTradeTable(self):
self.surface = utils.empty_surface((228,500))
self.surface.fill((100,100,100,100))
self.tileRects = list()
self.tileTrades = list()
tradeTiles = list()
total_y = 0
for t in self.tradeSet:
newTile = self.generateTradeTile(t)
tradeTiles.append(newTile)
self.tileRects.append(pygame.Rect(0, total_y, newTile.get_width(), newTile.get_height()))
self.tileTrades.append(t)
total_y += newTile.get_height()
newSurface = utils.empty_surface((228, total_y))
newSurface.fill((100,100,100,255))
currY = 0
for surf in tradeTiles:
newSurface.blit(surf, (0, currY))
currY += surf.get_height()
self.surface = newSurface
def generateTradeTile(self, trade):
y_length = 36 * (len(trade.price.keys()) / 3) + 78;
newScreen = utils.empty_surface((228, y_length))
for num, item in enumerate(trade.price.keys()):
x = ((num % 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
y = ((num / 3) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
itemSurface = pygame.Surface.copy(Item(item, quantity = trade.price[item]).surface)
if self.inventory.getTotalItemQuantity(item) < trade.price[item]:
itemSurface.fill(pygame.Color(255,0,0,250)[0:3] + (0,), None, pygame.BLEND_RGBA_ADD)
newScreen.blit(itemSurface, (x,y))
for num, item in enumerate(trade.goods.keys()):
x = 192 - (((num % 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING)
y = ((num / 2) * TradingUI.BUTTON_SIZE) + TradingUI.PADDING
newScreen.blit(Item(item, quantity = trade.goods[item]).surface, (x,y))
path = os.path.join(os.path.join(*list(['assets'] + ['items'] + ["arrow.png"])))
arrowSurface = pygame.image.load(path)
newScreen.blit(arrowSurface,(114, (newScreen.get_height() / 2) - arrowSurface.get_height() / 2))
myfont = pygame.font.SysFont("monospace", 15)
color = (255,255,0)
if not trade.canTrade(self.inventory):
color = (255,0,0)
label = myfont.render(str(trade.name), 1, color)
newScreen.blit(label, (newScreen.get_width() - label.get_width() - 2, newScreen.get_height() - label.get_height() - 2))
pygame.draw.rect(newScreen, (0,0,0,255), pygame.Rect(0,0,228, y_length), 1)
return newScreen
def blit(self, dest, pos):
dest.blit(self.surface, pos)
def update(self):
pass
def mouseDown(self, pos):
for tile in self.tileRects:
if(tile.collidepoint(pos)):
clickedTrade = self.tileTrades[self.tileRects.index(tile)]
if not clickedTrade.canTrade(self.inventory):
return
for item in clickedTrade.price.keys():
self.inventory.removeItemQuantity(item, clickedTrade.price[item])
for item in clickedTrade.goods.keys():
newItem = Item(item)
newItem.quantity = clickedTrade.goods[item]
self.inventory.addItem(newItem)
self.inventory.update()
self.updateTradeTable()
return
| benjamincongdon/adept | tradingUI.py | Python | mit | 3,586 |
#!/usr/bin/env python
"""Robobonobo setup script.
Usage:
./get_ready.py [options]
Options:
-h, --help Show this help screen
--version Show the version.
"""
from docopt import docopt
from glob import glob
import os
GPIOS = [30, 31, 112, 113, 65, 27]
GPIO_BASE = "/sys/class/gpio"
SLOTS_GLOB = "/sys/devices/bone_capemgr.?/slots"
def write_gpio(filename, msg, pindir=""):
with open(os.path.join(GPIO_BASE, pindir, filename), mode="w+") as ex:
ex.write(msg)
def setup_gpio(pin):
write_gpio("export", pin)
pindir = "gpio" + pin
write_gpio("direction", "out", pindir)
write_gpio("value", "0", pindir)
def setup_dto():
for match in glob(SLOTS_GLOB):
with open(match, mode="w+") as slots:
slots.write("robobonobo")
def main():
for gpio in GPIOS:
setup_gpio(str(gpio))
setup_dto()
if __name__ == "__main__":
args = docopt(__doc__, version="Robobonobo setup script v1")
main()
| dunmatt/robobonobo | scripts/get_ready.py | Python | mit | 979 |
import PySeis as ps
import numpy as np
import pylab
#import dataset
input = ps.io.su.SU("./data/sample.su")
input.read("./data/raw.npy")
#initialise dataset
#~ data, params = toolbox.initialise("geometries.su")
#trim data
#~ params['ns'] = 1500
#~ data = toolbox.slice(data, None, **params)
#~ data.tofile("geom_short.su")
#initialise dataset
#data, params = toolbox.initialise("geom_short.su")
#agc
#~ toolbox.agc(data, None, **params)
#params['gamma'] = 1.5
#toolbox.tar(data, None, **params)
#kills = [270, 300, 374, 614] #fldr
#mask = toolbox.build_mask(data['fldr'], kills)
#data = data[mask]
#data.tofile("prepro.su")
#display
#~ params['primary'] = 'fldr'
#~ params['secondary'] = 'tracf'
#~ params['wiggle'] = True
#~ toolbox.display(data, **params)
#~ pylab.show()
| stuliveshere/PySeis | examples/01.0_import_su.py | Python | mit | 787 |
#!/usr/bin/env python
"""Parse GCC-XML output files and produce a list of class names."""
# import system modules.
import multiprocessing
import xml.dom.minidom
import sys
import os
# Import application modules.
import mpipe
import util
# Configure and parse the command line.
NAME = os.path.basename(sys.argv[0])
ARGS = [('out_file', 'output file'),
('xml_dir', 'directory with XML files'),]
ARGS = util.parse_cmd(NAME, ARGS)
# Create a list of input files.
fnames = list()
for entry in os.listdir(ARGS['xml_dir']):
fname = os.path.join(ARGS['xml_dir'], entry)
if not os.path.isfile(fname):
continue
fnames.append(fname)
num_cpus = multiprocessing.cpu_count()
print('Parsing %d files on %d CPUs'%(len(fnames), num_cpus,))
# Parse files in a pipeline.
def parseFile(fname):
"""Parse the XML file looking for fully demangled class
names, and communicate the result."""
names = list()
doc = xml.dom.minidom.parse(fname)
classes = doc.getElementsByTagName('Class')
for entry in classes:
name = entry.getAttribute('demangled')
NSPACE = 'Wm5::'
if name[:len(NSPACE)] != NSPACE:
continue
names.append(name)
return names
pipe = mpipe.Pipeline(mpipe.UnorderedStage(parseFile, num_cpus))
for fname in fnames:
pipe.put(fname)
pipe.put(None)
# Report on progress in realtime.
total_names = dict()
done_count = 0
for result in pipe.results():
for name in result:
total_names[name] = None
done_count += 1
percent = float(done_count) / len(fnames) * 100
sys.stdout.write('\r' + '%d of %d (%.1f%%)'%(done_count, len(fnames), percent))
sys.stdout.flush()
# End on a newline.
print()
print('Writing file %s'%ARGS['out_file'])
fout = open(ARGS['out_file'], 'w')
for key in sorted(total_names):
fout.write('%s\n'%key)
fout.close()
# The end.
| vmlaker/pythonwildmagic | tool/parse-xml.py | Python | mit | 1,870 |
from __future__ import print_function, division
import numpy as np
import pytest
import sys
import chronostar.likelihood
sys.path.insert(0,'..')
from chronostar import expectmax as em
from chronostar.synthdata import SynthData
from chronostar.component import SphereComponent
from chronostar import tabletool
from chronostar import expectmax
import chronostar.synthdata as syn
# import chronostar.retired2.measurer as ms
# import chronostar.retired2.converter as cv
#
# def test_calcMedAndSpan():
# """
# Test that the median, and +- 34th percentiles is found correctly
# """
# dx = 10.
# dv = 5.
# dummy_mean = np.array([10,10,10, 5, 5, 5,np.log(dx),np.log(dv),20])
# dummy_std = np.array([1.,1.,1.,1.,1.,1.,0.5, 0.5, 3.])
# assert len(dummy_mean) == len(dummy_std)
# npars = len(dummy_mean)
#
# nsteps = 10000
# nwalkers = 18
#
# dummy_chain = np.array([np.random.randn(nsteps)*std + mean
# for (std, mean) in zip(dummy_std, dummy_mean)]).T
# np.repeat(dummy_chain, 18, axis=0).reshape(nwalkers,nsteps,npars)
#
# med_and_span = em.calcMedAndSpan(dummy_chain)
# assert np.allclose(dummy_mean, med_and_span[:,0], atol=0.1)
# approx_stds = 0.5*(med_and_span[:,1] - med_and_span[:,2])
# assert np.allclose(dummy_std, approx_stds, atol=0.1)
def test_calcMembershipProbs():
"""
Even basicer. Checks that differing overlaps are
correctly mapped to memberships.
"""
# case 1
star_ols = [10, 10]
assert np.allclose([.5,.5], em.calc_membership_probs(np.log(star_ols)))
# case 2
star_ols = [10, 30]
assert np.allclose([.25,.75], em.calc_membership_probs(np.log(star_ols)))
# case 3
star_ols = [10, 10, 20]
assert np.allclose([.25, .25, .5],
em.calc_membership_probs(np.log(star_ols)))
def test_expectation():
"""
Super basic, generates some association stars along
with some background stars and checks membership allocation
is correct
"""
age = 1e-5
ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
comp1 = SphereComponent(ass_pars1)
ass_pars2 = np.array([100., 0, 0, 20, 0, 0, 5., 2., age])
comp2 = SphereComponent(ass_pars2)
starcounts = [100,100]
synth_data = SynthData(pars=[ass_pars1, ass_pars2],
starcounts=starcounts)
synth_data.synthesise_everything()
tabletool.convert_table_astro2cart(synth_data.table)
true_memb_probs = np.zeros((np.sum(starcounts), 2))
true_memb_probs[:starcounts[0], 0] = 1.
true_memb_probs[starcounts[0]:, 1] = 1.
# star_means, star_covs = tabletool.buildDataFromTable(synth_data.astr_table)
# all_lnols = em.getAllLnOverlaps(
# synth_data.astr_table, [comp1, comp2]
# )
fitted_memb_probs = em.expectation(
tabletool.build_data_dict_from_table(synth_data.table),
[comp1, comp2]
)
assert np.allclose(true_memb_probs, fitted_memb_probs, atol=1e-10)
'''
@pytest.mark.skip
def test_fit_many_comps_gradient_descent_with_multiprocessing():
"""
Added by MZ 2020 - 07 - 13
Test if maximisation works when using gradient descent and multiprocessing.
"""
age = 1e-5
ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
comp1 = SphereComponent(ass_pars1)
starcounts = [100,]
synth_data = SynthData(pars=[ass_pars1,],
starcounts=starcounts)
synth_data.synthesise_everything()
tabletool.convert_table_astro2cart(synth_data.table)
true_memb_probs = np.zeros((np.sum(starcounts), 2))
true_memb_probs[:starcounts[0], 0] = 1.
true_memb_probs[starcounts[0]:, 1] = 1.
ncomps = len(starcounts)
best_comps, med_and_spans, memb_probs = \
expectmax.fit_many_comps(synth_data.table, ncomps,
rdir='test_gradient_descent_multiprocessing',
#~ init_memb_probs=None,
#~ init_comps=None,
trace_orbit_func=None,
optimisation_method='Nelder-Mead',
nprocess_ncomp = True,
)
'''
@pytest.mark.skip(reason='Too long for unit tests. Put this in integration instead')
def test_maximisation_gradient_descent_with_multiprocessing_tech():
"""
Added by MZ 2020 - 07 - 13
Test if maximisation works when using gradient descent and multiprocessing.
    NOTE: this does not test whether maximisation returns appropriate results;
    it only tests that the code runs without errors. This is mainly to test
multiprocessing.
"""
age = 1e-5
ass_pars1 = np.array([0, 0, 0, 0, 0, 0, 5., 2., age])
comp1 = SphereComponent(ass_pars1)
starcounts = [100,]
synth_data = SynthData(pars=[ass_pars1,],
starcounts=starcounts)
synth_data.synthesise_everything()
tabletool.convert_table_astro2cart(synth_data.table)
true_memb_probs = np.zeros((np.sum(starcounts), 1))
true_memb_probs[:starcounts[0], 0] = 1.
#~ true_memb_probs[starcounts[0]:, 1] = 1.
ncomps = len(starcounts)
noise = np.random.rand(ass_pars1.shape[0])*5
all_init_pars = [ass_pars1 + noise]
new_comps, all_samples, _, all_init_pos, success_mask =\
expectmax.maximisation(synth_data.table, ncomps,
true_memb_probs, 100, 'iter00',
all_init_pars,
optimisation_method='Nelder-Mead',
nprocess_ncomp=True,
)
# TODO: test if new_comps, all_samples, _, all_init_pos, success_mask are of the right format.
# def test_background_overlaps():
# """
# Author: Marusa Zerjal, 2019 - 05 - 26
# Compare background overlap with KDE and background overlap with tiny covariance matrix
# :return:
# """
# background_means = tabletool.build_data_dict_from_table(kernel_density_input_datafile,
# only_means=True,
# )
# ln_bg_ols_kde = em.get_kernel_densities(background_means,
# # star_means, )
if __name__=='__main__':
test_maximisation_gradient_descent_with_multiprocessing_tech()
| mikeireland/chronostar | unit_tests/test_unit_expectmax.py | Python | mit | 6,240 |
def python_evaluate(text):
return eval(str(text))
def python_print(*values, sep=' '):
joined = sep.join((str(v) for v in values))
print(joined)
def python_list(*args):
return args
def error(text=''):
raise RuntimeError(text) | osspeak/osspeak | osspeak/recognition/actions/library/general.py | Python | mit | 247 |
#!/usr/bin/env python
'''
utils.py: part of som package
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import collections
import errno
import fnmatch
import os
import json
import re
import requests
import shutil
import simplejson
import som.__init__ as hello
from som.logger import bot
import sys
import subprocess
import tempfile
import tarfile
import zipfile
# Python less than version 3 must import OSError
if sys.version_info[0] < 3:
from exceptions import OSError
######################################################################################
# Local commands and requests
######################################################################################
def get_installdir():
'''get_installdir returns the installation directory of the application
'''
return os.path.abspath(os.path.dirname(hello.__file__))
def get_dataset(dataset=None):
'''get_dataset will return some data provided by the application,
based on a user-provided label. In the future, we can add https endpoints
to retrieve online datasets.
'''
here = get_installdir()
valid_datasets = {'developers_uid':'%s/api/identifiers/data/developers_uid.json' %here}
if dataset is not None:
# In case the user gave an extension
dataset = os.path.splitext(dataset)[0].lower()
if dataset in valid_datasets:
return valid_datasets[dataset]
bot.info("Valid datasets include: %s" %(','.join(list(valid_datasets.keys()))))
def run_command(cmd,error_message=None,sudopw=None,suppress=False):
'''run_command uses subprocess to send a command to the terminal.
:param cmd: the command to send, should be a list for subprocess
:param error_message: the error message to give to user if fails,
if none specified, will alert that command failed.
    :param sudopw: if specified (not None) command will be run asking for sudo
    :param suppress: if True (sudo mode only), run the command without capturing its output
'''
if sudopw == None:
sudopw = os.environ.get('pancakes',None)
if sudopw != None:
cmd = ' '.join(["echo", sudopw,"|","sudo","-S"] + cmd)
if suppress == False:
output = os.popen(cmd).read().strip('\n')
else:
output = cmd
os.system(cmd)
else:
try:
process = subprocess.Popen(cmd,stdout=subprocess.PIPE)
output, err = process.communicate()
except OSError as error:
            if error.errno == errno.ENOENT:
                bot.error(error_message)
            else:
                bot.error(str(error))
return None
return output
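# Illustrative usage sketch (not part of the original module): calls run_command with a
# list argument, which takes the non-sudo subprocess path; if the 'pancakes' environment
# variable is set, run_command wraps the command in sudo instead. The echo binary and
# the error message are assumptions for demonstration only.
def _example_run_command():
    output = run_command(["echo", "hello"],
                         error_message="echo is not available on this system")
    return output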
def get_listfirst(item,group):
'''return the first found in a list (group) from a
dictionary item.
'''
if not isinstance(group,list):
group = [group]
for contender in group:
if contender in item:
return item[contender]
return None
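# Illustrative usage sketch (not part of the original module): returns the value of the
# first key from the group that is present in the dict, so this call returns 'abc'
# because 'uid' is listed before 'id'. The dictionary is an assumption for demonstration.
def _example_get_listfirst():
    return get_listfirst({"id": 42, "uid": "abc"}, ["uid", "id"])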
############################################################################
## FILE OPERATIONS #########################################################
############################################################################
def write_file(filename,content,mode="w"):
'''write_file will open a file, "filename" and write content, "content"
and properly close the file
'''
with open(filename,mode) as filey:
filey.writelines(content)
return filename
def write_json(json_obj,filename,mode="w",print_pretty=True):
'''write_json will (optionally,pretty print) a json object to file
:param json_obj: the dict to print to json
:param filename: the output file to write to
    :param print_pretty: if True, will use nicer formatting
'''
with open(filename,mode) as filey:
if print_pretty == True:
filey.writelines(simplejson.dumps(json_obj, indent=4, separators=(',', ': ')))
else:
filey.writelines(simplejson.dumps(json_obj))
return filename
def read_file(filename,mode="r"):
    '''read_file will open a file, "filename", read its lines into "content",
    and properly close the file
'''
with open(filename,mode) as filey:
content = filey.readlines()
return content
def read_json(filename,mode="r"):
'''read_json will open a file, "filename" and read the string as json
'''
with open(filename,mode) as filey:
content = json.loads(filey.read())
return content
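# Illustrative usage sketch (not part of the original module): round-trips a small
# dictionary through write_json and read_json inside a temporary directory. The
# dictionary contents and file name are assumptions for demonstration only.
def _example_json_roundtrip():
    tmpdir = tempfile.mkdtemp()
    json_path = "%s/example.json" % tmpdir
    write_json({"name": "som", "count": 3}, json_path)
    loaded = read_json(json_path)
    shutil.rmtree(tmpdir)
    return loaded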
############################################################################
## COMPRESSED FILES ########################################################
############################################################################
def detect_compressed(folder,compressed_types=None):
'''detect compressed will return a list of files in
some folder that are compressed, by default this means
    .zip or .tar.gz, but the caller can specify a custom list
:param folder: the folder base to use.
:param compressed_types: a list of types to include, should
be extensions in format like *.tar.gz, *.zip, etc.
'''
compressed = []
if compressed_types == None:
compressed_types = ["*.tar.gz",'*zip']
bot.debug("Searching for %s" %", ".join(compressed_types))
for filey in os.listdir(folder):
for compressed_type in compressed_types:
if fnmatch.fnmatch(filey, compressed_type):
compressed.append("%s/%s" %(folder,filey))
bot.debug("Found %s compressed files in %s" %len(compressed),folder)
return compressed
def unzip_dir(zip_file,dest_dir=None):
'''unzip_dir will extract a zipfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param zip_file: the .zip file to unzip
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
with zipfile.ZipFile(zip_file,"r") as zf:
zf.extractall(dest_dir)
return dest_dir
def zip_dir(zip_dir, zip_name, output_folder=None):
    '''zip_dir will zip up an entire directory
    :param zip_dir: the folder to zip up
    :param zip_name: the name of the zip to return
    :param output_folder: if provided, the zip is copied there and that path is returned
'''
tmpdir = tempfile.mkdtemp()
output_zip = "%s/%s" %(tmpdir,zip_name)
zf = zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED, allowZip64=True)
for root, dirs, files in os.walk(zip_dir):
for file in files:
zf.write(os.path.join(root, file))
zf.close()
if output_folder != None:
shutil.copyfile(output_zip,"%s/%s"%(output_folder,zip_name))
shutil.rmtree(tmpdir)
output_zip = "%s/%s"%(output_folder,zip_name)
return output_zip
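# Illustrative usage sketch (not part of the original module): zips an existing folder
# with zip_dir and extracts the archive again with unzip_dir. The folder argument and
# archive name are assumptions for demonstration only.
def _example_zip_roundtrip(folder):
    archive = zip_dir(folder, "example.zip", output_folder=tempfile.mkdtemp())
    extracted_to = unzip_dir(archive)
    return extracted_to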
def untar_dir(tar_file,dest_dir=None):
'''untar_dir will extract a tarfile to a directory. If
an extraction destination is not defined, a temporary
directory will be created and used.
:param tar_file: the .tar.gz file to untar/decompress
:param dest_dir: the destination directory
'''
if dest_dir == None:
dest_dir = tempfile.mkdtemp()
contents = []
if tarfile.is_tarfile(tar_file):
with tarfile.open(tar_file) as tf:
tf.extractall(dest_dir)
return dest_dir
| radinformatics/som-tools | som/utils.py | Python | mit | 8,265 |
#!/usr/bin/python
import argparse
from board_manager import BoardManager
from constants import *
def main():
parser = argparse.ArgumentParser(description='Board client settings')
parser.add_argument('-sp', '--PORT', help='server port', type=int,
default=80, required=False)
parser.add_argument('-sip', '--IP', help='server ip', type=str,
default='', required=False)
parser.add_argument('-pt', '--TO', help='phone to', type=str,
default='', required=False)
parser.add_argument('-pf', '--FROM', help='phone from', type=str,
default='', required=False)
parser.add_argument('-tk', '--TWKEY', help='twilio key', type=str,
default='', required=False)
args = parser.parse_args()
bm = BoardManager(args)
bm.activate()
if __name__ == "__main__":
main()
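# Illustrative invocation sketch (all values are hypothetical, not taken from the project):
#   python board_client.py -sip 192.0.2.10 -sp 8080 -pt +15550100 -pf +15550199 -tk TWILIO_KEY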
| TeamProxima/predictive-fault-tracker | board/board_client.py | Python | mit | 909 |
"""
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
import shutil
from contextlib import redirect_stdout
from io import StringIO
from pyquickhelper.pycode import ExtTestCase
from pyquickhelper.pycode import process_standard_options_for_setup_help, get_temp_folder
from pyquickhelper.texthelper import compare_module_version
from pyquickhelper.texthelper.version_helper import numeric_module_version
from pyquickhelper.pycode.setup_helper import (
clean_notebooks_for_numbers, hash_list, process_argv_for_unittest,
process_standard_options_for_setup)
class TestMissingFunctionsPycode(ExtTestCase):
def test_process_standard_options_for_setup_help(self):
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help('--help-commands')
self.assertIn('Commands processed by pyquickhelper:', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'unittests'])
self.assertIn('-f file', f.getvalue())
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup_help(['--help', 'clean_space'])
self.assertIn('clean unnecessary spaces', f.getvalue())
@unittest.skipIf(sys.platform != 'win32', reason="not available")
def test_process_standard_options_for_setup(self):
temp = get_temp_folder(
__file__, "temp_process_standard_options_for_setup")
os.mkdir(os.path.join(temp, '_unittests'))
f = StringIO()
with redirect_stdout(f):
process_standard_options_for_setup(
['build_script'], file_or_folder=temp, project_var_name="debug",
fLOG=print)
text = f.getvalue()
self.assertIn('[process_standard_options_for_setup]', text)
self.assertExists(os.path.join(temp, 'bin'))
def test_numeric_module_version(self):
self.assertEqual(numeric_module_version((4, 5)), (4, 5))
self.assertEqual(numeric_module_version("4.5.e"), (4, 5, 'e'))
self.assertEqual(compare_module_version(("4.5.e"), (4, 5, 'e')), 0)
self.assertEqual(compare_module_version(("4.5.e"), None), -1)
self.assertEqual(compare_module_version(None, ("4.5.e")), 1)
self.assertEqual(compare_module_version(None, None), 0)
self.assertEqual(compare_module_version(
("4.5.e"), (4, 5, 'e', 'b')), -1)
def test_clean_notebooks_for_numbers(self):
temp = get_temp_folder(__file__, "temp_clean_notebooks_for_numbers")
nb = os.path.join(temp, "..", "data", "notebook_with_svg.ipynb")
fold = os.path.join(temp, '_doc', 'notebooks')
self.assertNotExists(fold)
os.makedirs(fold)
shutil.copy(nb, fold)
res = clean_notebooks_for_numbers(temp)
self.assertEqual(len(res), 1)
with open(res[0], 'r') as f:
content = f.read()
self.assertIn('"execution_count": 1,', content)
def test_hash_list(self):
li = [4, '5']
res = hash_list(li)
self.assertEqual(res, "1402b9d4")
li = []
res = hash_list(li)
self.assertEqual(res, "d41d8cd9")
def test_process_argv_for_unittest(self):
li = ['unittests', '-d', '5']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests']
res = process_argv_for_unittest(li, None)
self.assertEmpty(res)
li = ['unittests', '-e', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-g', '.*']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
li = ['unittests', '-f', 'test.py']
res = process_argv_for_unittest(li, None)
self.assertNotEmpty(res)
if __name__ == "__main__":
unittest.main()
| sdpython/pyquickhelper | _unittests/ut_pycode/test_missing_function_pycode.py | Python | mit | 3,947 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._iot_hub_client_enums import *
class ArmIdentity(msrest.serialization.Model):
"""ArmIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: Principal Id.
:vartype principal_id: str
:ivar tenant_id: Tenant Id.
:vartype tenant_id: str
:ivar type: The type of identity used for the resource. The type 'SystemAssigned,UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. The type
'None' will remove any identities from the service. Possible values include: "SystemAssigned",
"UserAssigned", "SystemAssigned, UserAssigned", "None".
:vartype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:ivar user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ArmUserIdentity}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "ArmUserIdentity"]] = None,
**kwargs
):
"""
:keyword type: The type of identity used for the resource. The type
'SystemAssigned,UserAssigned' includes both an implicitly created identity and a set of user
assigned identities. The type 'None' will remove any identities from the service. Possible
values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:paramtype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.ResourceIdentityType
:keyword user_assigned_identities: Dictionary of :code:`<ArmUserIdentity>`.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.ArmUserIdentity]
"""
super(ArmIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
class ArmUserIdentity(msrest.serialization.Model):
"""ArmUserIdentity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id:
:vartype principal_id: str
:ivar client_id:
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ArmUserIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class CertificateBodyDescription(msrest.serialization.Model):
"""The JSON-serialized X509 Certificate.
:ivar certificate: base-64 representation of the X509 leaf certificate .cer file or just .pem
file content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of the X509 leaf certificate .cer file or just
.pem file content.
:paramtype certificate: str
"""
super(CertificateBodyDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificateProperties"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateProperties
"""
super(CertificateDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CertificateListDescription(msrest.serialization.Model):
"""The JSON-serialized array of Certificate objects.
:ivar value: The array of Certificate objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateDescription]'},
}
def __init__(
self,
*,
value: Optional[List["CertificateDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of Certificate objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.CertificateDescription]
"""
super(CertificateListDescription, self).__init__(**kwargs)
self.value = value
class CertificateProperties(msrest.serialization.Model):
"""The description of an X509 CA Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: The certificate content.
:paramtype certificate: str
"""
super(CertificateProperties, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.certificate = certificate
class CertificatePropertiesWithNonce(msrest.serialization.Model):
"""The description of an X509 CA Certificate including the challenge nonce issued for the Proof-Of-Possession flow.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar subject: The certificate's subject name.
:vartype subject: str
:ivar expiry: The certificate's expiration date and time.
:vartype expiry: ~datetime.datetime
:ivar thumbprint: The certificate's thumbprint.
:vartype thumbprint: str
:ivar is_verified: Determines whether certificate has been verified.
:vartype is_verified: bool
:ivar created: The certificate's create date and time.
:vartype created: ~datetime.datetime
:ivar updated: The certificate's last update date and time.
:vartype updated: ~datetime.datetime
:ivar verification_code: The certificate's verification code that will be used for proof of
possession.
:vartype verification_code: str
:ivar certificate: The certificate content.
:vartype certificate: str
"""
_validation = {
'subject': {'readonly': True},
'expiry': {'readonly': True},
'thumbprint': {'readonly': True},
'is_verified': {'readonly': True},
'created': {'readonly': True},
'updated': {'readonly': True},
'verification_code': {'readonly': True},
'certificate': {'readonly': True},
}
_attribute_map = {
'subject': {'key': 'subject', 'type': 'str'},
'expiry': {'key': 'expiry', 'type': 'rfc-1123'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'is_verified': {'key': 'isVerified', 'type': 'bool'},
'created': {'key': 'created', 'type': 'rfc-1123'},
'updated': {'key': 'updated', 'type': 'rfc-1123'},
'verification_code': {'key': 'verificationCode', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(CertificatePropertiesWithNonce, self).__init__(**kwargs)
self.subject = None
self.expiry = None
self.thumbprint = None
self.is_verified = None
self.created = None
self.updated = None
self.verification_code = None
self.certificate = None
class CertificateVerificationDescription(msrest.serialization.Model):
"""The JSON-serialized leaf certificate.
:ivar certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:vartype certificate: str
"""
_attribute_map = {
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: base-64 representation of X509 certificate .cer file or just .pem file
content.
:paramtype certificate: str
"""
super(CertificateVerificationDescription, self).__init__(**kwargs)
self.certificate = certificate
class CertificateWithNonceDescription(msrest.serialization.Model):
"""The X509 Certificate.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:vartype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.CertificatePropertiesWithNonce
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The name of the certificate.
:vartype name: str
:ivar etag: The entity tag.
:vartype etag: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'etag': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': 'CertificatePropertiesWithNonce'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["CertificatePropertiesWithNonce"] = None,
**kwargs
):
"""
:keyword properties: The description of an X509 CA Certificate including the challenge nonce
issued for the Proof-Of-Possession flow.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.CertificatePropertiesWithNonce
"""
super(CertificateWithNonceDescription, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.etag = None
self.type = None
class CloudToDeviceProperties(msrest.serialization.Model):
"""The IoT hub cloud-to-device messaging properties.
:ivar max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
:ivar default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype default_ttl_as_iso8601: ~datetime.timedelta
:ivar feedback: The properties of the feedback queue for cloud-to-device messages.
:vartype feedback: ~azure.mgmt.iothub.v2021_03_03_preview.models.FeedbackProperties
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
'default_ttl_as_iso8601': {'key': 'defaultTtlAsIso8601', 'type': 'duration'},
'feedback': {'key': 'feedback', 'type': 'FeedbackProperties'},
}
def __init__(
self,
*,
max_delivery_count: Optional[int] = None,
default_ttl_as_iso8601: Optional[datetime.timedelta] = None,
feedback: Optional["FeedbackProperties"] = None,
**kwargs
):
"""
:keyword max_delivery_count: The max delivery count for cloud-to-device messages in the device
queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
:keyword default_ttl_as_iso8601: The default time to live for cloud-to-device messages in the
device queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype default_ttl_as_iso8601: ~datetime.timedelta
:keyword feedback: The properties of the feedback queue for cloud-to-device messages.
:paramtype feedback: ~azure.mgmt.iothub.v2021_03_03_preview.models.FeedbackProperties
"""
super(CloudToDeviceProperties, self).__init__(**kwargs)
self.max_delivery_count = max_delivery_count
self.default_ttl_as_iso8601 = default_ttl_as_iso8601
self.feedback = feedback
class EncryptionPropertiesDescription(msrest.serialization.Model):
"""The encryption properties for the IoT hub.
:ivar key_source: The source of the key.
:vartype key_source: str
:ivar key_vault_properties: The properties of the KeyVault key.
:vartype key_vault_properties:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.KeyVaultKeyProperties]
"""
_attribute_map = {
'key_source': {'key': 'keySource', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': '[KeyVaultKeyProperties]'},
}
def __init__(
self,
*,
key_source: Optional[str] = None,
key_vault_properties: Optional[List["KeyVaultKeyProperties"]] = None,
**kwargs
):
"""
:keyword key_source: The source of the key.
:paramtype key_source: str
:keyword key_vault_properties: The properties of the KeyVault key.
:paramtype key_vault_properties:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.KeyVaultKeyProperties]
"""
super(EncryptionPropertiesDescription, self).__init__(**kwargs)
self.key_source = key_source
self.key_vault_properties = key_vault_properties
class EndpointHealthData(msrest.serialization.Model):
"""The health data for an endpoint.
:ivar endpoint_id: Id of the endpoint.
:vartype endpoint_id: str
    :ivar health_status: Health statuses have the following meanings. The 'healthy' status shows that
the endpoint is accepting messages as expected. The 'unhealthy' status shows that the endpoint
is not accepting messages as expected and IoT Hub is retrying to send data to this endpoint.
The status of an unhealthy endpoint will be updated to healthy when IoT Hub has established an
eventually consistent state of health. The 'dead' status shows that the endpoint is not
accepting messages, after IoT Hub retried sending messages for the retrial period. See IoT Hub
metrics to identify errors and monitor issues with endpoints. The 'unknown' status shows that
the IoT Hub has not established a connection with the endpoint. No messages have been delivered
to or rejected from this endpoint. Possible values include: "unknown", "healthy", "degraded",
"unhealthy", "dead".
:vartype health_status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthStatus
:ivar last_known_error: Last error obtained when a message failed to be delivered to iot hub.
:vartype last_known_error: str
:ivar last_known_error_time: Time at which the last known error occurred.
:vartype last_known_error_time: ~datetime.datetime
:ivar last_successful_send_attempt_time: Last time iot hub successfully sent a message to the
endpoint.
:vartype last_successful_send_attempt_time: ~datetime.datetime
:ivar last_send_attempt_time: Last time iot hub tried to send a message to the endpoint.
:vartype last_send_attempt_time: ~datetime.datetime
"""
_attribute_map = {
'endpoint_id': {'key': 'endpointId', 'type': 'str'},
'health_status': {'key': 'healthStatus', 'type': 'str'},
'last_known_error': {'key': 'lastKnownError', 'type': 'str'},
'last_known_error_time': {'key': 'lastKnownErrorTime', 'type': 'rfc-1123'},
'last_successful_send_attempt_time': {'key': 'lastSuccessfulSendAttemptTime', 'type': 'rfc-1123'},
'last_send_attempt_time': {'key': 'lastSendAttemptTime', 'type': 'rfc-1123'},
}
def __init__(
self,
*,
endpoint_id: Optional[str] = None,
health_status: Optional[Union[str, "EndpointHealthStatus"]] = None,
last_known_error: Optional[str] = None,
last_known_error_time: Optional[datetime.datetime] = None,
last_successful_send_attempt_time: Optional[datetime.datetime] = None,
last_send_attempt_time: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword endpoint_id: Id of the endpoint.
:paramtype endpoint_id: str
        :keyword health_status: Health statuses have the following meanings. The 'healthy' status shows
that the endpoint is accepting messages as expected. The 'unhealthy' status shows that the
endpoint is not accepting messages as expected and IoT Hub is retrying to send data to this
endpoint. The status of an unhealthy endpoint will be updated to healthy when IoT Hub has
established an eventually consistent state of health. The 'dead' status shows that the endpoint
is not accepting messages, after IoT Hub retried sending messages for the retrial period. See
IoT Hub metrics to identify errors and monitor issues with endpoints. The 'unknown' status
shows that the IoT Hub has not established a connection with the endpoint. No messages have
been delivered to or rejected from this endpoint. Possible values include: "unknown",
"healthy", "degraded", "unhealthy", "dead".
:paramtype health_status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthStatus
:keyword last_known_error: Last error obtained when a message failed to be delivered to iot
hub.
:paramtype last_known_error: str
:keyword last_known_error_time: Time at which the last known error occurred.
:paramtype last_known_error_time: ~datetime.datetime
:keyword last_successful_send_attempt_time: Last time iot hub successfully sent a message to
the endpoint.
:paramtype last_successful_send_attempt_time: ~datetime.datetime
:keyword last_send_attempt_time: Last time iot hub tried to send a message to the endpoint.
:paramtype last_send_attempt_time: ~datetime.datetime
"""
super(EndpointHealthData, self).__init__(**kwargs)
self.endpoint_id = endpoint_id
self.health_status = health_status
self.last_known_error = last_known_error
self.last_known_error_time = last_known_error_time
self.last_successful_send_attempt_time = last_successful_send_attempt_time
self.last_send_attempt_time = last_send_attempt_time
class EndpointHealthDataListResult(msrest.serialization.Model):
"""The JSON-serialized array of EndpointHealthData objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: JSON-serialized array of Endpoint health data.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthData]
:ivar next_link: Link to more results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EndpointHealthData]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EndpointHealthData"]] = None,
**kwargs
):
"""
:keyword value: JSON-serialized array of Endpoint health data.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EndpointHealthData]
"""
super(EndpointHealthDataListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EnrichmentProperties(msrest.serialization.Model):
"""The properties of an enrichment that your IoT hub applies to messages delivered to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar key: Required. The key or name for the enrichment property.
:vartype key: str
:ivar value: Required. The value for the enrichment property.
:vartype value: str
:ivar endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:vartype endpoint_names: list[str]
"""
_validation = {
'key': {'required': True},
'value': {'required': True},
'endpoint_names': {'required': True, 'min_items': 1},
}
_attribute_map = {
'key': {'key': 'key', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
}
def __init__(
self,
*,
key: str,
value: str,
endpoint_names: List[str],
**kwargs
):
"""
:keyword key: Required. The key or name for the enrichment property.
:paramtype key: str
:keyword value: Required. The value for the enrichment property.
:paramtype value: str
:keyword endpoint_names: Required. The list of endpoints for which the enrichment is applied to
the message.
:paramtype endpoint_names: list[str]
"""
super(EnrichmentProperties, self).__init__(**kwargs)
self.key = key
self.value = value
self.endpoint_names = endpoint_names
class ErrorDetails(msrest.serialization.Model):
"""Error details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar http_status_code: The HTTP status code.
:vartype http_status_code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: str
"""
_validation = {
'code': {'readonly': True},
'http_status_code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorDetails, self).__init__(**kwargs)
self.code = None
self.http_status_code = None
self.message = None
self.details = None
class EventHubConsumerGroupBodyDescription(msrest.serialization.Model):
"""The EventHub consumer group.
:ivar properties: The EventHub consumer group name.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupName
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'EventHubConsumerGroupName'},
}
def __init__(
self,
*,
properties: Optional["EventHubConsumerGroupName"] = None,
**kwargs
):
"""
:keyword properties: The EventHub consumer group name.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupName
"""
super(EventHubConsumerGroupBodyDescription, self).__init__(**kwargs)
self.properties = properties
class EventHubConsumerGroupInfo(msrest.serialization.Model):
"""The properties of the EventHubConsumerGroupInfo object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar properties: The tags.
:vartype properties: dict[str, str]
:ivar id: The Event Hub-compatible consumer group identifier.
:vartype id: str
:ivar name: The Event Hub-compatible consumer group name.
:vartype name: str
:ivar type: the resource type.
:vartype type: str
:ivar etag: The etag.
:vartype etag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'properties': {'key': 'properties', 'type': '{str}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword properties: The tags.
:paramtype properties: dict[str, str]
"""
super(EventHubConsumerGroupInfo, self).__init__(**kwargs)
self.properties = properties
self.id = None
self.name = None
self.type = None
self.etag = None
class EventHubConsumerGroupName(msrest.serialization.Model):
"""The EventHub consumer group name.
:ivar name: EventHub consumer group name.
:vartype name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
"""
:keyword name: EventHub consumer group name.
:paramtype name: str
"""
super(EventHubConsumerGroupName, self).__init__(**kwargs)
self.name = name
class EventHubConsumerGroupsListResult(msrest.serialization.Model):
"""The JSON-serialized array of Event Hub-compatible consumer group names with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of consumer groups objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EventHubConsumerGroupInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["EventHubConsumerGroupInfo"]] = None,
**kwargs
):
"""
:keyword value: List of consumer groups objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubConsumerGroupInfo]
"""
super(EventHubConsumerGroupsListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class EventHubProperties(msrest.serialization.Model):
"""The properties of the provisioned Event Hub-compatible endpoint used by the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype retention_time_in_days: long
:ivar partition_count: The number of partitions for receiving device-to-cloud messages in the
Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:vartype partition_count: int
:ivar partition_ids: The partition ids in the Event Hub-compatible endpoint.
:vartype partition_ids: list[str]
:ivar path: The Event Hub-compatible name.
:vartype path: str
:ivar endpoint: The Event Hub-compatible endpoint.
:vartype endpoint: str
"""
_validation = {
'partition_ids': {'readonly': True},
'path': {'readonly': True},
'endpoint': {'readonly': True},
}
_attribute_map = {
'retention_time_in_days': {'key': 'retentionTimeInDays', 'type': 'long'},
'partition_count': {'key': 'partitionCount', 'type': 'int'},
'partition_ids': {'key': 'partitionIds', 'type': '[str]'},
'path': {'key': 'path', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
}
def __init__(
self,
*,
retention_time_in_days: Optional[int] = None,
partition_count: Optional[int] = None,
**kwargs
):
"""
:keyword retention_time_in_days: The retention time for device-to-cloud messages in days. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype retention_time_in_days: long
:keyword partition_count: The number of partitions for receiving device-to-cloud messages in
the Event Hub-compatible endpoint. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages.
:paramtype partition_count: int
"""
super(EventHubProperties, self).__init__(**kwargs)
self.retention_time_in_days = retention_time_in_days
self.partition_count = partition_count
self.partition_ids = None
self.path = None
self.endpoint = None
class ExportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an export of all devices in the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar export_blob_container_uri: Required. The export blob container URI.
:vartype export_blob_container_uri: str
:ivar exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:vartype exclude_keys: bool
:ivar export_blob_name: The name of the blob that will be created in the provided output blob
container. This blob will contain the exported device registry information for the IoT Hub.
:vartype export_blob_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for export devices.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'export_blob_container_uri': {'required': True},
'exclude_keys': {'required': True},
}
_attribute_map = {
'export_blob_container_uri': {'key': 'exportBlobContainerUri', 'type': 'str'},
'exclude_keys': {'key': 'excludeKeys', 'type': 'bool'},
'export_blob_name': {'key': 'exportBlobName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
export_blob_container_uri: str,
exclude_keys: bool,
export_blob_name: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword export_blob_container_uri: Required. The export blob container URI.
:paramtype export_blob_container_uri: str
:keyword exclude_keys: Required. The value indicating whether keys should be excluded during
export.
:paramtype exclude_keys: bool
:keyword export_blob_name: The name of the blob that will be created in the provided output
blob container. This blob will contain the exported device registry information for the IoT
Hub.
:paramtype export_blob_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for export devices.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(ExportDevicesRequest, self).__init__(**kwargs)
self.export_blob_container_uri = export_blob_container_uri
self.exclude_keys = exclude_keys
self.export_blob_name = export_blob_name
self.authentication_type = authentication_type
self.identity = identity
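# Illustrative sketch (not part of the generated models): constructs an
# ExportDevicesRequest with its two required fields. The container SAS URI and blob
# name are hypothetical placeholders, not real endpoints.
def _example_export_devices_request():
    return ExportDevicesRequest(
        export_blob_container_uri="https://example.blob.core.windows.net/devices?sig=PLACEHOLDER",
        exclude_keys=True,
        export_blob_name="devices.txt",
    )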
class FailoverInput(msrest.serialization.Model):
"""Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Required. Region the hub will be failed over to.
:vartype failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
*,
failover_region: str,
**kwargs
):
"""
:keyword failover_region: Required. Region the hub will be failed over to.
:paramtype failover_region: str
"""
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:vartype name: str
:ivar source: Required. The source to which the routing rule is to be applied to. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:vartype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether the fallback route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:paramtype name: str
:keyword source: Required. The source to which the routing rule is to be applied to. For
example, DeviceMessages. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether the fallback route is enabled.
:paramtype is_enabled: bool
"""
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
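# Illustrative sketch (not part of the generated models): a fallback route that sends
# all device messages to a single endpoint, since the validation above allows exactly
# one endpoint name. The "events" endpoint name used here is an assumption.
def _example_fallback_route():
    return FallbackRouteProperties(
        source="DeviceMessages",
        endpoint_names=["events"],
        is_enabled=True,
        condition="true",
    )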
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class GroupIdInformation(msrest.serialization.Model):
"""The group information for creating a private endpoint on an IotHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required. The properties for a group information object.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'GroupIdInformationProperties'},
}
def __init__(
self,
*,
properties: "GroupIdInformationProperties",
**kwargs
):
"""
:keyword properties: Required. The properties for a group information object.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformationProperties
"""
super(GroupIdInformation, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class GroupIdInformationProperties(msrest.serialization.Model):
"""The properties for a group information object.
:ivar group_id: The group id.
:vartype group_id: str
:ivar required_members: The required members for a specific group id.
:vartype required_members: list[str]
:ivar required_zone_names: The required DNS zones for a specific group id.
:vartype required_zone_names: list[str]
"""
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'required_members': {'key': 'requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
*,
group_id: Optional[str] = None,
required_members: Optional[List[str]] = None,
required_zone_names: Optional[List[str]] = None,
**kwargs
):
"""
:keyword group_id: The group id.
:paramtype group_id: str
:keyword required_members: The required members for a specific group id.
:paramtype required_members: list[str]
:keyword required_zone_names: The required DNS zones for a specific group id.
:paramtype required_zone_names: list[str]
"""
super(GroupIdInformationProperties, self).__init__(**kwargs)
self.group_id = group_id
self.required_members = required_members
self.required_zone_names = required_zone_names
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: Required. The input blob container URI.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: Required. The output blob container URI.
:vartype output_blob_container_uri: str
:ivar input_blob_name: The blob name to be used when importing from the provided input blob
container.
:vartype input_blob_name: str
:ivar output_blob_name: The blob name to use for storing the status of the import job.
:vartype output_blob_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for import devices.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
'input_blob_name': {'key': 'inputBlobName', 'type': 'str'},
'output_blob_name': {'key': 'outputBlobName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
input_blob_container_uri: str,
output_blob_container_uri: str,
input_blob_name: Optional[str] = None,
output_blob_name: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword input_blob_container_uri: Required. The input blob container URI.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: Required. The output blob container URI.
:paramtype output_blob_container_uri: str
:keyword input_blob_name: The blob name to be used when importing from the provided input blob
container.
:paramtype input_blob_name: str
:keyword output_blob_name: The blob name to use for storing the status of the import job.
:paramtype output_blob_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for import devices.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
self.input_blob_name = input_blob_name
self.output_blob_name = output_blob_name
self.authentication_type = authentication_type
self.identity = identity
class IotHubCapacity(msrest.serialization.Model):
"""IoT Hub capacity information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar minimum: The minimum number of units.
:vartype minimum: long
:ivar maximum: The maximum number of units.
:vartype maximum: long
:ivar default: The default number of units.
:vartype default: long
:ivar scale_type: The type of the scaling enabled. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubScaleType
"""
_validation = {
'minimum': {'readonly': True, 'maximum': 1, 'minimum': 1},
'maximum': {'readonly': True},
'default': {'readonly': True},
'scale_type': {'readonly': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'long'},
'maximum': {'key': 'maximum', 'type': 'long'},
'default': {'key': 'default', 'type': 'long'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubCapacity, self).__init__(**kwargs)
self.minimum = None
self.maximum = None
self.default = None
self.scale_type = None
class Resource(msrest.serialization.Model):
"""The common properties of an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class IotHubDescription(Resource):
"""The description of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar location: Required. The resource location.
:vartype location: str
:ivar tags: A set of tags. The resource tags.
:vartype tags: dict[str, str]
:ivar etag: The Etag field is *not* required. If it is provided in the response body, it must
also be provided as a header per the normal ETag convention.
:vartype etag: str
:ivar properties: IotHub properties.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubProperties
:ivar sku: Required. IotHub SKU info.
:vartype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:ivar identity: The managed identities for the IotHub.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ArmIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'IotHubProperties'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'identity': {'key': 'identity', 'type': 'ArmIdentity'},
}
def __init__(
self,
*,
location: str,
sku: "IotHubSkuInfo",
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["IotHubProperties"] = None,
identity: Optional["ArmIdentity"] = None,
**kwargs
):
"""
:keyword location: Required. The resource location.
:paramtype location: str
:keyword tags: A set of tags. The resource tags.
:paramtype tags: dict[str, str]
:keyword etag: The Etag field is *not* required. If it is provided in the response body, it
must also be provided as a header per the normal ETag convention.
:paramtype etag: str
:keyword properties: IotHub properties.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubProperties
:keyword sku: Required. IotHub SKU info.
:paramtype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:keyword identity: The managed identities for the IotHub.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ArmIdentity
"""
super(IotHubDescription, self).__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.sku = sku
self.identity = identity
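# Illustrative sketch (editor's addition): a minimal IotHubDescription needs only the required
# ``location`` and ``sku``. IotHubSkuInfo is defined further down in this module and resolves at
# call time; the SKU name, capacity and tag values are placeholders.
def _example_iot_hub_description():
    return IotHubDescription(
        location="eastus",
        sku=IotHubSkuInfo(name="S1", capacity=1),
        tags={"environment": "dev"},
    )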
class IotHubDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubDescription objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubDescription objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubDescription]
"""
super(IotHubDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubLocationDescription(msrest.serialization.Model):
"""Public representation of one of the locations where a resource is provisioned.
:ivar location: The name of the Azure region.
:vartype location: str
:ivar role: The role of the region, which can be either primary or secondary. The primary
region is where the IoT hub is currently provisioned. The secondary region is the Azure
disaster recovery (DR) paired region and also the region to which the IoT hub can fail over.
Possible values include: "primary", "secondary".
:vartype role: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubReplicaRoleType
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
role: Optional[Union[str, "IotHubReplicaRoleType"]] = None,
**kwargs
):
"""
:keyword location: The name of the Azure region.
:paramtype location: str
:keyword role: The role of the region, which can be either primary or secondary. The primary
region is where the IoT hub is currently provisioned. The secondary region is the Azure
disaster recovery (DR) paired region and also the region to which the IoT hub can fail over.
Possible values include: "primary", "secondary".
:paramtype role: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubReplicaRoleType
"""
super(IotHubLocationDescription, self).__init__(**kwargs)
self.location = location
self.role = role
class IotHubNameAvailabilityInfo(msrest.serialization.Model):
"""The properties indicating whether a given IoT hub name is available.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name_available: The value which indicates whether the provided name is available.
:vartype name_available: bool
:ivar reason: The reason for unavailability. Possible values include: "Invalid",
"AlreadyExists".
:vartype reason: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubNameUnavailabilityReason
:ivar message: The detailed reason message.
:vartype message: str
"""
_validation = {
'name_available': {'readonly': True},
'reason': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
message: Optional[str] = None,
**kwargs
):
"""
:keyword message: The detailed reason message.
:paramtype message: str
"""
super(IotHubNameAvailabilityInfo, self).__init__(**kwargs)
self.name_available = None
self.reason = None
self.message = message
class IotHubProperties(msrest.serialization.Model):
"""The properties of an IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar authorization_policies: The shared access policies you can use to secure a connection to
the IoT hub.
:vartype authorization_policies:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar public_network_access: Whether requests from Public Network are allowed. Possible values
include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PublicNetworkAccess
:ivar ip_filter_rules: The IP filter rules.
:vartype ip_filter_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterRule]
:ivar network_rule_sets: Network Rule Set Properties of IotHub.
:vartype network_rule_sets:
~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetProperties
:ivar min_tls_version: Specifies the minimum TLS version to support for this hub. Can be set
to "1.2" to reject clients that use a TLS version below 1.2.
:vartype min_tls_version: str
:ivar private_endpoint_connections: Private endpoint connections created on this IotHub.
:vartype private_endpoint_connections:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:ivar provisioning_state: The provisioning state.
:vartype provisioning_state: str
:ivar state: The hub state.
:vartype state: str
:ivar host_name: The name of the host.
:vartype host_name: str
:ivar event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible key
to this dictionary is events. This key has to be present in the dictionary while making create
or update calls for the IoT hub.
:vartype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubProperties]
:ivar routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:vartype routing: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingProperties
:ivar storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account, and it MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True
causes an error to be thrown.
:vartype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.StorageEndpointProperties]
:ivar messaging_endpoints: The messaging endpoint properties for the file upload notification
queue.
:vartype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.MessagingEndpointProperties]
:ivar enable_file_upload_notifications: If True, file upload notifications are enabled.
:vartype enable_file_upload_notifications: bool
:ivar cloud_to_device: The IoT hub cloud-to-device messaging properties.
:vartype cloud_to_device: ~azure.mgmt.iothub.v2021_03_03_preview.models.CloudToDeviceProperties
:ivar comments: IoT hub comments.
:vartype comments: str
:ivar device_streams: The device streams properties of iothub.
:vartype device_streams:
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubPropertiesDeviceStreams
:ivar features: The capabilities and features enabled for the IoT hub. Possible values include:
"None", "DeviceManagement".
:vartype features: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.Capabilities
:ivar encryption: The encryption properties for the IoT hub.
:vartype encryption:
~azure.mgmt.iothub.v2021_03_03_preview.models.EncryptionPropertiesDescription
:ivar locations: Primary and secondary location for iot hub.
:vartype locations:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubLocationDescription]
"""
_validation = {
'provisioning_state': {'readonly': True},
'state': {'readonly': True},
'host_name': {'readonly': True},
'locations': {'readonly': True},
}
_attribute_map = {
'authorization_policies': {'key': 'authorizationPolicies', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
'ip_filter_rules': {'key': 'ipFilterRules', 'type': '[IpFilterRule]'},
'network_rule_sets': {'key': 'networkRuleSets', 'type': 'NetworkRuleSetProperties'},
'min_tls_version': {'key': 'minTlsVersion', 'type': 'str'},
'private_endpoint_connections': {'key': 'privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'event_hub_endpoints': {'key': 'eventHubEndpoints', 'type': '{EventHubProperties}'},
'routing': {'key': 'routing', 'type': 'RoutingProperties'},
'storage_endpoints': {'key': 'storageEndpoints', 'type': '{StorageEndpointProperties}'},
'messaging_endpoints': {'key': 'messagingEndpoints', 'type': '{MessagingEndpointProperties}'},
'enable_file_upload_notifications': {'key': 'enableFileUploadNotifications', 'type': 'bool'},
'cloud_to_device': {'key': 'cloudToDevice', 'type': 'CloudToDeviceProperties'},
'comments': {'key': 'comments', 'type': 'str'},
'device_streams': {'key': 'deviceStreams', 'type': 'IotHubPropertiesDeviceStreams'},
'features': {'key': 'features', 'type': 'str'},
'encryption': {'key': 'encryption', 'type': 'EncryptionPropertiesDescription'},
'locations': {'key': 'locations', 'type': '[IotHubLocationDescription]'},
}
def __init__(
self,
*,
authorization_policies: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccess"]] = None,
ip_filter_rules: Optional[List["IpFilterRule"]] = None,
network_rule_sets: Optional["NetworkRuleSetProperties"] = None,
min_tls_version: Optional[str] = None,
private_endpoint_connections: Optional[List["PrivateEndpointConnection"]] = None,
event_hub_endpoints: Optional[Dict[str, "EventHubProperties"]] = None,
routing: Optional["RoutingProperties"] = None,
storage_endpoints: Optional[Dict[str, "StorageEndpointProperties"]] = None,
messaging_endpoints: Optional[Dict[str, "MessagingEndpointProperties"]] = None,
enable_file_upload_notifications: Optional[bool] = None,
cloud_to_device: Optional["CloudToDeviceProperties"] = None,
comments: Optional[str] = None,
device_streams: Optional["IotHubPropertiesDeviceStreams"] = None,
features: Optional[Union[str, "Capabilities"]] = None,
encryption: Optional["EncryptionPropertiesDescription"] = None,
**kwargs
):
"""
:keyword authorization_policies: The shared access policies you can use to secure a connection
to the IoT hub.
:paramtype authorization_policies:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:keyword public_network_access: Whether requests from Public Network are allowed. Possible
values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PublicNetworkAccess
:keyword ip_filter_rules: The IP filter rules.
:paramtype ip_filter_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterRule]
:keyword network_rule_sets: Network Rule Set Properties of IotHub.
:paramtype network_rule_sets:
~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetProperties
:keyword min_tls_version: Specifies the minimum TLS version to support for this hub. Can be
set to "1.2" to reject clients that use a TLS version below 1.2.
:paramtype min_tls_version: str
:keyword private_endpoint_connections: Private endpoint connections created on this IotHub.
:paramtype private_endpoint_connections:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnection]
:keyword event_hub_endpoints: The Event Hub-compatible endpoint properties. The only possible
key to this dictionary is events. This key has to be present in the dictionary while making
create or update calls for the IoT hub.
:paramtype event_hub_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.EventHubProperties]
:keyword routing: The routing related properties of the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:paramtype routing: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingProperties
:keyword storage_endpoints: The list of Azure Storage endpoints where you can upload files.
Currently you can configure only one Azure Storage account, and it MUST have its key as
$default. Specifying more than one storage account causes an error to be thrown. Not specifying
a value for this property when the enableFileUploadNotifications property is set to True
causes an error to be thrown.
:paramtype storage_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.StorageEndpointProperties]
:keyword messaging_endpoints: The messaging endpoint properties for the file upload
notification queue.
:paramtype messaging_endpoints: dict[str,
~azure.mgmt.iothub.v2021_03_03_preview.models.MessagingEndpointProperties]
:keyword enable_file_upload_notifications: If True, file upload notifications are enabled.
:paramtype enable_file_upload_notifications: bool
:keyword cloud_to_device: The IoT hub cloud-to-device messaging properties.
:paramtype cloud_to_device:
~azure.mgmt.iothub.v2021_03_03_preview.models.CloudToDeviceProperties
:keyword comments: IoT hub comments.
:paramtype comments: str
:keyword device_streams: The device streams properties of iothub.
:paramtype device_streams:
~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubPropertiesDeviceStreams
:keyword features: The capabilities and features enabled for the IoT hub. Possible values
include: "None", "DeviceManagement".
:paramtype features: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.Capabilities
:keyword encryption: The encryption properties for the IoT hub.
:paramtype encryption:
~azure.mgmt.iothub.v2021_03_03_preview.models.EncryptionPropertiesDescription
"""
super(IotHubProperties, self).__init__(**kwargs)
self.authorization_policies = authorization_policies
self.public_network_access = public_network_access
self.ip_filter_rules = ip_filter_rules
self.network_rule_sets = network_rule_sets
self.min_tls_version = min_tls_version
self.private_endpoint_connections = private_endpoint_connections
self.provisioning_state = None
self.state = None
self.host_name = None
self.event_hub_endpoints = event_hub_endpoints
self.routing = routing
self.storage_endpoints = storage_endpoints
self.messaging_endpoints = messaging_endpoints
self.enable_file_upload_notifications = enable_file_upload_notifications
self.cloud_to_device = cloud_to_device
self.comments = comments
self.device_streams = device_streams
self.features = features
self.encryption = encryption
self.locations = None
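# Illustrative sketch (editor's addition): hub properties that enforce TLS 1.2 and reject a
# single CIDR range. IpFilterRule is defined later in this module; the rule name and mask are
# placeholders. Read-only fields such as ``state`` and ``host_name`` remain ``None``.
def _example_iot_hub_properties():
    return IotHubProperties(
        min_tls_version="1.2",
        enable_file_upload_notifications=False,
        ip_filter_rules=[
            IpFilterRule(filter_name="reject-test-range", action="Reject", ip_mask="192.0.2.0/24"),
        ],
    )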
class IotHubPropertiesDeviceStreams(msrest.serialization.Model):
"""The device streams properties of iothub.
:ivar streaming_endpoints: List of Device Streams Endpoints.
:vartype streaming_endpoints: list[str]
"""
_attribute_map = {
'streaming_endpoints': {'key': 'streamingEndpoints', 'type': '[str]'},
}
def __init__(
self,
*,
streaming_endpoints: Optional[List[str]] = None,
**kwargs
):
"""
:keyword streaming_endpoints: List of Device Streams Endpoints.
:paramtype streaming_endpoints: list[str]
"""
super(IotHubPropertiesDeviceStreams, self).__init__(**kwargs)
self.streaming_endpoints = streaming_endpoints
class IotHubQuotaMetricInfo(msrest.serialization.Model):
"""Quota metrics properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the quota metric.
:vartype name: str
:ivar current_value: The current value for the quota metric.
:vartype current_value: long
:ivar max_value: The maximum value of the quota metric.
:vartype max_value: long
"""
_validation = {
'name': {'readonly': True},
'current_value': {'readonly': True},
'max_value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'max_value': {'key': 'maxValue', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(IotHubQuotaMetricInfo, self).__init__(**kwargs)
self.name = None
self.current_value = None
self.max_value = None
class IotHubQuotaMetricInfoListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubQuotaMetricInfo objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of quota metrics objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubQuotaMetricInfo]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubQuotaMetricInfo]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubQuotaMetricInfo"]] = None,
**kwargs
):
"""
:keyword value: The array of quota metrics objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubQuotaMetricInfo]
"""
super(IotHubQuotaMetricInfoListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuDescription(msrest.serialization.Model):
"""SKU properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar sku: Required. The SKU information for the IoT hub.
:vartype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:ivar capacity: Required. IotHub capacity.
:vartype capacity: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubCapacity
"""
_validation = {
'resource_type': {'readonly': True},
'sku': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'IotHubSkuInfo'},
'capacity': {'key': 'capacity', 'type': 'IotHubCapacity'},
}
def __init__(
self,
*,
sku: "IotHubSkuInfo",
capacity: "IotHubCapacity",
**kwargs
):
"""
:keyword sku: Required. The SKU information for the IoT hub.
:paramtype sku: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuInfo
:keyword capacity: Required. IotHub capacity.
:paramtype capacity: ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubCapacity
"""
super(IotHubSkuDescription, self).__init__(**kwargs)
self.resource_type = None
self.sku = sku
self.capacity = capacity
class IotHubSkuDescriptionListResult(msrest.serialization.Model):
"""The JSON-serialized array of IotHubSkuDescription objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of IotHubSkuDescription.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuDescription]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[IotHubSkuDescription]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["IotHubSkuDescription"]] = None,
**kwargs
):
"""
:keyword value: The array of IotHubSkuDescription.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuDescription]
"""
super(IotHubSkuDescriptionListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class IotHubSkuInfo(msrest.serialization.Model):
"""Information about the SKU of the IoT hub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:vartype name: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSku
:ivar tier: The billing tier for the IoT hub. Possible values include: "Free", "Standard",
"Basic".
:vartype tier: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSkuTier
:ivar capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:vartype capacity: long
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'long'},
}
def __init__(
self,
*,
name: Union[str, "IotHubSku"],
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: Required. The name of the SKU. Possible values include: "F1", "S1", "S2", "S3",
"B1", "B2", "B3".
:paramtype name: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IotHubSku
:keyword capacity: The number of provisioned IoT Hub units. See:
https://docs.microsoft.com/azure/azure-subscription-service-limits#iot-hub-limits.
:paramtype capacity: long
"""
super(IotHubSkuInfo, self).__init__(**kwargs)
self.name = name
self.tier = None
self.capacity = capacity
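# Illustrative sketch (editor's addition): only ``name`` is required for a SKU; ``tier`` is
# read-only and derived by the service from the SKU name. The SKU values are placeholders.
def _example_iot_hub_sku_info():
    return IotHubSkuInfo(name="S1", capacity=1)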
class IpFilterRule(msrest.serialization.Model):
"""The IP filter rules for the IoT hub.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: Required. The name of the IP filter rule.
:vartype filter_name: str
:ivar action: Required. The desired action for requests captured by this rule. Possible values
include: "Accept", "Reject".
:vartype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterActionType
:ivar ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:vartype ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'action': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
*,
filter_name: str,
action: Union[str, "IpFilterActionType"],
ip_mask: str,
**kwargs
):
"""
:keyword filter_name: Required. The name of the IP filter rule.
:paramtype filter_name: str
:keyword action: Required. The desired action for requests captured by this rule. Possible
values include: "Accept", "Reject".
:paramtype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.IpFilterActionType
:keyword ip_mask: Required. A string that contains the IP address range in CIDR notation for
the rule.
:paramtype ip_mask: str
"""
super(IpFilterRule, self).__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
class JobResponse(msrest.serialization.Model):
"""The properties of the Job Response object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_id: The job identifier.
:vartype job_id: str
:ivar start_time_utc: The start time of the job.
:vartype start_time_utc: ~datetime.datetime
:ivar end_time_utc: The time the job stopped processing.
:vartype end_time_utc: ~datetime.datetime
:ivar type: The type of the job. Possible values include: "unknown", "export", "import",
"backup", "readDeviceProperties", "writeDeviceProperties", "updateDeviceConfiguration",
"rebootDevice", "factoryResetDevice", "firmwareUpdate".
:vartype type: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.JobType
:ivar status: The status of the job. Possible values include: "unknown", "enqueued", "running",
"completed", "failed", "cancelled".
:vartype status: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.JobStatus
:ivar failure_reason: If status == failed, this string contains the reason for the failure.
:vartype failure_reason: str
:ivar status_message: The status message for the job.
:vartype status_message: str
:ivar parent_job_id: The job identifier of the parent job, if any.
:vartype parent_job_id: str
"""
_validation = {
'job_id': {'readonly': True},
'start_time_utc': {'readonly': True},
'end_time_utc': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'failure_reason': {'readonly': True},
'status_message': {'readonly': True},
'parent_job_id': {'readonly': True},
}
_attribute_map = {
'job_id': {'key': 'jobId', 'type': 'str'},
'start_time_utc': {'key': 'startTimeUtc', 'type': 'rfc-1123'},
'end_time_utc': {'key': 'endTimeUtc', 'type': 'rfc-1123'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'failure_reason': {'key': 'failureReason', 'type': 'str'},
'status_message': {'key': 'statusMessage', 'type': 'str'},
'parent_job_id': {'key': 'parentJobId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(JobResponse, self).__init__(**kwargs)
self.job_id = None
self.start_time_utc = None
self.end_time_utc = None
self.type = None
self.status = None
self.failure_reason = None
self.status_message = None
self.parent_job_id = None
class JobResponseListResult(msrest.serialization.Model):
"""The JSON-serialized array of JobResponse objects with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The array of JobResponse objects.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.JobResponse]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResponse]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["JobResponse"]] = None,
**kwargs
):
"""
:keyword value: The array of JobResponse objects.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.JobResponse]
"""
super(JobResponseListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class KeyVaultKeyProperties(msrest.serialization.Model):
"""The properties of the KeyVault key.
:ivar key_identifier: The identifier of the key.
:vartype key_identifier: str
:ivar identity: Managed identity properties of KeyVault Key.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
key_identifier: Optional[str] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword key_identifier: The identifier of the key.
:paramtype key_identifier: str
:keyword identity: Managed identity properties of KeyVault Key.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(KeyVaultKeyProperties, self).__init__(**kwargs)
self.key_identifier = key_identifier
self.identity = identity
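# Illustrative sketch (editor's addition): customer-managed key configuration pairing a Key
# Vault key identifier with a user-assigned managed identity (ManagedIdentity is defined just
# below). Both resource identifiers are placeholders.
def _example_key_vault_key_properties():
    return KeyVaultKeyProperties(
        key_identifier="https://example-vault.vault.azure.net/keys/example-key",
        identity=ManagedIdentity(
            user_assigned_identity=(
                "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
                "Microsoft.ManagedIdentity/userAssignedIdentities/example-identity"
            )
        ),
    )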
class ManagedIdentity(msrest.serialization.Model):
"""The properties of the Managed identity.
:ivar user_assigned_identity: The user assigned identity.
:vartype user_assigned_identity: str
"""
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
*,
user_assigned_identity: Optional[str] = None,
**kwargs
):
"""
:keyword user_assigned_identity: The user assigned identity.
:paramtype user_assigned_identity: str
"""
super(ManagedIdentity, self).__init__(**kwargs)
self.user_assigned_identity = user_assigned_identity
class MatchedRoute(msrest.serialization.Model):
"""Routes that matched.
:ivar properties: Properties of routes that matched.
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'RouteProperties'},
}
def __init__(
self,
*,
properties: Optional["RouteProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of routes that matched.
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
"""
super(MatchedRoute, self).__init__(**kwargs)
self.properties = properties
class MessagingEndpointProperties(msrest.serialization.Model):
"""The properties of the messaging endpoints used by this IoT hub.
:ivar lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message.
See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload.
:paramtype max_delivery_count: int
"""
super(MessagingEndpointProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
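# Illustrative sketch (editor's addition): the ISO 8601 durations are supplied as
# ``datetime.timedelta`` objects (the serializer renders them as ISO 8601 strings), and
# ``max_delivery_count`` must fall within the validated 1-100 range.
def _example_messaging_endpoint_properties():
    return MessagingEndpointProperties(
        lock_duration_as_iso8601=datetime.timedelta(minutes=1),
        ttl_as_iso8601=datetime.timedelta(hours=1),
        max_delivery_count=10,
    )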
class Name(msrest.serialization.Model):
"""Name of Iot Hub type.
:ivar value: IotHub type.
:vartype value: str
:ivar localized_value: Localized value of name.
:vartype localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
"""
:keyword value: IotHub type.
:paramtype value: str
:keyword localized_value: Localized value of name.
:paramtype localized_value: str
"""
super(Name, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class NetworkRuleSetIpRule(msrest.serialization.Model):
"""IP Rule to be applied as part of Network Rule Set.
All required parameters must be populated in order to send to Azure.
:ivar filter_name: Required. Name of the IP filter rule.
:vartype filter_name: str
:ivar action: IP Filter Action. Possible values include: "Allow". Default value: "Allow".
:vartype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleIPAction
:ivar ip_mask: Required. A string that contains the IP address range in CIDR notation for the
rule.
:vartype ip_mask: str
"""
_validation = {
'filter_name': {'required': True},
'ip_mask': {'required': True},
}
_attribute_map = {
'filter_name': {'key': 'filterName', 'type': 'str'},
'action': {'key': 'action', 'type': 'str'},
'ip_mask': {'key': 'ipMask', 'type': 'str'},
}
def __init__(
self,
*,
filter_name: str,
ip_mask: str,
action: Optional[Union[str, "NetworkRuleIPAction"]] = "Allow",
**kwargs
):
"""
:keyword filter_name: Required. Name of the IP filter rule.
:paramtype filter_name: str
:keyword action: IP Filter Action. Possible values include: "Allow". Default value: "Allow".
:paramtype action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleIPAction
:keyword ip_mask: Required. A string that contains the IP address range in CIDR notation for
the rule.
:paramtype ip_mask: str
"""
super(NetworkRuleSetIpRule, self).__init__(**kwargs)
self.filter_name = filter_name
self.action = action
self.ip_mask = ip_mask
class NetworkRuleSetProperties(msrest.serialization.Model):
"""Network Rule Set Properties of IotHub.
All required parameters must be populated in order to send to Azure.
:ivar default_action: Default Action for Network Rule Set. Possible values include: "Deny",
"Allow". Default value: "Deny".
:vartype default_action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.DefaultAction
:ivar apply_to_built_in_event_hub_endpoint: Required. If True, then Network Rule Set is also
applied to BuiltIn EventHub EndPoint of IotHub.
:vartype apply_to_built_in_event_hub_endpoint: bool
:ivar ip_rules: Required. List of IP Rules.
:vartype ip_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetIpRule]
"""
_validation = {
'apply_to_built_in_event_hub_endpoint': {'required': True},
'ip_rules': {'required': True},
}
_attribute_map = {
'default_action': {'key': 'defaultAction', 'type': 'str'},
'apply_to_built_in_event_hub_endpoint': {'key': 'applyToBuiltInEventHubEndpoint', 'type': 'bool'},
'ip_rules': {'key': 'ipRules', 'type': '[NetworkRuleSetIpRule]'},
}
def __init__(
self,
*,
apply_to_built_in_event_hub_endpoint: bool,
ip_rules: List["NetworkRuleSetIpRule"],
default_action: Optional[Union[str, "DefaultAction"]] = "Deny",
**kwargs
):
"""
:keyword default_action: Default Action for Network Rule Set. Possible values include: "Deny",
"Allow". Default value: "Deny".
:paramtype default_action: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.DefaultAction
:keyword apply_to_built_in_event_hub_endpoint: Required. If True, then Network Rule Set is also
applied to BuiltIn EventHub EndPoint of IotHub.
:paramtype apply_to_built_in_event_hub_endpoint: bool
:keyword ip_rules: Required. List of IP Rules.
:paramtype ip_rules: list[~azure.mgmt.iothub.v2021_03_03_preview.models.NetworkRuleSetIpRule]
"""
super(NetworkRuleSetProperties, self).__init__(**kwargs)
self.default_action = default_action
self.apply_to_built_in_event_hub_endpoint = apply_to_built_in_event_hub_endpoint
self.ip_rules = ip_rules
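# Illustrative sketch (editor's addition): deny by default, allow one placeholder CIDR range,
# and extend the rule set to the built-in Event Hub-compatible endpoint. Plain strings are
# accepted wherever the enum types are expected.
def _example_network_rule_set_properties():
    return NetworkRuleSetProperties(
        default_action="Deny",
        apply_to_built_in_event_hub_endpoint=True,
        ip_rules=[NetworkRuleSetIpRule(filter_name="allow-office", ip_mask="203.0.113.0/24")],
    )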
class Operation(msrest.serialization.Model):
"""IoT Hub REST API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {provider}/{resource}/{read | write | action | delete}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~azure.mgmt.iothub.v2021_03_03_preview.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
display: Optional["OperationDisplay"] = None,
**kwargs
):
"""
:keyword display: The object that represents the operation.
:paramtype display: ~azure.mgmt.iothub.v2021_03_03_preview.models.OperationDisplay
"""
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provider: Service provider: Microsoft Devices.
:vartype provider: str
:ivar resource: Resource Type: IotHubs.
:vartype resource: str
:ivar operation: Name of the operation.
:vartype operation: str
:ivar description: Description of the operation.
:vartype description: str
"""
_validation = {
'provider': {'readonly': True},
'resource': {'readonly': True},
'operation': {'readonly': True},
'description': {'readonly': True},
}
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationDisplay, self).__init__(**kwargs)
self.provider = None
self.resource = None
self.operation = None
self.description = None
class OperationInputs(msrest.serialization.Model):
"""Input values.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the IoT hub to check.
:vartype name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
"""
:keyword name: Required. The name of the IoT hub to check.
:paramtype name: str
"""
super(OperationInputs, self).__init__(**kwargs)
self.name = name
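# Illustrative sketch (editor's addition): OperationInputs is the request body used when
# checking whether an IoT hub name is still available; the name below is a placeholder.
def _example_operation_inputs():
    return OperationInputs(name="example-iot-hub")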
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list IoT Hub operations. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of IoT Hub operations supported by the Microsoft.Devices resource provider.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(OperationListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class PrivateEndpoint(msrest.serialization.Model):
"""The private endpoint property of a private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource identifier.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(msrest.serialization.Model):
"""The private endpoint connection of an IotHub.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The resource identifier.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar properties: Required. The properties of a private endpoint connection.
:vartype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True, 'pattern': r'^(?![0-9]+$)(?!-)[a-zA-Z0-9-]{2,49}[a-zA-Z0-9]$'},
'type': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PrivateEndpointConnectionProperties'},
}
def __init__(
self,
*,
properties: "PrivateEndpointConnectionProperties",
**kwargs
):
"""
:keyword properties: Required. The properties of a private endpoint connection.
:paramtype properties:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpointConnectionProperties
"""
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class PrivateEndpointConnectionProperties(msrest.serialization.Model):
"""The properties of a private endpoint connection.
All required parameters must be populated in order to send to Azure.
:ivar private_endpoint: The private endpoint property of a private endpoint connection.
:vartype private_endpoint: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpoint
:ivar private_link_service_connection_state: Required. The current state of a private endpoint
connection.
:vartype private_link_service_connection_state:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionState
"""
_validation = {
'private_link_service_connection_state': {'required': True},
}
_attribute_map = {
'private_endpoint': {'key': 'privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
}
def __init__(
self,
*,
private_link_service_connection_state: "PrivateLinkServiceConnectionState",
private_endpoint: Optional["PrivateEndpoint"] = None,
**kwargs
):
"""
:keyword private_endpoint: The private endpoint property of a private endpoint connection.
:paramtype private_endpoint: ~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateEndpoint
:keyword private_link_service_connection_state: Required. The current state of a private
endpoint connection.
:paramtype private_link_service_connection_state:
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionState
"""
super(PrivateEndpointConnectionProperties, self).__init__(**kwargs)
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateLinkResources(msrest.serialization.Model):
"""The available private link resources for an IotHub.
:ivar value: The list of available private link resources for an IotHub.
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[GroupIdInformation]'},
}
def __init__(
self,
*,
value: Optional[List["GroupIdInformation"]] = None,
**kwargs
):
"""
:keyword value: The list of available private link resources for an IotHub.
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.GroupIdInformation]
"""
super(PrivateLinkResources, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""The current state of a private endpoint connection.
All required parameters must be populated in order to send to Azure.
:ivar status: Required. The status of a private endpoint connection. Possible values include:
"Pending", "Approved", "Rejected", "Disconnected".
:vartype status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionStatus
:ivar description: Required. The description for the current state of a private endpoint
connection.
:vartype description: str
:ivar actions_required: Actions required for a private endpoint connection.
:vartype actions_required: str
"""
_validation = {
'status': {'required': True},
'description': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
*,
status: Union[str, "PrivateLinkServiceConnectionStatus"],
description: str,
actions_required: Optional[str] = None,
**kwargs
):
"""
:keyword status: Required. The status of a private endpoint connection. Possible values
include: "Pending", "Approved", "Rejected", "Disconnected".
:paramtype status: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.PrivateLinkServiceConnectionStatus
:keyword description: Required. The description for the current state of a private endpoint
connection.
:paramtype description: str
:keyword actions_required: Actions required for a private endpoint connection.
:paramtype actions_required: str
"""
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
self.actions_required = actions_required
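# Illustrative sketch (editor's addition): approve a private endpoint connection by wrapping
# the required connection state in PrivateEndpointConnectionProperties (defined above). The
# status string must be one of the documented values; the description text is a placeholder.
def _example_private_endpoint_connection_properties():
    return PrivateEndpointConnectionProperties(
        private_link_service_connection_state=PrivateLinkServiceConnectionState(
            status="Approved",
            description="Approved by the hub administrator",
        )
    )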
class RegistryStatistics(msrest.serialization.Model):
"""Identity registry statistics.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar total_device_count: The total count of devices in the identity registry.
:vartype total_device_count: long
:ivar enabled_device_count: The count of enabled devices in the identity registry.
:vartype enabled_device_count: long
:ivar disabled_device_count: The count of disabled devices in the identity registry.
:vartype disabled_device_count: long
"""
_validation = {
'total_device_count': {'readonly': True},
'enabled_device_count': {'readonly': True},
'disabled_device_count': {'readonly': True},
}
_attribute_map = {
'total_device_count': {'key': 'totalDeviceCount', 'type': 'long'},
'enabled_device_count': {'key': 'enabledDeviceCount', 'type': 'long'},
'disabled_device_count': {'key': 'disabledDeviceCount', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(RegistryStatistics, self).__init__(**kwargs)
self.total_device_count = None
self.enabled_device_count = None
self.disabled_device_count = None
class RouteCompilationError(msrest.serialization.Model):
"""Compilation error when evaluating route.
:ivar message: Route error message.
:vartype message: str
:ivar severity: Severity of the route error. Possible values include: "error", "warning".
:vartype severity: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorSeverity
:ivar location: Location where the route error happened.
:vartype location: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorRange
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'severity': {'key': 'severity', 'type': 'str'},
'location': {'key': 'location', 'type': 'RouteErrorRange'},
}
def __init__(
self,
*,
message: Optional[str] = None,
severity: Optional[Union[str, "RouteErrorSeverity"]] = None,
location: Optional["RouteErrorRange"] = None,
**kwargs
):
"""
:keyword message: Route error message.
:paramtype message: str
:keyword severity: Severity of the route error. Possible values include: "error", "warning".
:paramtype severity: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorSeverity
:keyword location: Location where the route error happened.
:paramtype location: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorRange
"""
super(RouteCompilationError, self).__init__(**kwargs)
self.message = message
self.severity = severity
self.location = location
class RouteErrorPosition(msrest.serialization.Model):
"""Position where the route error happened.
:ivar line: Line where the route error happened.
:vartype line: int
:ivar column: Column where the route error happened.
:vartype column: int
"""
_attribute_map = {
'line': {'key': 'line', 'type': 'int'},
'column': {'key': 'column', 'type': 'int'},
}
def __init__(
self,
*,
line: Optional[int] = None,
column: Optional[int] = None,
**kwargs
):
"""
:keyword line: Line where the route error happened.
:paramtype line: int
:keyword column: Column where the route error happened.
:paramtype column: int
"""
super(RouteErrorPosition, self).__init__(**kwargs)
self.line = line
self.column = column
class RouteErrorRange(msrest.serialization.Model):
"""Range of route errors.
:ivar start: Start where the route error happened.
:vartype start: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
:ivar end: End where the route error happened.
:vartype end: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
"""
_attribute_map = {
'start': {'key': 'start', 'type': 'RouteErrorPosition'},
'end': {'key': 'end', 'type': 'RouteErrorPosition'},
}
def __init__(
self,
*,
start: Optional["RouteErrorPosition"] = None,
end: Optional["RouteErrorPosition"] = None,
**kwargs
):
"""
:keyword start: Start where the route error happened.
:paramtype start: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
:keyword end: End where the route error happened.
:paramtype end: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteErrorPosition
"""
super(RouteErrorRange, self).__init__(**kwargs)
self.start = start
self.end = end
class RouteProperties(msrest.serialization.Model):
"""The properties of a routing rule that your IoT hub uses to route messages to endpoints.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the route. The name can only include alphanumeric characters,
periods, underscores, and hyphens; it has a maximum length of 64 characters and must be unique.
:vartype name: str
:ivar source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:vartype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar condition: The condition that is evaluated to apply the routing rule. If no condition is
provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether a route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: Required. The name of the route. The name can only include alphanumeric
characters, periods, underscores, and hyphens; it has a maximum length of 64 characters and
must be unique.
:paramtype name: str
:keyword source: Required. The source that the routing rule is to be applied to, such as
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents", "DigitalTwinChangeEvents",
"DeviceConnectionStateEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword condition: The condition that is evaluated to apply the routing rule. If no condition
is provided, it evaluates to true by default. For grammar, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which messages that satisfy the
condition are routed. Currently only one endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether a route is enabled.
:paramtype is_enabled: bool
"""
super(RouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
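# Illustrative sketch (editor's addition): a route forwarding device messages to a single
# custom endpoint. ``endpoint_names`` is validated to contain exactly one entry; the endpoint
# name and condition below are placeholders.
def _example_route_properties():
    return RouteProperties(
        name="critical-telemetry",
        source="DeviceMessages",
        condition="level = 'critical'",
        endpoint_names=["example-storage-endpoint"],
        is_enabled=True,
    )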
class RoutingEndpoints(msrest.serialization.Model):
"""The properties related to the custom endpoints to which your IoT hub routes messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types for free hubs.
:ivar service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_queues:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusQueueEndpointProperties]
:ivar service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes the
messages to, based on the routing rules.
:vartype service_bus_topics:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusTopicEndpointProperties]
:ivar event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:vartype event_hubs:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEventHubProperties]
:ivar storage_containers: The list of storage container endpoints that IoT hub routes messages
to, based on the routing rules.
:vartype storage_containers:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerProperties]
"""
_attribute_map = {
'service_bus_queues': {'key': 'serviceBusQueues', 'type': '[RoutingServiceBusQueueEndpointProperties]'},
'service_bus_topics': {'key': 'serviceBusTopics', 'type': '[RoutingServiceBusTopicEndpointProperties]'},
'event_hubs': {'key': 'eventHubs', 'type': '[RoutingEventHubProperties]'},
'storage_containers': {'key': 'storageContainers', 'type': '[RoutingStorageContainerProperties]'},
}
def __init__(
self,
*,
service_bus_queues: Optional[List["RoutingServiceBusQueueEndpointProperties"]] = None,
service_bus_topics: Optional[List["RoutingServiceBusTopicEndpointProperties"]] = None,
event_hubs: Optional[List["RoutingEventHubProperties"]] = None,
storage_containers: Optional[List["RoutingStorageContainerProperties"]] = None,
**kwargs
):
"""
:keyword service_bus_queues: The list of Service Bus queue endpoints that IoT hub routes the
messages to, based on the routing rules.
:paramtype service_bus_queues:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusQueueEndpointProperties]
:keyword service_bus_topics: The list of Service Bus topic endpoints that the IoT hub routes
the messages to, based on the routing rules.
:paramtype service_bus_topics:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingServiceBusTopicEndpointProperties]
:keyword event_hubs: The list of Event Hubs endpoints that IoT hub routes messages to, based on
the routing rules. This list does not include the built-in Event Hubs endpoint.
:paramtype event_hubs:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEventHubProperties]
:keyword storage_containers: The list of storage container endpoints that IoT hub routes
messages to, based on the routing rules.
:paramtype storage_containers:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerProperties]
"""
super(RoutingEndpoints, self).__init__(**kwargs)
self.service_bus_queues = service_bus_queues
self.service_bus_topics = service_bus_topics
self.event_hubs = event_hubs
self.storage_containers = storage_containers
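# Illustrative sketch (editor's addition): routing endpoints grouped by type, here a single
# key-based Event Hub endpoint (RoutingEventHubProperties is defined just below and resolves at
# call time). The endpoint name and connection string are placeholders.
def _example_routing_endpoints():
    return RoutingEndpoints(
        event_hubs=[
            RoutingEventHubProperties(
                name="example-eventhub-endpoint",
                connection_string=(
                    "Endpoint=sb://example.servicebus.windows.net/;"
                    "SharedAccessKeyName=policy;SharedAccessKey=<key>;EntityPath=telemetry"
                ),
            )
        ]
    )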
class RoutingEventHubProperties(msrest.serialization.Model):
"""The properties related to an event hub endpoint.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the event hub endpoint.
:vartype id: str
:ivar connection_string: The connection string of the event hub endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The URL of the event hub endpoint. It must include the protocol sb://.
:vartype endpoint_uri: str
:ivar entity_path: Event hub name on the event hub namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the event hub endpoint. Possible
values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing event hub endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the event hub endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the event hub endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the event hub endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the event hub endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the event hub endpoint. It must include the protocol sb://.
:paramtype endpoint_uri: str
:keyword entity_path: Event hub name on the event hub namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the event hub endpoint.
Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing event hub endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the event hub endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the event hub endpoint.
:paramtype resource_group: str
"""
super(RoutingEventHubProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingMessage(msrest.serialization.Model):
"""Routing message.
:ivar body: Body of routing message.
:vartype body: str
:ivar app_properties: App properties.
:vartype app_properties: dict[str, str]
:ivar system_properties: System properties.
:vartype system_properties: dict[str, str]
"""
_attribute_map = {
'body': {'key': 'body', 'type': 'str'},
'app_properties': {'key': 'appProperties', 'type': '{str}'},
'system_properties': {'key': 'systemProperties', 'type': '{str}'},
}
def __init__(
self,
*,
body: Optional[str] = None,
app_properties: Optional[Dict[str, str]] = None,
system_properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword body: Body of routing message.
:paramtype body: str
:keyword app_properties: App properties.
:paramtype app_properties: dict[str, str]
:keyword system_properties: System properties.
:paramtype system_properties: dict[str, str]
"""
super(RoutingMessage, self).__init__(**kwargs)
self.body = body
self.app_properties = app_properties
self.system_properties = system_properties
class RoutingProperties(msrest.serialization.Model):
"""The routing related properties of the IoT hub. See: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging.
:ivar endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:vartype endpoints: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEndpoints
:ivar routes: The list of user-provided routing rules that the IoT hub uses to route messages
to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid hubs and
a maximum of 5 routing rules are allowed for free hubs.
:vartype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties]
:ivar fallback_route: The properties of the route that is used as a fall-back route when none
of the conditions specified in the 'routes' section are met. This is an optional parameter.
When this property is not set, the messages which do not meet any of the conditions specified
in the 'routes' section get routed to the built-in eventhub endpoint.
:vartype fallback_route: ~azure.mgmt.iothub.v2021_03_03_preview.models.FallbackRouteProperties
:ivar enrichments: The list of user-provided enrichments that the IoT hub applies to messages
to be delivered to built-in and custom endpoints. See: https://aka.ms/telemetryoneventgrid.
:vartype enrichments: list[~azure.mgmt.iothub.v2021_03_03_preview.models.EnrichmentProperties]
"""
_attribute_map = {
'endpoints': {'key': 'endpoints', 'type': 'RoutingEndpoints'},
'routes': {'key': 'routes', 'type': '[RouteProperties]'},
'fallback_route': {'key': 'fallbackRoute', 'type': 'FallbackRouteProperties'},
'enrichments': {'key': 'enrichments', 'type': '[EnrichmentProperties]'},
}
def __init__(
self,
*,
endpoints: Optional["RoutingEndpoints"] = None,
routes: Optional[List["RouteProperties"]] = None,
fallback_route: Optional["FallbackRouteProperties"] = None,
enrichments: Optional[List["EnrichmentProperties"]] = None,
**kwargs
):
"""
:keyword endpoints: The properties related to the custom endpoints to which your IoT hub routes
messages based on the routing rules. A maximum of 10 custom endpoints are allowed across all
endpoint types for paid hubs and only 1 custom endpoint is allowed across all endpoint types
for free hubs.
:paramtype endpoints: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingEndpoints
:keyword routes: The list of user-provided routing rules that the IoT hub uses to route
messages to built-in and custom endpoints. A maximum of 100 routing rules are allowed for paid
hubs and a maximum of 5 routing rules are allowed for free hubs.
:paramtype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties]
:keyword fallback_route: The properties of the route that is used as a fall-back route when
none of the conditions specified in the 'routes' section are met. This is an optional
parameter. When this property is not set, the messages which do not meet any of the conditions
specified in the 'routes' section get routed to the built-in eventhub endpoint.
:paramtype fallback_route:
~azure.mgmt.iothub.v2021_03_03_preview.models.FallbackRouteProperties
:keyword enrichments: The list of user-provided enrichments that the IoT hub applies to
messages to be delivered to built-in and custom endpoints. See:
https://aka.ms/telemetryoneventgrid.
:paramtype enrichments:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.EnrichmentProperties]
"""
super(RoutingProperties, self).__init__(**kwargs)
self.endpoints = endpoints
self.routes = routes
self.fallback_route = fallback_route
self.enrichments = enrichments
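# Illustrative sketch (not part of the generated models): wiring a custom
# Event Hubs endpoint into a hub's routing configuration. The endpoint name
# and connection string below are placeholder values.
#
#     endpoints = RoutingEndpoints(
#         event_hubs=[RoutingEventHubProperties(
#             name="telemetry",
#             connection_string="Endpoint=sb://example.servicebus.windows.net/;...",
#         )]
#     )
#     routing = RoutingProperties(endpoints=endpoints)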
class RoutingServiceBusQueueEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus queue endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the service bus queue endpoint.
:vartype id: str
:ivar connection_string: The connection string of the service bus queue endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The url of the service bus queue endpoint. It must include the protocol
sb://.
:vartype endpoint_uri: str
:ivar entity_path: Queue name on the service bus namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the service bus queue endpoint.
Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing service bus queue endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus queue endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus queue endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the service bus queue endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the service bus queue endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the service bus queue endpoint. It must include the protocol
sb://.
:paramtype endpoint_uri: str
:keyword entity_path: Queue name on the service bus namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the service bus queue
endpoint. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing service bus queue endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual queue
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus queue endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus queue endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusQueueEndpointProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingServiceBusTopicEndpointProperties(msrest.serialization.Model):
"""The properties related to service bus topic endpoint types.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the service bus topic endpoint.
:vartype id: str
:ivar connection_string: The connection string of the service bus topic endpoint.
:vartype connection_string: str
:ivar endpoint_uri: The url of the service bus topic endpoint. It must include the protocol
sb://.
:vartype endpoint_uri: str
:ivar entity_path: Topic name on the service bus namespace.
:vartype entity_path: str
:ivar authentication_type: Method used to authenticate against the service bus topic endpoint.
Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing service bus topic endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:vartype name: str
:ivar subscription_id: The subscription identifier of the service bus topic endpoint.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the service bus topic endpoint.
:vartype resource_group: str
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'entity_path': {'key': 'entityPath', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
entity_path: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword id: Id of the service bus topic endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the service bus topic endpoint.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the service bus topic endpoint. It must include the protocol
sb://.
:paramtype endpoint_uri: str
:keyword entity_path: Topic name on the service bus namespace.
:paramtype entity_path: str
:keyword authentication_type: Method used to authenticate against the service bus topic
endpoint. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing service bus topic endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types. The name need not be the same as the actual topic
name.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the service bus topic endpoint.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the service bus topic endpoint.
:paramtype resource_group: str
"""
super(RoutingServiceBusTopicEndpointProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.entity_path = entity_path
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
class RoutingStorageContainerProperties(msrest.serialization.Model):
"""The properties related to a storage container endpoint.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the storage container endpoint.
:vartype id: str
:ivar connection_string: The connection string of the storage account.
:vartype connection_string: str
:ivar endpoint_uri: The url of the storage endpoint. It must include the protocol https://.
:vartype endpoint_uri: str
:ivar authentication_type: Method used to authenticate against the storage endpoint. Possible
values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of routing storage endpoint.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:ivar name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:vartype name: str
:ivar subscription_id: The subscription identifier of the storage account.
:vartype subscription_id: str
:ivar resource_group: The name of the resource group of the storage account.
:vartype resource_group: str
:ivar container_name: Required. The name of storage container in the storage account.
:vartype container_name: str
:ivar file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:vartype file_name_format: str
:ivar batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:vartype batch_frequency_in_seconds: int
:ivar max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage. Value
should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:vartype max_chunk_size_in_bytes: int
:ivar encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:vartype encoding: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerPropertiesEncoding
"""
_validation = {
'name': {'required': True, 'pattern': r'^[A-Za-z0-9-._]{1,64}$'},
'container_name': {'required': True},
'batch_frequency_in_seconds': {'maximum': 720, 'minimum': 60},
'max_chunk_size_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'endpoint_uri': {'key': 'endpointUri', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
'name': {'key': 'name', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'resource_group': {'key': 'resourceGroup', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'file_name_format': {'key': 'fileNameFormat', 'type': 'str'},
'batch_frequency_in_seconds': {'key': 'batchFrequencyInSeconds', 'type': 'int'},
'max_chunk_size_in_bytes': {'key': 'maxChunkSizeInBytes', 'type': 'int'},
'encoding': {'key': 'encoding', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
container_name: str,
id: Optional[str] = None,
connection_string: Optional[str] = None,
endpoint_uri: Optional[str] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
subscription_id: Optional[str] = None,
resource_group: Optional[str] = None,
file_name_format: Optional[str] = None,
batch_frequency_in_seconds: Optional[int] = None,
max_chunk_size_in_bytes: Optional[int] = None,
encoding: Optional[Union[str, "RoutingStorageContainerPropertiesEncoding"]] = None,
**kwargs
):
"""
:keyword id: Id of the storage container endpoint.
:paramtype id: str
:keyword connection_string: The connection string of the storage account.
:paramtype connection_string: str
:keyword endpoint_uri: The url of the storage endpoint. It must include the protocol https://.
:paramtype endpoint_uri: str
:keyword authentication_type: Method used to authenticate against the storage endpoint.
Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of routing storage endpoint.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
:keyword name: Required. The name that identifies this endpoint. The name can only include
alphanumeric characters, periods, underscores, hyphens and has a maximum length of 64
characters. The following names are reserved: events, fileNotifications, $default. Endpoint
names must be unique across endpoint types.
:paramtype name: str
:keyword subscription_id: The subscription identifier of the storage account.
:paramtype subscription_id: str
:keyword resource_group: The name of the resource group of the storage account.
:paramtype resource_group: str
:keyword container_name: Required. The name of storage container in the storage account.
:paramtype container_name: str
:keyword file_name_format: File name format for the blob. Default format is
{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}. All parameters are mandatory but can be
reordered.
:paramtype file_name_format: str
:keyword batch_frequency_in_seconds: Time interval at which blobs are written to storage. Value
should be between 60 and 720 seconds. Default value is 300 seconds.
:paramtype batch_frequency_in_seconds: int
:keyword max_chunk_size_in_bytes: Maximum number of bytes for each blob written to storage.
Value should be between 10485760(10MB) and 524288000(500MB). Default value is 314572800(300MB).
:paramtype max_chunk_size_in_bytes: int
:keyword encoding: Encoding that is used to serialize messages to blobs. Supported values are
'avro', 'avrodeflate', and 'JSON'. Default value is 'avro'. Possible values include: "Avro",
"AvroDeflate", "JSON".
:paramtype encoding: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingStorageContainerPropertiesEncoding
"""
super(RoutingStorageContainerProperties, self).__init__(**kwargs)
self.id = id
self.connection_string = connection_string
self.endpoint_uri = endpoint_uri
self.authentication_type = authentication_type
self.identity = identity
self.name = name
self.subscription_id = subscription_id
self.resource_group = resource_group
self.container_name = container_name
self.file_name_format = file_name_format
self.batch_frequency_in_seconds = batch_frequency_in_seconds
self.max_chunk_size_in_bytes = max_chunk_size_in_bytes
self.encoding = encoding
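# Illustrative sketch (not part of the generated models): a storage container
# endpoint that stays within the validation bounds above (batch frequency
# 60-720 s, chunk size 10-500 MB). All values are placeholders.
#
#     storage_endpoint = RoutingStorageContainerProperties(
#         name="archive",
#         container_name="telemetry",
#         connection_string="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...",
#         batch_frequency_in_seconds=300,
#         max_chunk_size_in_bytes=104857600,  # 100 MB
#         encoding="Avro",
#     )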
class RoutingTwin(msrest.serialization.Model):
"""Twin reference input parameter. This is an optional parameter.
:ivar tags: A set of tags. Twin Tags.
:vartype tags: any
:ivar properties:
:vartype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwinProperties
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': 'object'},
'properties': {'key': 'properties', 'type': 'RoutingTwinProperties'},
}
def __init__(
self,
*,
tags: Optional[Any] = None,
properties: Optional["RoutingTwinProperties"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Twin Tags.
:paramtype tags: any
:keyword properties:
:paramtype properties: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwinProperties
"""
super(RoutingTwin, self).__init__(**kwargs)
self.tags = tags
self.properties = properties
class RoutingTwinProperties(msrest.serialization.Model):
"""RoutingTwinProperties.
:ivar desired: Twin desired properties.
:vartype desired: any
:ivar reported: Twin reported properties.
:vartype reported: any
"""
_attribute_map = {
'desired': {'key': 'desired', 'type': 'object'},
'reported': {'key': 'reported', 'type': 'object'},
}
def __init__(
self,
*,
desired: Optional[Any] = None,
reported: Optional[Any] = None,
**kwargs
):
"""
:keyword desired: Twin desired properties.
:paramtype desired: any
:keyword reported: Twin reported properties.
:paramtype reported: any
"""
super(RoutingTwinProperties, self).__init__(**kwargs)
self.desired = desired
self.reported = reported
class SharedAccessSignatureAuthorizationRule(msrest.serialization.Model):
"""The properties of an IoT hub shared access policy.
All required parameters must be populated in order to send to Azure.
:ivar key_name: Required. The name of the shared access policy.
:vartype key_name: str
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
:ivar rights: Required. The permissions assigned to the shared access policy. Possible values
include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect", "RegistryRead,
RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect", "RegistryWrite,
ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite, DeviceConnect",
"RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect, DeviceConnect",
"RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:vartype rights: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.AccessRights
"""
_validation = {
'key_name': {'required': True},
'rights': {'required': True},
}
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'rights': {'key': 'rights', 'type': 'str'},
}
def __init__(
self,
*,
key_name: str,
rights: Union[str, "AccessRights"],
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs
):
"""
:keyword key_name: Required. The name of the shared access policy.
:paramtype key_name: str
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
:keyword rights: Required. The permissions assigned to the shared access policy. Possible
values include: "RegistryRead", "RegistryWrite", "ServiceConnect", "DeviceConnect",
"RegistryRead, RegistryWrite", "RegistryRead, ServiceConnect", "RegistryRead, DeviceConnect",
"RegistryWrite, ServiceConnect", "RegistryWrite, DeviceConnect", "ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect", "RegistryRead, RegistryWrite,
DeviceConnect", "RegistryRead, ServiceConnect, DeviceConnect", "RegistryWrite, ServiceConnect,
DeviceConnect", "RegistryRead, RegistryWrite, ServiceConnect, DeviceConnect".
:paramtype rights: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.AccessRights
"""
super(SharedAccessSignatureAuthorizationRule, self).__init__(**kwargs)
self.key_name = key_name
self.primary_key = primary_key
self.secondary_key = secondary_key
self.rights = rights
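# Illustrative sketch (not part of the generated models): a shared access
# policy that combines registry-read and service-connect rights; the key
# material is a placeholder.
#
#     policy = SharedAccessSignatureAuthorizationRule(
#         key_name="service",
#         rights="RegistryRead, ServiceConnect",
#         primary_key="<base64-encoded-key>",
#     )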
class SharedAccessSignatureAuthorizationRuleListResult(msrest.serialization.Model):
"""The list of shared access policies with a next link.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shared access policies.
:vartype value:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
:ivar next_link: The next link.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SharedAccessSignatureAuthorizationRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["SharedAccessSignatureAuthorizationRule"]] = None,
**kwargs
):
"""
:keyword value: The list of shared access policies.
:paramtype value:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.SharedAccessSignatureAuthorizationRule]
"""
super(SharedAccessSignatureAuthorizationRuleListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class StorageEndpointProperties(msrest.serialization.Model):
"""The properties of the Azure Storage endpoint for file upload.
All required parameters must be populated in order to send to Azure.
:ivar sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:vartype sas_ttl_as_iso8601: ~datetime.timedelta
:ivar connection_string: Required. The connection string for the Azure Storage account to which
files are uploaded.
:vartype connection_string: str
:ivar container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:vartype container_name: str
:ivar authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:vartype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:ivar identity: Managed identity properties of storage endpoint for file upload.
:vartype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
_validation = {
'connection_string': {'required': True},
'container_name': {'required': True},
}
_attribute_map = {
'sas_ttl_as_iso8601': {'key': 'sasTtlAsIso8601', 'type': 'duration'},
'connection_string': {'key': 'connectionString', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'authentication_type': {'key': 'authenticationType', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
}
def __init__(
self,
*,
connection_string: str,
container_name: str,
sas_ttl_as_iso8601: Optional[datetime.timedelta] = None,
authentication_type: Optional[Union[str, "AuthenticationType"]] = None,
identity: Optional["ManagedIdentity"] = None,
**kwargs
):
"""
:keyword sas_ttl_as_iso8601: The period of time for which the SAS URI generated by IoT Hub for
file upload is valid. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-file-upload#file-upload-notification-configuration-options.
:paramtype sas_ttl_as_iso8601: ~datetime.timedelta
:keyword connection_string: Required. The connection string for the Azure Storage account to
which files are uploaded.
:paramtype connection_string: str
:keyword container_name: Required. The name of the root container where you upload files. The
container need not exist but should be creatable using the connectionString specified.
:paramtype container_name: str
:keyword authentication_type: Specifies authentication type being used for connecting to the
storage account. Possible values include: "keyBased", "identityBased".
:paramtype authentication_type: str or
~azure.mgmt.iothub.v2021_03_03_preview.models.AuthenticationType
:keyword identity: Managed identity properties of storage endpoint for file upload.
:paramtype identity: ~azure.mgmt.iothub.v2021_03_03_preview.models.ManagedIdentity
"""
super(StorageEndpointProperties, self).__init__(**kwargs)
self.sas_ttl_as_iso8601 = sas_ttl_as_iso8601
self.connection_string = connection_string
self.container_name = container_name
self.authentication_type = authentication_type
self.identity = identity
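# Illustrative sketch (not part of the generated models): a file-upload
# storage endpoint with a one-hour SAS TTL. The connection string and
# container name are placeholders.
#
#     file_upload = StorageEndpointProperties(
#         connection_string="DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...",
#         container_name="fileuploads",
#         sas_ttl_as_iso8601=datetime.timedelta(hours=1),
#     )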
class TagsResource(msrest.serialization.Model):
"""A container holding only the Tags for a resource, allowing the user to update the tags on an IoT Hub instance.
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(TagsResource, self).__init__(**kwargs)
self.tags = tags
class TestAllRoutesInput(msrest.serialization.Model):
"""Input for testing all routes.
:ivar routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:vartype routing_source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
_attribute_map = {
'routing_source': {'key': 'routingSource', 'type': 'str'},
'message': {'key': 'message', 'type': 'RoutingMessage'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
routing_source: Optional[Union[str, "RoutingSource"]] = None,
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword routing_source: Routing source. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents",
"DigitalTwinChangeEvents", "DeviceConnectionStateEvents".
:paramtype routing_source: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingSource
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
super(TestAllRoutesInput, self).__init__(**kwargs)
self.routing_source = routing_source
self.message = message
self.twin = twin
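# Illustrative sketch (not part of the generated models): a payload for
# testing all routes against a sample device-to-cloud message. The body and
# application properties are placeholders.
#
#     test_input = TestAllRoutesInput(
#         routing_source="DeviceMessages",
#         message=RoutingMessage(
#             body='{"temperature": 21}',
#             app_properties={"sensor": "t1"},
#         ),
#     )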
class TestAllRoutesResult(msrest.serialization.Model):
"""Result of testing all routes.
:ivar routes: JSON-serialized array of matched routes.
:vartype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.MatchedRoute]
"""
_attribute_map = {
'routes': {'key': 'routes', 'type': '[MatchedRoute]'},
}
def __init__(
self,
*,
routes: Optional[List["MatchedRoute"]] = None,
**kwargs
):
"""
:keyword routes: JSON-serialized array of matched routes.
:paramtype routes: list[~azure.mgmt.iothub.v2021_03_03_preview.models.MatchedRoute]
"""
super(TestAllRoutesResult, self).__init__(**kwargs)
self.routes = routes
class TestRouteInput(msrest.serialization.Model):
"""Input for testing route.
All required parameters must be populated in order to send to Azure.
:ivar message: Routing message.
:vartype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:ivar route: Required. Route properties.
:vartype route: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
:ivar twin: Routing Twin Reference.
:vartype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
_validation = {
'route': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'RoutingMessage'},
'route': {'key': 'route', 'type': 'RouteProperties'},
'twin': {'key': 'twin', 'type': 'RoutingTwin'},
}
def __init__(
self,
*,
route: "RouteProperties",
message: Optional["RoutingMessage"] = None,
twin: Optional["RoutingTwin"] = None,
**kwargs
):
"""
:keyword message: Routing message.
:paramtype message: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingMessage
:keyword route: Required. Route properties.
:paramtype route: ~azure.mgmt.iothub.v2021_03_03_preview.models.RouteProperties
:keyword twin: Routing Twin Reference.
:paramtype twin: ~azure.mgmt.iothub.v2021_03_03_preview.models.RoutingTwin
"""
super(TestRouteInput, self).__init__(**kwargs)
self.message = message
self.route = route
self.twin = twin
class TestRouteResult(msrest.serialization.Model):
"""Result of testing one route.
:ivar result: Result of testing route. Possible values include: "undefined", "false", "true".
:vartype result: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.TestResultStatus
:ivar details: Detailed result of testing route.
:vartype details: ~azure.mgmt.iothub.v2021_03_03_preview.models.TestRouteResultDetails
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'str'},
'details': {'key': 'details', 'type': 'TestRouteResultDetails'},
}
def __init__(
self,
*,
result: Optional[Union[str, "TestResultStatus"]] = None,
details: Optional["TestRouteResultDetails"] = None,
**kwargs
):
"""
:keyword result: Result of testing route. Possible values include: "undefined", "false",
"true".
:paramtype result: str or ~azure.mgmt.iothub.v2021_03_03_preview.models.TestResultStatus
:keyword details: Detailed result of testing route.
:paramtype details: ~azure.mgmt.iothub.v2021_03_03_preview.models.TestRouteResultDetails
"""
super(TestRouteResult, self).__init__(**kwargs)
self.result = result
self.details = details
class TestRouteResultDetails(msrest.serialization.Model):
"""Detailed result of testing a route.
:ivar compilation_errors: JSON-serialized list of route compilation errors.
:vartype compilation_errors:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteCompilationError]
"""
_attribute_map = {
'compilation_errors': {'key': 'compilationErrors', 'type': '[RouteCompilationError]'},
}
def __init__(
self,
*,
compilation_errors: Optional[List["RouteCompilationError"]] = None,
**kwargs
):
"""
:keyword compilation_errors: JSON-serialized list of route compilation errors.
:paramtype compilation_errors:
list[~azure.mgmt.iothub.v2021_03_03_preview.models.RouteCompilationError]
"""
super(TestRouteResultDetails, self).__init__(**kwargs)
self.compilation_errors = compilation_errors
class UserSubscriptionQuota(msrest.serialization.Model):
"""User subscription quota response.
:ivar id: IotHub type id.
:vartype id: str
:ivar type: Response type.
:vartype type: str
:ivar unit: Unit of IotHub type.
:vartype unit: str
:ivar current_value: Current number of IotHub type.
:vartype current_value: int
:ivar limit: Numerical limit on IotHub type.
:vartype limit: int
:ivar name: IotHub type.
:vartype name: ~azure.mgmt.iothub.v2021_03_03_preview.models.Name
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'Name'},
}
def __init__(
self,
*,
id: Optional[str] = None,
type: Optional[str] = None,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
name: Optional["Name"] = None,
**kwargs
):
"""
:keyword id: IotHub type id.
:paramtype id: str
:keyword type: Response type.
:paramtype type: str
:keyword unit: Unit of IotHub type.
:paramtype unit: str
:keyword current_value: Current number of IotHub type.
:paramtype current_value: int
:keyword limit: Numerical limit on IotHub type.
:paramtype limit: int
:keyword name: IotHub type.
:paramtype name: ~azure.mgmt.iothub.v2021_03_03_preview.models.Name
"""
super(UserSubscriptionQuota, self).__init__(**kwargs)
self.id = id
self.type = type
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
class UserSubscriptionQuotaListResult(msrest.serialization.Model):
"""Json-serialized array of User subscription quota response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.UserSubscriptionQuota]
:ivar next_link:
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UserSubscriptionQuota]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["UserSubscriptionQuota"]] = None,
**kwargs
):
"""
:keyword value:
:paramtype value: list[~azure.mgmt.iothub.v2021_03_03_preview.models.UserSubscriptionQuota]
"""
super(UserSubscriptionQuotaListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
| Azure/azure-sdk-for-python | sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2021_03_03_preview/models/_models_py3.py | Python | mit | 167,573 |
# binary tag: refers to tags that have open and close element
# eg: [a]some content[/a]
# standalone tag: refers to tags that are self contained
# eg: [b some content]
# Assuming that the text input is well formatted
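# Example (illustrative, not part of the original script): for an input of
#   [a]
#   [b width=100]
#   [/a]
# the binary [a]...[/a] pair stays at the left margin and the standalone
# [b ...] tag inside it is indented one level:
#   [a]
#      [b width=100]
#   [/a]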
# open input and output file
with open('textin.txt', 'r') as f1, open('textout.txt', 'w') as f2:
lines = f1.readlines()
# keep track of all binary tags (i.e. closeable tag)
closeables = set()
for line in lines:
if line.startswith('[/'): # closing tag
            tag = line[2:].split(']')[0]  # tag name between "[/" and "]"
closeables.add(tag) # add to set
# each line updates its position = prev position + directive
# and give a new directive to the next line (1 for indent)
# we start at 0, we use 3 blank spaces for indent
    position = 0; indent = ' ' * 3; directive = 0
for line in lines:
if line.startswith('[/'): # we are closing
position -= 1 # since we are closing, unindent immediately
position = position + directive
directive = 0 # done
f2.write(indent*position+line)
elif line.startswith('['):
# get the tag
tag = line[1:].split(']',1)[0].split(' ',1)[0]
if tag in closeables: # tag is binary
position = position + directive
directive = 1 # indent next line
else: # tag is standalone
position = position + directive
directive = 0 # done
f2.write(indent*position+line)
else:
# we don't touch lines in between
f2.write(line)
| julio73/scratchbook | code/work/alu/indenter.py | Python | mit | 1,460 |
from django.conf.urls import patterns
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('staticblog.views',
(r'^$', 'archive'),
(r'^([\-\w]+)$', 'render_post'),
(r'^git/receive', 'handle_hook'),
)
| cgrice/django-staticblog | staticblog/urls.py | Python | mit | 294 |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014 l8orre
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
#import sys
from PyQt4 import QtGui, Qt, QtCore
from PyQt4.QtCore import QObject , pyqtSignal, pyqtSlot, SIGNAL
from PyQt4.QtCore import QObject
#import numpy as np
from os import listdir as ls
#from PyQt4.Qt import QPixmap
import os
import time
import nxtPwt
#import requests, json
class nxtWin5Control(QObject):
""" class nxtWin5Control(): here"""
def __init__(self, app): #, application
super(QObject, self, ).__init__()
import nxtPwt.ui_nxtWin5 as nxtWin5 # the QtCreator-generated Widget.py!!
ui = nxtWin5.Ui_MainWindow() # Ui_MainWindow() is the autogenerated class in m2def.py
self.ui_nxtWin5 = ui
self.app = app #
# self.userDataContainer = self.app.nxtMain.userDataContainer
self.server = ''
self.account =''
self.secretPhrase = ''
#self.app.algo.ui_nxtWin = ui # make the ui_AlgoWin known to the Algo!!! this is N( at the algo when init'ing
self.app.nxtWin5 = self # make this WinControl1 known
def init(self): #, ui_AlgoWin):
""" the AlgoWin """
# maybe this gives trouble w/ MainWIn, self.app.algo = Algo1(self.app, ui)
### re - init hte algo here!!!
ui = self.ui_nxtWin5
############################
############################
############################
########## Window Maintenance
def show(self):
self.uiFrame = QtGui.QMainWindow()
self.ui_nxtWin5.setupUi(self.uiFrame)
self.init() #self.ui_AlgoWin)
self.uiFrame.show()
| l8orre/nxtPwt | nxtPwt/nxtWin5Control1.py | Python | mit | 2,721 |
"""tictactoe URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from tictactoe import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('tictactoe/', include('tictactoe.game.urls'), name='game'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# serving static files like this should not be done in production
| jsonbrazeal/tictactoe | tictactoe/urls.py | Python | mit | 1,037 |
from flask import request, abort, jsonify, render_template
from flask_sqlalchemy import BaseQuery  # 'flask.ext.*' imports were removed in Flask 1.0
import math
class PaginatedQuery(object):
def __init__(self, query_or_model, paginate_by, page_var='page',
check_bounds=False):
self.paginate_by = paginate_by
self.page_var = page_var
self.check_bounds = check_bounds
if isinstance(query_or_model, BaseQuery):
self.query = query_or_model
else:
self.model = query_or_model
self.query = self.model.all()
def get_page(self):
curr_page = request.args.get(self.page_var)
if curr_page and curr_page.isdigit():
return max(1, int(curr_page))
return 1
def get_page_count(self):
return int(math.ceil(float(self.query.count()) / self.paginate_by))
def get_object_list(self):
if self.get_page_count() == 0:
return []
if self.check_bounds and self.get_page() > self.get_page_count():
abort(404)
return self.query.paginate(self.get_page(), self.paginate_by).items
def object_list(template_name, query, context_variable='object_list',
paginate_by=20, page_var='page', check_bounds=True, **kwargs):
paginated_query = PaginatedQuery(
query,
paginate_by,
page_var,
check_bounds)
kwargs[context_variable] = paginated_query.get_object_list()
return render_template(
template_name,
pagination=paginated_query,
page=paginated_query.get_page(),
**kwargs)
def json_object_list(query, context_variable='object_list',
paginate_by=20, page_var='page', check_bounds=True, **kwargs):
paginated_query = PaginatedQuery(
query,
paginate_by,
page_var,
check_bounds)
kwargs[context_variable] = paginated_query.get_object_list()
    # PaginatedQuery itself is not JSON-serializable, so expose its paging
    # metadata as a plain dict (the listed objects must also be serializable).
    return jsonify(
        pagination={
            'page': paginated_query.get_page(),
            'num_pages': paginated_query.get_page_count(),
        },
        page=paginated_query.get_page(),
        **kwargs)
def get_object_or_404(query, criteria):
    # Run the filtered query once and return the first match, or abort with 404.
    obj = query.filter(criteria).first()
    if obj is None:
        abort(404)
    return obj
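# Example usage (illustrative; assumes a Flask app and a Flask-SQLAlchemy
# model named ``Post``):
#
#     @app.route('/posts/')
#     def post_index():
#         # Renders posts.html with ``object_list`` and ``pagination`` in the
#         # context; the current page comes from the ``?page=`` query argument
#         # and out-of-range pages abort with 404.
#         return object_list('posts.html', Post.query, paginate_by=10)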
| pbecotte/devblog | backend/blog/utils.py | Python | mit | 2,165 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import os
import re
import socket
import sys
import time
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.backends import utils
from django.db.migrations.executor import MigrationExecutor
from django.utils.autoreload import gen_filenames
from django_extensions.management.technical_response import \
null_technical_500_response
from django_extensions.management.utils import (
RedirectHandler, has_ipdb, setup_logger, signalcommand,
)
try:
if 'whitenoise.runserver_nostatic' in settings.INSTALLED_APPS:
USE_STATICFILES = False
elif 'django.contrib.staticfiles' in settings.INSTALLED_APPS:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = True
elif 'staticfiles' in settings.INSTALLED_APPS:
from staticfiles.handlers import StaticFilesHandler # noqa
USE_STATICFILES = True
else:
USE_STATICFILES = False
except ImportError:
USE_STATICFILES = False
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
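# Illustrative examples of addrport values the pattern above accepts:
# "8000", "0.0.0.0:8000", "[::1]:8000" and "localhost:8000".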
DEFAULT_PORT = "8000"
DEFAULT_POLLER_RELOADER_INTERVAL = getattr(settings, 'RUNSERVERPLUS_POLLER_RELOADER_INTERVAL', 1)
DEFAULT_POLLER_RELOADER_TYPE = getattr(settings, 'RUNSERVERPLUS_POLLER_RELOADER_TYPE', 'auto')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Starts a lightweight Web server for development."
# Validation is called explicitly each time the server is reloaded.
requires_system_checks = False
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('addrport', nargs='?',
help='Optional port number, or ipaddr:port')
parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
help='Tells Django to use a IPv6 address.')
parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
help='Tells Django to NOT use the auto-reloader.')
parser.add_argument('--browser', action='store_true', dest='open_browser',
help='Tells Django to open a browser.')
parser.add_argument('--nothreading', action='store_false', dest='threaded',
help='Do not run in multithreaded mode.')
parser.add_argument('--threaded', action='store_true', dest='threaded',
help='Run in multithreaded mode.')
parser.add_argument('--output', dest='output_file', default=None,
help='Specifies an output file to send a copy of all messages (not flushed immediately).')
parser.add_argument('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed")
cert_group = parser.add_mutually_exclusive_group()
cert_group.add_argument('--cert', dest='cert_path', action="store", type=str,
help='Deprecated alias for --cert-file option.')
cert_group.add_argument('--cert-file', dest='cert_path', action="store", type=str,
help='SSL .cert file path. If not provided path from --key-file will be selected. '
'Either --cert-file or --key-file must be provided to use SSL.')
parser.add_argument('--key-file', dest='key_file_path', action="store", type=str,
help='SSL .key file path. If not provided path from --cert-file will be selected. '
'Either --cert-file or --key-file must be provided to use SSL.')
parser.add_argument('--extra-file', dest='extra_files', action="append", type=str,
help='auto-reload whenever the given file changes too (can be specified multiple times)')
parser.add_argument('--reloader-interval', dest='reloader_interval', action="store", type=int, default=DEFAULT_POLLER_RELOADER_INTERVAL,
help='After how many seconds auto-reload should scan for updates in poller-mode [default=%s]' % DEFAULT_POLLER_RELOADER_INTERVAL)
parser.add_argument('--reloader-type', dest='reloader_type', action="store", type=str, default=DEFAULT_POLLER_RELOADER_TYPE,
help='Werkzeug reloader type [options are auto, watchdog, or stat, default=%s]' % DEFAULT_POLLER_RELOADER_TYPE)
parser.add_argument('--pdb', action='store_true', dest='pdb', default=False,
help='Drop into pdb shell at the start of any view.')
parser.add_argument('--ipdb', action='store_true', dest='ipdb', default=False,
help='Drop into ipdb shell at the start of any view.')
parser.add_argument('--pm', action='store_true', dest='pm', default=False,
help='Drop into (i)pdb shell if an exception is raised in a view.')
parser.add_argument('--startup-messages', dest='startup_messages', action="store", default='reload',
help='When to show startup messages: reload [default], once, always, never.')
parser.add_argument('--keep-meta-shutdown', dest='keep_meta_shutdown_func', action='store_true', default=False,
help="Keep request.META['werkzeug.server.shutdown'] function which is automatically removed "
"because Django debug pages tries to call the function and unintentionally shuts down "
"the Werkzeug server.")
parser.add_argument("--nopin", dest="nopin", action="store_true", default=False,
help="Disable the PIN in werkzeug. USE IT WISELY!"),
if USE_STATICFILES:
parser.add_argument('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.')
parser.add_argument('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.')
@signalcommand
def handle(self, *args, **options):
addrport = options.get('addrport')
startup_messages = options.get('startup_messages', 'reload')
if startup_messages == "reload":
self.show_startup_messages = os.environ.get('RUNSERVER_PLUS_SHOW_MESSAGES')
elif startup_messages == "once":
self.show_startup_messages = not os.environ.get('RUNSERVER_PLUS_SHOW_MESSAGES')
elif startup_messages == "never":
self.show_startup_messages = False
else:
self.show_startup_messages = True
os.environ['RUNSERVER_PLUS_SHOW_MESSAGES'] = '1'
# Do not use default ending='\n', because StreamHandler() takes care of it
if hasattr(self.stderr, 'ending'):
self.stderr.ending = None
setup_logger(logger, self.stderr, filename=options.get('output_file', None)) # , fmt="[%(name)s] %(message)s")
logredirect = RedirectHandler(__name__)
# Redirect werkzeug log items
werklogger = logging.getLogger('werkzeug')
werklogger.setLevel(logging.INFO)
werklogger.addHandler(logredirect)
werklogger.propagate = False
if options.get("print_sql", False):
try:
import sqlparse
except ImportError:
sqlparse = None # noqa
try:
import pygments.lexers
import pygments.formatters
except ImportError:
pygments = None
truncate = getattr(settings, 'RUNSERVER_PLUS_PRINT_SQL_TRUNCATE', 1000)
class PrintQueryWrapper(utils.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
execution_time = time.time() - starttime
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
if sqlparse:
raw_sql = raw_sql[:truncate]
raw_sql = sqlparse.format(raw_sql, reindent_aligned=True, truncate_strings=500)
if pygments:
raw_sql = pygments.highlight(
raw_sql,
pygments.lexers.get_lexer_by_name("sql"),
pygments.formatters.TerminalFormatter()
)
logger.info(raw_sql)
logger.info("")
logger.info('[Execution time: %.6fs] [Database: %s]' % (execution_time, self.db.alias))
logger.info("")
utils.CursorDebugWrapper = PrintQueryWrapper
pdb_option = options.get('pdb', False)
ipdb_option = options.get('ipdb', False)
pm = options.get('pm', False)
try:
from django_pdb.middleware import PdbMiddleware
except ImportError:
if pdb_option or ipdb_option or pm:
raise CommandError("django-pdb is required for --pdb, --ipdb and --pm options. Please visit https://pypi.python.org/pypi/django-pdb or install via pip. (pip install django-pdb)")
pm = False
else:
# Add pdb middleware if --pdb is specified or if in DEBUG mode
if (pdb_option or ipdb_option or settings.DEBUG):
middleware = 'django_pdb.middleware.PdbMiddleware'
settings_middleware = getattr(settings, 'MIDDLEWARE', None) or settings.MIDDLEWARE_CLASSES
if middleware not in settings_middleware:
if isinstance(settings_middleware, tuple):
settings_middleware += (middleware,)
else:
settings_middleware += [middleware]
# If --pdb is specified then always break at the start of views.
# Otherwise break only if a 'pdb' query parameter is set in the url
if pdb_option:
PdbMiddleware.always_break = 'pdb'
elif ipdb_option:
PdbMiddleware.always_break = 'ipdb'
def postmortem(request, exc_type, exc_value, tb):
if has_ipdb():
import ipdb
p = ipdb
else:
import pdb
p = pdb
print("Exception occured: %s, %s" % (exc_type, exc_value), file=sys.stderr)
p.post_mortem(tb)
# usurp django's handler
from django.views import debug
debug.technical_500_response = postmortem if pm else null_technical_500_response
self.use_ipv6 = options.get('use_ipv6')
if self.use_ipv6 and not socket.has_ipv6:
raise CommandError('Your Python does not support IPv6.')
self._raw_ipv6 = False
if not addrport:
try:
addrport = settings.RUNSERVERPLUS_SERVER_ADDRESS_PORT
except AttributeError:
pass
if not addrport:
self.addr = ''
self.port = DEFAULT_PORT
else:
m = re.match(naiveip_re, addrport)
if m is None:
raise CommandError('"%s" is not a valid port number '
'or address:port pair.' % addrport)
self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
if not self.port.isdigit():
raise CommandError("%r is not a valid port number." %
self.port)
if self.addr:
if _ipv6:
self.addr = self.addr[1:-1]
self.use_ipv6 = True
self._raw_ipv6 = True
elif self.use_ipv6 and not _fqdn:
raise CommandError('"%s" is not a valid IPv6 address.'
% self.addr)
if not self.addr:
self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
self._raw_ipv6 = True
self.inner_run(options)
def inner_run(self, options):
try:
from werkzeug import run_simple, DebuggedApplication
from werkzeug.serving import WSGIRequestHandler as _WSGIRequestHandler
# Set colored output
if settings.DEBUG:
try:
set_werkzeug_log_color()
except Exception: # We are dealing with some internals, anything could go wrong
if self.show_startup_messages:
print("Wrapping internal werkzeug logger for color highlighting has failed!")
pass
except ImportError:
raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
class WSGIRequestHandler(_WSGIRequestHandler):
def make_environ(self):
environ = super(WSGIRequestHandler, self).make_environ()
if not options.get('keep_meta_shutdown_func'):
del environ['werkzeug.server.shutdown']
return environ
threaded = options.get('threaded', True)
use_reloader = options.get('use_reloader', True)
open_browser = options.get('open_browser', False)
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
extra_files = options.get('extra_files', None) or []
reloader_interval = options.get('reloader_interval', 1)
reloader_type = options.get('reloader_type', 'auto')
self.nopin = options.get('nopin', False)
if self.show_startup_messages:
print("Performing system checks...\n")
if hasattr(self, 'check'):
self.check(display_num_errors=self.show_startup_messages)
else:
self.validate(display_num_errors=self.show_startup_messages)
try:
self.check_migrations()
except ImproperlyConfigured:
pass
handler = get_internal_wsgi_application()
if USE_STATICFILES:
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
if options.get("cert_path") or options.get("key_file_path"):
"""
OpenSSL is needed for SSL support.
This will make flakes8 throw warning since OpenSSL is not used
directly, alas, this is the only way to show meaningful error
messages. See:
http://lucumr.pocoo.org/2011/9/21/python-import-blackbox/
for more information on python imports.
"""
try:
import OpenSSL # NOQA
except ImportError:
raise CommandError("Python OpenSSL Library is "
"required to use runserver_plus with ssl support. "
"Install via pip (pip install pyOpenSSL).")
certfile, keyfile = self.determine_ssl_files_paths(options)
dir_path, root = os.path.split(certfile)
root, _ = os.path.splitext(root)
try:
from werkzeug.serving import make_ssl_devcert
if os.path.exists(certfile) and os.path.exists(keyfile):
ssl_context = (certfile, keyfile)
else: # Create cert, key files ourselves.
ssl_context = make_ssl_devcert(os.path.join(dir_path, root), host='localhost')
except ImportError:
if self.show_startup_messages:
print("Werkzeug version is less than 0.9, trying adhoc certificate.")
ssl_context = "adhoc"
else:
ssl_context = None
bind_url = "%s://%s:%s/" % (
"https" if ssl_context else "http", self.addr if not self._raw_ipv6 else '[%s]' % self.addr, self.port)
if self.show_startup_messages:
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at %s" % (bind_url,))
print("Using the Werkzeug debugger (http://werkzeug.pocoo.org/)")
print("Quit the server with %s." % quit_command)
if open_browser:
import webbrowser
webbrowser.open(bind_url)
if use_reloader and settings.USE_I18N:
extra_files.extend(filter(lambda filename: filename.endswith('.mo'), gen_filenames()))
# Werkzeug needs to be clued in its the main instance if running
# without reloader or else it won't show key.
# https://git.io/vVIgo
if not use_reloader:
os.environ['WERKZEUG_RUN_MAIN'] = 'true'
# Don't run a second instance of the debugger / reloader
# See also: https://github.com/django-extensions/django-extensions/issues/832
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
if self.nopin:
os.environ['WERKZEUG_DEBUG_PIN'] = 'off'
handler = DebuggedApplication(handler, True)
run_simple(
self.addr,
int(self.port),
handler,
use_reloader=use_reloader,
use_debugger=True,
extra_files=extra_files,
reloader_interval=reloader_interval,
reloader_type=reloader_type,
threaded=threaded,
request_handler=WSGIRequestHandler,
ssl_context=ssl_context,
)
@classmethod
def _create_path_with_extension_from(cls, file_path, extension):
dir_path, cert_file = os.path.split(file_path)
if not dir_path:
dir_path = os.getcwd()
file_name, _ = os.path.splitext(cert_file)
return os.path.join(dir_path, file_name + "." + extension)
@classmethod
def _determine_path_for_file(cls, current_file, other_file, extension):
""" Determine path with proper extension. If path is absent then use path from alternative file.
If path is relative than use current working directory.
:param current_file: path for current file
:param other_file: path for alternative file
:param extension: expected extension
:return: path of this file.
"""
if current_file is None:
return cls._create_path_with_extension_from(other_file, extension)
directory, file = os.path.split(current_file)
file_name, _ = os.path.splitext(file)
if not directory:
return cls._create_path_with_extension_from(current_file, extension)
else:
return os.path.join(directory, file_name + "." + extension)
@classmethod
def determine_ssl_files_paths(cls, options):
cert_file = cls._determine_path_for_file(options.get('cert_path'), options.get('key_file_path'), "crt")
key_file = cls._determine_path_for_file(options.get('key_file_path'), options.get('cert_path'), "key")
return cert_file, key_file
if django.VERSION[:2] <= (1, 9):
def check_migrations(self):
"""
Checks to see if the set of migrations on disk matches the
migrations in the database. Prints a warning if they don't match.
"""
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan and self.show_startup_messages:
self.stdout.write(self.style.NOTICE("\nYou have unapplied migrations; your app may not work properly until they are applied."))
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def set_werkzeug_log_color():
"""Try to set color to the werkzeug log.
"""
from django.core.management.color import color_style
from werkzeug.serving import WSGIRequestHandler
from werkzeug._internal import _log
_style = color_style()
_orig_log = WSGIRequestHandler.log
def werk_log(self, type, message, *args):
try:
msg = '%s - - [%s] %s' % (
self.address_string(),
self.log_date_time_string(),
message % args,
)
http_code = str(args[1])
except Exception:
return _orig_log(type, message, *args)
# Utilize terminal colors, if available
if http_code[0] == '2':
# Put 2XX first, since it should be the common case
msg = _style.HTTP_SUCCESS(msg)
elif http_code[0] == '1':
msg = _style.HTTP_INFO(msg)
elif http_code == '304':
msg = _style.HTTP_NOT_MODIFIED(msg)
elif http_code[0] == '3':
msg = _style.HTTP_REDIRECT(msg)
elif http_code == '404':
msg = _style.HTTP_NOT_FOUND(msg)
elif http_code[0] == '4':
msg = _style.HTTP_BAD_REQUEST(msg)
else:
# Any 5XX, or any other response
msg = _style.HTTP_SERVER_ERROR(msg)
_log(type, msg)
WSGIRequestHandler.log = werk_log
| haakenlid/django-extensions | django_extensions/management/commands/runserver_plus.py | Python | mit | 22,214 |
import pygame, sys
from pygame.locals import *
import re
import json
import imp
import copy
#chessboard = json.load(open("./common/initial_state.json"))
chessboard1 = json.load(open("./common/initial_state.json"))
chessboard2 = json.load(open("./common/initial_state.json"))
chessboard3 = json.load(open("./common/initial_state.json"))
#created 3 chessboards for now
chessboards = [chessboard1, chessboard2, chessboard3]
chessboard = chessboards[0] #current board set to the first.
image_dir = "./res/basic_chess_pieces/"
rules = imp.load_source('chess_basic_rules','./common/rules.py')
cpu = imp.load_source('chess_minimax_ai','./ai/cpu.py')
helper = imp.load_source('helper_functions','./common/helper_functions.py')
opposite = { "white" : "black" , "black" : "white" }
def get_chess_square(x,y,size):
return [ x/size+1,y/size+1]
def get_chess_square_reverse(a,b,size):
return ((a-1)*size/8,(b-1)*size/8)
def get_chess_square_border(r, s, size):
return((r-1)*size/8+2, (s-1)*size/8+2)
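# Note (added): get_chess_square expects the size of a single square (SIZE/8) and maps
# pixel coordinates to 1-based board coordinates, while get_chess_square_reverse and
# get_chess_square_border expect the full board SIZE. Rough sanity check for a
# 600-pixel board (illustrative values):
#   get_chess_square(75, 75, 600/8)      -> [2, 2]
#   get_chess_square_reverse(2, 2, 600)  -> (75, 75)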
pygame.init()
screen = pygame.display.set_mode((600, 600))
def draw_chessboard( board, size,p_list = None):
SIZE = size
GRAY = (150, 150, 150)
WHITE = (255, 255, 255)
BLUE = ( 0 , 0 , 150)
screen.fill(WHITE)
#filling gray square blocks of size/8 alternatively
startX = 0
startY = 0
for e in range(0, 8):
if e%2 == 0 :
startX = 0
else:
startX = SIZE/8
for e2 in range(0, 8):
pygame.draw.rect(screen, GRAY, ((startX, startY), (SIZE/8, SIZE/8)))
startX += 2* SIZE/8
startY += SIZE/8
#placing the correspoding images of the pieces on the blocks
for army in board.keys():
for k in board[army].keys():
img = pygame.image.load(image_dir + army + "_" + re.findall('[a-z]+',k)[0]+'.png')
screen.blit(img,( board[army][k][1]*SIZE/8 - SIZE/8+SIZE/80, board[army][k][0] * SIZE/8 - SIZE/8+SIZE/80 ))
#if any piece is selected and has some legal moves then display blue squares on corresponding valid move block
if p_list:
for p in p_list:
pygame.draw.rect(screen,BLUE,(get_chess_square_reverse(p[1],p[0],SIZE),(SIZE/8,SIZE/8)))
if (p[1]+p[0])%2!=0:
pygame.draw.rect(screen, WHITE, (get_chess_square_border(p[1], p[0], SIZE), (SIZE/8-4, SIZE/8-4)))
else:
pygame.draw.rect(screen, GRAY, (get_chess_square_border(p[1], p[0], SIZE), (SIZE/8-4, SIZE/8-4)))
x, y = p[1], p[0]
for x in ['white','black']:
for k in board[x].keys():
                    if board[x][k][1] == p[1] and board[x][k][0] == p[0]: #print k
                        # every piece type is drawn the same way, so a single branch suffices
                        img = pygame.image.load(image_dir + x + "_" + re.findall('[a-z]+',k)[0]+'.png')
                        screen.blit(img,( board[x][k][1]*SIZE/8 - SIZE/8+SIZE/80, board[x][k][0] * SIZE/8 - SIZE/8+SIZE/80 ))
pygame.display.update()
def looping_cpu_vs_human(board,size):
global chessboards
global flag
SIZE = size
draw_chessboard(board,size)
cur=0
old_x=0
old_y=0
new_x=0
new_y=0
color = "white"
flag= 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
if event.type == pygame.MOUSEBUTTONDOWN:
if flag == 1:
flag =0
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
#print new_x,new_y
valid = False
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()):
del board[opposite[x]][killed_piece]
break
draw_chessboard(board,size)
#move = cpu.minimax(board,opposite[x],1) ##depth is 1
#CPU turn
move = cpu.alpha_beta_pruning(board,opposite[x],3)
#board = helper.generate_board(board,move)
#referencing the new board generated by helper first to chessboard array element
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
draw_chessboard(board,size)
break #Break here is necessary since we are deleting a key from the map on which we are iterating
else:
print "here"
x,y= pygame.mouse.get_pos()
old_x,old_y = get_chess_square(x,y,SIZE/8)
p= []
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y: #print k
if "bishop" in k:
p= rules.legal_bishop_moves(board,x,k)
elif "pawn" in k:
p= rules.legal_pawn_moves(board,x,k)
elif "knight" in k:
p= rules.legal_knight_moves(board,x,k)
elif "rook" in k:
p= rules.legal_rook_moves(board,x,k)
elif "queen" in k:
p= rules.legal_queen_moves(board,x,k)
elif "king" in k:
p= rules.legal_king_moves( board,x,k)
draw_chessboard(board,size,p)
#print old_x,old_y
if event.type == pygame.MOUSEBUTTONUP:
print "here1"
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
if new_x == old_x and new_y == old_y:
flag = 1
continue
else:
#print new_x,new_y
valid = False
for x in ['white','black']:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()):
del board[opposite[x]][killed_piece]
break
draw_chessboard(board,size) #move = cpu.minimax(board,opposite[x],1) ##depth is 1
#CPU turn
move = cpu.alpha_beta_pruning(board,opposite[x],7)
#board = helper.generate_board(board,move)
#referencing the new board generated by helper first to chessboard array element
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
draw_chessboard(board,size)
break #Break here is necessary since we are deleting a key from the map on which we are iterating
def looping_cpu_vs_cpu(board,size):
global chessboards
draw_chessboard(board,size)
color = "white"
cur = 0
#print board
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
move = cpu.alpha_beta_pruning_python_native(board,color,1) #depth is 1
#move = cpu.alpha_beta_pruning(board,color,7)
chessboards[cur] = helper.generate_board(board,move)
board = chessboards[cur]
color = opposite[color]
draw_chessboard(board,size)
def looping_human_vs_human(board, size):
global chessboards
global flag
SIZE = size
draw_chessboard(board,size)
cur=0
old_x=0
old_y=0
new_x=0
new_y=0
color = "white"
flag = 0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
#checking for keyboard events
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RIGHT:
cur = (cur+1)%3
board = chessboards[cur]
if event.key == pygame.K_LEFT:
cur = (cur+2)%3
board = chessboards[cur]
#updating the screen with the next or prev chessboard
draw_chessboard(board,size)
if event.type == pygame.MOUSEBUTTONDOWN:
if flag == 1:
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
#print new_x,new_y
valid = False
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()): del board[opposite[x]][killed_piece]
draw_chessboard(board,size)
color = opposite[color]
break
flag = 0
else:
x,y= pygame.mouse.get_pos()
old_x,old_y = get_chess_square(x,y,SIZE/8)
p= []
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
#print k
if "bishop" in k:
p= rules.legal_bishop_moves(board,x,k)
elif "pawn" in k:
#print "hey"
p= rules.legal_pawn_moves(board,x,k)
elif "knight" in k:
p= rules.legal_knight_moves(board,x,k)
elif "rook" in k:
p= rules.legal_rook_moves(board,x,k)
elif "queen" in k:
p= rules.legal_queen_moves(board,x,k)
elif "king" in k:
p= rules.legal_king_moves( board,x,k)
draw_chessboard(board,size,p)
#print old_x,old_y
if event.type == pygame.MOUSEBUTTONUP:
x,y= pygame.mouse.get_pos()
new_x,new_y = get_chess_square(x,y,SIZE/8)
if new_x == old_x and new_y == old_y:
flag = 1
continue
else:
#print new_x,new_y
valid = False
for x in [color]:
for k in board[x].keys():
if board[x][k][1] == old_x and board[x][k][0] == old_y:
if "bishop" in k:
if [new_y,new_x] in rules.legal_bishop_moves(board,x,k): valid = True
elif "pawn" in k:
if [new_y,new_x] in rules.legal_pawn_moves(board,x,k): valid = True
elif "knight" in k:
if [new_y,new_x] in rules.legal_knight_moves(board,x,k): valid = True
elif "rook" in k:
if [new_y,new_x] in rules.legal_rook_moves(board,x,k): valid = True
elif "queen" in k:
if [new_y,new_x] in rules.legal_queen_moves(board,x,k): valid = True
elif "king" in k:
if [new_y,new_x] in rules.legal_king_moves(board,x,k): valid = True
#if piece is moved to valid position then update the piece's coordinates and check if it is killing other piece
if valid and x == color:
board[x][k][1] = new_x
board[x][k][0] = new_y
killed_piece = None
for k,v in board[opposite[x]].iteritems():
if v[0] == new_y and v[1] == new_x:
killed_piece = k
if killed_piece and (killed_piece in board[opposite[x]].keys()): del board[opposite[x]][killed_piece]
draw_chessboard(board,size)
color = opposite[color]
break
##main loop ...
#looping_cpu_vs_human( chessboard,600)
#looping_cpu_vs_cpu( chessboard,600)
| OpenC-IIIT/prosfair | gui/gui_basic.py | Python | mit | 23,370 |
import chess
chess.run() | renatopp/liac-chess | main.py | Python | mit | 25 |
"""Auto-generated file, do not edit by hand. PG metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PG = PhoneMetadata(id='PG', country_code=675, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[1-9]\\d{6,7}', possible_length=(7, 8)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[0-2]\\d|4[257]\\d|5[34]\\d|64[1-9]|77(?:[0-24]\\d|30)|85[02-46-9]|9[78]\\d)\\d{4}', example_number='3123456', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:20150|68\\d{2}|7(?:[0-689]\\d|75)\\d{2})\\d{3}', example_number='6812345', possible_length=(7, 8)),
toll_free=PhoneNumberDesc(national_number_pattern='180\\d{4}', example_number='1801234', possible_length=(7,)),
voip=PhoneNumberDesc(national_number_pattern='27[568]\\d{4}', example_number='2751234', possible_length=(7,)),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['[13-689]|27']),
NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['20|7'])])
| samdowd/drumm-farm | drumm_env/lib/python2.7/site-packages/phonenumbers/data/region_PG.py | Python | mit | 1,122 |
# -*- coding: utf-8 -*-
from ethereum.utils import sha3, encode_hex, denoms
from raiden.utils import privatekey_to_address
from raiden.tests.utils.blockchain import GENESIS_STUB
CLUSTER_NAME = 'raiden'
def generate_accounts(seeds):
"""Create private keys and addresses for all seeds.
"""
return {
seed: dict(
privatekey=encode_hex(sha3(seed)),
address=encode_hex(privatekey_to_address(sha3(seed)))
) for seed in seeds}
def mk_genesis(accounts, initial_alloc=denoms.ether * 100000000):
"""
Create a genesis-block dict with allocation for all `accounts`.
:param accounts: list of account addresses (hex)
:param initial_alloc: the amount to allocate for the `accounts`
:return: genesis dict
"""
genesis = GENESIS_STUB.copy()
genesis['extraData'] = CLUSTER_NAME
genesis['alloc'] = {
account: {
'balance': str(initial_alloc)
}
for account in accounts
}
# add the one-privatekey account ("1" * 64) for convenience
genesis['alloc']['19e7e376e7c213b7e7e7e46cc70a5dd086daff2a'] = dict(balance=str(initial_alloc))
return genesis
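# Example usage (added sketch; the seed names are arbitrary):
#
#     accounts = generate_accounts(['node1', 'node2'])
#     genesis = mk_genesis([acc['address'] for acc in accounts.values()])
#     # 'genesis' can then be dumped to JSON and passed to `geth init`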
| tomaaron/raiden | tools/genesis_builder.py | Python | mit | 1,166 |
import pyintersim.corelib as corelib
import ctypes
### Load Library
_core = corelib.LoadCoreLibrary()
### State wrapper class to be used in 'with' statement
class State:
"""Wrapper Class for the State"""
def __init__(self):
self.p_state = setup()
def __enter__(self):
return self.p_state
def __exit__(self, exc_type, exc_value, traceback):
delete(self.p_state)
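### Example usage (added sketch): the wrapper above is meant for 'with' blocks, e.g.
#
#     with State() as p_state:
#         ...  # pass p_state to the other wrappers in this module
#
# delete(p_state) is called automatically when the block exits.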
### Setup State
_State_Setup = _core.State_Setup
_State_Setup.argtypes = None
_State_Setup.restype = ctypes.c_void_p
def setup():
return _State_Setup()
### Delete State
_State_Delete = _core.State_Delete
_State_Delete.argtypes = [ctypes.c_void_p]
_State_Delete.restype = None
def delete(p_state):
return _State_Delete(p_state) | GPMueller/intersim | core/pyintersim/state.py | Python | mit | 743 |
import importlib
def import_string(import_name: str):
"""
Import an object based on the import string.
Separate module name from the object name with ":". For example,
"linuguee_api.downloaders:HTTPXDownloader"
"""
if ":" not in import_name:
raise RuntimeError(
f'{import_name} must separate module from object with ":". '
f'For example, "linguee_api.downloaders:HTTPXDownloader"'
)
module_name, object_name = import_name.rsplit(":", 1)
mod = importlib.import_module(module_name)
return getattr(mod, object_name)
| imankulov/linguee-api | linguee_api/utils.py | Python | mit | 592 |
import numpy as np
import pandas as pd
import pickle
# Return True or False based on whether the course fulfills a General Education Requirement
def lookupGenEd(cNum, college):
    fileName = "data/Dietrich Gen Eds.csv"
    picklepath = "data/dietrich_gen_eds.p"
try:
with open(picklepath,'rb') as file:
gen_eds = pickle.load(file)
except:
df = pd.read_csv(fileName,names=['Dept','Num','Title','1','2'])
gen_eds = set(df['Dept'].values)
with open(picklepath,'wb') as file:
pickle.dump(gen_eds,file)
return cNum in gen_eds
'''
genEdubility = lookupGenEd(73100, "dietrich")
print("73100")
print('Is Gen Ed?:', genEdubility)
print()
genEdubility = lookupGenEd(70100, "tepper")
print("70100")
print('Is Gen Ed?:', genEdubility)
print()
genEdubility = lookupGenEd(15322, "scs")
print("15322")
print('Is Gen Ed?:', genEdubility)
print()
''' | calvinhklui/Schedulize | GenEdLookup.py | Python | mit | 931 |
import numpy as np, time, itertools
from collections import OrderedDict
from .misc_utils import *
from . import distributions
concat = np.concatenate
import theano.tensor as T, theano
from importlib import import_module
import scipy.optimize
from .keras_theano_setup import floatX, FNOPTS
from keras.layers.core import Layer
from .filters import *
from .filtered_env import *
import random
import copy
import opensim as osim
from osim.env import *
# ================================================================
# Make agent
# ================================================================
def get_agent_cls(name):
p, m = name.rsplit('.', 1)
mod = import_module(p)
constructor = getattr(mod, m)
return constructor
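# Example (added sketch; the agent path below is illustrative of the expected
# "package.module.Class" form):
#
#     AgentCls = get_agent_cls("modular_rl.agentzoo.TrpoAgent")
#     agent = AgentCls(env.observation_space, env.action_space, usercfg)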
# ================================================================
# Stats
# ================================================================
def add_episode_stats(stats, paths):
reward_key = "reward_raw" if "reward_raw" in paths[0] else "reward"
episoderewards = np.array([path[reward_key].sum() for path in paths])
pathlengths = np.array([pathlength(path) for path in paths])
stats["EpisodeRewards"] = episoderewards
stats["EpisodeLengths"] = pathlengths
stats["NumEpBatch"] = len(episoderewards)
stats["EpRewMean"] = episoderewards.mean()
stats["EpRewSEM"] = episoderewards.std()/np.sqrt(len(paths))
stats["EpRewMax"] = episoderewards.max()
stats["EpRewMin"] = episoderewards.min()
stats["EpLenMean"] = pathlengths.mean()
stats["EpLenMax"] = pathlengths.max()
stats["EpLenMin"] = pathlengths.min()
stats["RewPerStep"] = episoderewards.sum()/pathlengths.sum()
def add_prefixed_stats(stats, prefix, d):
for (k,v) in d.items():
stats[prefix+"_"+k] = v
# ================================================================
# Policy Gradients
# ================================================================
def compute_advantage(vf, paths, gamma, lam):
# Compute return, baseline, advantage
for path in paths:
path["return"] = discount(path["reward"], gamma)
b = path["baseline"] = vf.predict(path)
b1 = np.append(b, 0 if path["terminated"] else b[-1])
deltas = path["reward"] + gamma*b1[1:] - b1[:-1]
path["advantage"] = discount(deltas, gamma * lam)
alladv = np.concatenate([path["advantage"] for path in paths])
# Standardize advantage
std = alladv.std()
mean = alladv.mean()
for path in paths:
path["advantage"] = (path["advantage"] - mean) / std
PG_OPTIONS = [
("timestep_limit", int, 0, "maximum length of trajectories"),
("n_iter", int, 200, "number of batch"),
("parallel", int, 0, "collect trajectories in parallel"),
("timesteps_per_batch", int, 10000, ""),
("gamma", float, 0.99, "discount"),
("lam", float, 1.0, "lambda parameter from generalized advantage estimation"),
]
def run_policy_gradient_algorithm(env, agent, usercfg=None, callback=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
def run_policy_gradient_algorithm_hardmining(env, agent, usercfg=None, callback=None, seed_iter=None):
cfg = update_default_config(PG_OPTIONS, usercfg)
cfg.update(usercfg)
print("policy gradient config", cfg)
# if cfg["parallel"]:
# raise NotImplementedError
tstart = time.time()
if seed_iter is None:
seed_iter = itertools.count()
for _ in range(cfg["n_iter"]):
# Rollouts ========
paths = get_paths(env, agent, cfg, seed_iter)
paths_subsampled = paths #subsample_paths(paths)
compute_advantage(agent.baseline, paths_subsampled, gamma=cfg["gamma"], lam=cfg["lam"])
# VF Update ========
vf_stats = agent.baseline.fit(paths_subsampled)
# Pol Update ========
pol_stats = agent.updater(paths_subsampled)
# Stats ========
stats = OrderedDict()
add_episode_stats(stats, paths)
add_prefixed_stats(stats, "vf", vf_stats)
add_prefixed_stats(stats, "pol", pol_stats)
stats["TimeElapsed"] = time.time() - tstart
if callback: callback(stats)
# def subsample_paths(gpaths):
# paths = copy.deepcopy(gpaths)
# for i in range(len(paths)):
# plen = paths[i]['action'].shape[0]
# rno = random.sample(range(plen), 2*plen/3)
# for j in paths[i].keys():
# paths[i][j] = np.delete(paths[i][j], rno, axis=0)
# return paths
def parallel_rollout_worker((agent, ts_limit, ts_batch, iffilter, seed)):
try:
# print("Paralel rollout has been called")
return do_rollouts_serial(agent, ts_limit, ts_batch, iffilter, seed)
except Exception, e:
print("Exception in rollout worker: %s" % e)
import traceback; traceback.print_exc()
raise
def get_paths(env, agent, cfg, seed_iter):
paths = []
if cfg["parallel"]:
start_time = time.time()
from multiprocessing import Pool
# from pathos.multiprocessing import ProcessPool as Pool
num_processes = int(cfg["parallel"])
pool = Pool(processes=num_processes)
# very simple scheme, split work evenly among pool workers (queue would be better)
try:
def callback(result):
print("Length of paths: ", len(result), type(result))
paths.extend([path for paths_list in result for path in paths_list])
args_list = [(agent,
cfg['timestep_limit'],
cfg['timesteps_per_batch'] / num_processes,
cfg['filter'], next(seed_iter)
) for _ in range(num_processes)]
print(args_list)
result = pool.map_async(parallel_rollout_worker, args_list, callback=callback)
# result = pool.map(parallel_rollout_worker, args_list)
result.wait()#1e5)
if not paths:
# print("Paths is still empty")
# raise Exception
result.get()
except KeyboardInterrupt:
pool.terminate()
raise
except Exception:
pool.terminate()
raise
else:
pool.close()
finally:
pool.join()
print("Time elapsed (%d workers): %.2f" % (num_processes, time.time() - start_time))
else:
paths = do_rollouts_serial(agent, cfg["timestep_limit"], cfg["timesteps_per_batch"], cfg["filter"], next(seed_iter))
return paths
def rollout(env, agent, timestep_limit, seed):
"""
Simulate the env and agent for timestep_limit steps
"""
ob = env._reset(difficulty = 2, seed = seed)
terminated = False
data = defaultdict(list)
for _ in range(timestep_limit):
ob = agent.obfilt(ob)
data["observation"].append(ob)
action, agentinfo = agent.act(ob)
data["action"].append(action)
for (k,v) in agentinfo.items():
data[k].append(v)
ob,rew,done,envinfo = env.step(action)
data["reward"].append(rew)
rew = agent.rewfilt(rew)
for (k,v) in envinfo.items():
data[k].append(v)
if done:
terminated = True
break
data = {k:np.array(v) for (k,v) in data.items()}
data["terminated"] = terminated
return data
def do_rollouts_serial(agent, timestep_limit, n_timesteps, iffilter, seed):
env = RunEnv(False)
if iffilter==2:
ofd = FeatureInducer(env.observation_space)
env = FilteredEnv(env, ob_filter=ofd)
elif iffilter==1:
ofd = ConcatPrevious(env.observation_space)
env = FilteredEnv(env, ob_filter=ofd)
paths = []
timesteps_sofar = 0
while True:
np.random.seed(seed)
path = rollout(env, agent, timestep_limit, seed)
paths.append(path)
timesteps_sofar += pathlength(path)
if timesteps_sofar > n_timesteps:
break
print("Length of paths: ", len(paths))
env.close()
return paths
def pathlength(path):
return len(path["action"])
def animate_rollout(env, agent, n_timesteps,delay=.01):
total_reward = 0.
ob = env.reset()
print("Applying filter on Environment")
ofd = ConcatPrevious(env.observation_space)
# ob = ofd(ob)
# env.render()
# ob = np.array(ob)
for i in range(n_timesteps):
ob = ofd(ob)
ob = agent.obfilt(ob)
a, _info = agent.act(ob)
ob, _rew, done, _info = env.step(a)
# _rew = agent.rewfilt(_rew)
total_reward += _rew
# env.render()
ob = np.array(ob)
if done:
print(("terminated after %s timesteps"%i))
break
time.sleep(delay)
print(a.tolist())
print("Total episode reward = {}".format(total_reward))
# ================================================================
# Stochastic policies
# ================================================================
class StochPolicy(object):
@property
def probtype(self):
raise NotImplementedError
@property
def trainable_variables(self):
raise NotImplementedError
@property
def input(self):
raise NotImplementedError
def get_output(self):
raise NotImplementedError
def act(self, ob, stochastic=True):
prob = self._act_prob(ob[None])
if stochastic:
return self.probtype.sample(prob)[0], {"prob" : prob[0]}
else:
return self.probtype.maxprob(prob)[0], {"prob" : prob[0]}
def finalize(self):
self._act_prob = theano.function([self.input], self.get_output(), **FNOPTS)
class ProbType(object):
def sampled_variable(self):
raise NotImplementedError
def prob_variable(self):
raise NotImplementedError
def likelihood(self, a, prob):
raise NotImplementedError
def loglikelihood(self, a, prob):
raise NotImplementedError
def kl(self, prob0, prob1):
raise NotImplementedError
def entropy(self, prob):
raise NotImplementedError
def maxprob(self, prob):
raise NotImplementedError
class StochPolicyKeras(StochPolicy, EzPickle):
def __init__(self, net, probtype):
EzPickle.__init__(self, net, probtype)
self._net = net
self._probtype = probtype
self.finalize()
@property
def probtype(self):
return self._probtype
@property
def net(self):
return self._net
@property
def trainable_variables(self):
return self._net.trainable_weights
@property
def variables(self):
return self._net.get_params()[0]
@property
def input(self):
return self._net.input
def get_output(self):
return self._net.output
def get_updates(self):
self._net.output #pylint: disable=W0104
return self._net.updates
def get_flat(self):
return flatten(self.net.get_weights())
def set_from_flat(self, th):
weights = self.net.get_weights()
self._weight_shapes = [weight.shape for weight in weights]
self.net.set_weights(unflatten(th, self._weight_shapes))
class Categorical(ProbType):
def __init__(self, n):
self.n = n
def sampled_variable(self):
return T.ivector('a')
def prob_variable(self):
return T.matrix('prob')
def likelihood(self, a, prob):
return prob[T.arange(prob.shape[0]), a]
def loglikelihood(self, a, prob):
return T.log(self.likelihood(a, prob))
def kl(self, prob0, prob1):
return (prob0 * T.log(prob0/prob1)).sum(axis=1)
def entropy(self, prob0):
return - (prob0 * T.log(prob0)).sum(axis=1)
def sample(self, prob):
return distributions.categorical_sample(prob)
def maxprob(self, prob):
return prob.argmax(axis=1)
class CategoricalOneHot(ProbType):
def __init__(self, n):
self.n = n
def sampled_variable(self):
return T.matrix('a')
def prob_variable(self):
return T.matrix('prob')
def likelihood(self, a, prob):
return (a * prob).sum(axis=1)
def loglikelihood(self, a, prob):
return T.log(self.likelihood(a, prob))
def kl(self, prob0, prob1):
return (prob0 * T.log(prob0/prob1)).sum(axis=1)
def entropy(self, prob0):
return - (prob0 * T.log(prob0)).sum(axis=1)
def sample(self, prob):
assert prob.ndim == 2
inds = distributions.categorical_sample(prob)
out = np.zeros_like(prob)
out[np.arange(prob.shape[0]), inds] = 1
return out
def maxprob(self, prob):
out = np.zeros_like(prob)
        out[np.arange(prob.shape[0]), prob.argmax(axis=1)] = 1
        return out
class DiagGauss(ProbType):
def __init__(self, d):
self.d = d
def sampled_variable(self):
return T.matrix('a')
def prob_variable(self):
return T.matrix('prob')
def loglikelihood(self, a, prob):
mean0 = prob[:,:self.d]
std0 = prob[:, self.d:]
# exp[ -(a - mu)^2/(2*sigma^2) ] / sqrt(2*pi*sigma^2)
return - 0.5 * T.square((a - mean0) / std0).sum(axis=1) - 0.5 * T.log(2.0 * np.pi) * self.d - T.log(std0).sum(axis=1)
def likelihood(self, a, prob):
return T.exp(self.loglikelihood(a, prob))
def kl(self, prob0, prob1):
mean0 = prob0[:, :self.d]
std0 = prob0[:, self.d:]
mean1 = prob1[:, :self.d]
std1 = prob1[:, self.d:]
return T.log(std1 / std0).sum(axis=1) + ((T.square(std0) + T.square(mean0 - mean1)) / (2.0 * T.square(std1))).sum(axis=1) - 0.5 * self.d
def entropy(self, prob):
std_nd = prob[:, self.d:]
return T.log(std_nd).sum(axis=1) + .5 * np.log(2 * np.pi * np.e) * self.d
def sample(self, prob):
mean_nd = prob[:, :self.d]
std_nd = prob[:, self.d:]
return np.random.randn(prob.shape[0], self.d).astype(floatX) * std_nd + mean_nd
def maxprob(self, prob):
return prob[:, :self.d]
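# Note (added): DiagGauss expects each row of `prob` to be the concatenation
# [mean_1..mean_d, std_1..std_d]; e.g. for d=2 the row [0., 1., 0.5, 2.] describes a
# Gaussian with means (0, 1) and standard deviations (0.5, 2).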
def test_probtypes():
theano.config.floatX = 'float64'
np.random.seed(0)
prob_diag_gauss = np.array([-.2, .3, .4, -.5, 1.1, 1.5, .1, 1.9])
diag_gauss = DiagGauss(prob_diag_gauss.size // 2)
yield validate_probtype, diag_gauss, prob_diag_gauss
prob_categorical = np.array([.2, .3, .5])
categorical = Categorical(prob_categorical.size)
yield validate_probtype, categorical, prob_categorical
def validate_probtype(probtype, prob):
N = 100000
# Check to see if mean negative log likelihood == differential entropy
Mval = np.repeat(prob[None, :], N, axis=0)
M = probtype.prob_variable()
X = probtype.sampled_variable()
calcloglik = theano.function([X, M], T.log(probtype.likelihood(X, M)), allow_input_downcast=True)
calcent = theano.function([M], probtype.entropy(M), allow_input_downcast=True)
Xval = probtype.sample(Mval)
logliks = calcloglik(Xval, Mval)
entval_ll = - logliks.mean()
entval_ll_stderr = logliks.std() / np.sqrt(N)
entval = calcent(Mval).mean()
print(entval, entval_ll, entval_ll_stderr)
assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas
# Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
M2 = probtype.prob_variable()
q = prob + np.random.randn(prob.size) * 0.1
Mval2 = np.repeat(q[None, :], N, axis=0)
calckl = theano.function([M, M2], probtype.kl(M, M2), allow_input_downcast=True)
klval = calckl(Mval, Mval2).mean()
logliks = calcloglik(Xval, Mval2)
klval_ll = - entval - logliks.mean()
klval_ll_stderr = logliks.std() / np.sqrt(N)
print(klval, klval_ll, klval_ll_stderr)
assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
# ================================================================
# Value functions
# ================================================================
class Baseline(object):
def fit(self, paths):
raise NotImplementedError
def predict(self, path):
raise NotImplementedError
class TimeDependentBaseline(Baseline):
def __init__(self):
self.baseline = None
def fit(self, paths):
rets = [path["return"] for path in paths]
maxlen = max(len(ret) for ret in rets)
retsum = np.zeros(maxlen)
retcount = np.zeros(maxlen)
for ret in rets:
retsum[:len(ret)] += ret
retcount[:len(ret)] += 1
retmean = retsum / retcount
i_depletion = np.searchsorted(-retcount, -4)
self.baseline = retmean[:i_depletion]
pred = concat([self.predict(path) for path in paths])
return {"EV" : explained_variance(pred, concat(rets))}
def predict(self, path):
if self.baseline is None:
return np.zeros(pathlength(path))
else:
lenpath = pathlength(path)
lenbase = len(self.baseline)
if lenpath > lenbase:
return concat([self.baseline, self.baseline[-1] + np.zeros(lenpath-lenbase)])
else:
return self.baseline[:lenpath]
class NnRegression(EzPickle):
def __init__(self, net, mixfrac=1.0, maxiter=25):
EzPickle.__init__(self, net, mixfrac, maxiter)
self.net = net
self.mixfrac = mixfrac
x_nx = net.input
self.predict = theano.function([x_nx], net.output, **FNOPTS)
ypred_ny = net.output
ytarg_ny = T.matrix("ytarg")
var_list = net.trainable_weights
l2 = 1e-3 * T.add(*[T.square(v).sum() for v in var_list])
N = x_nx.shape[0]
mse = T.sum(T.square(ytarg_ny - ypred_ny))/N
symb_args = [x_nx, ytarg_ny]
loss = mse + l2
self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter, extra_losses={"mse":mse, "l2":l2})
def fit(self, x_nx, ytarg_ny):
nY = ytarg_ny.shape[1]
ypredold_ny = self.predict(x_nx)
out = self.opt.update(x_nx, ytarg_ny*self.mixfrac + ypredold_ny*(1-self.mixfrac))
yprednew_ny = self.predict(x_nx)
out["PredStdevBefore"] = ypredold_ny.std()
out["PredStdevAfter"] = yprednew_ny.std()
out["TargStdev"] = ytarg_ny.std()
if nY==1:
out["EV_before"] = explained_variance_2d(ypredold_ny, ytarg_ny)[0]
out["EV_after"] = explained_variance_2d(yprednew_ny, ytarg_ny)[0]
else:
out["EV_avg"] = explained_variance(yprednew_ny.ravel(), ytarg_ny.ravel())
return out
class NnVf(object):
def __init__(self, net, timestep_limit, regression_params):
self.reg = NnRegression(net, **regression_params)
self.timestep_limit = timestep_limit
def predict(self, path):
ob_no = self.preproc(path["observation"])
return self.reg.predict(ob_no)[:,0]
def fit(self, paths):
ob_no = concat([self.preproc(path["observation"]) for path in paths], axis=0)
vtarg_n1 = concat([path["return"] for path in paths]).reshape(-1,1)
return self.reg.fit(ob_no, vtarg_n1)
def preproc(self, ob_no):
return concat([ob_no, np.arange(len(ob_no)).reshape(-1,1) / float(self.timestep_limit)], axis=1)
class NnCpd(EzPickle):
def __init__(self, net, probtype, maxiter=25):
EzPickle.__init__(self, net, probtype, maxiter)
self.net = net
x_nx = net.input
prob = net.output
a = probtype.sampled_variable()
var_list = net.trainable_weights
loglik = probtype.loglikelihood(a, prob)
self.loglikelihood = theano.function([a, x_nx], loglik, **FNOPTS)
loss = - loglik.mean()
symb_args = [x_nx, a]
self.opt = LbfgsOptimizer(loss, var_list, symb_args, maxiter=maxiter)
def fit(self, x_nx, a):
return self.opt.update(x_nx, a)
class SetFromFlat(object):
def __init__(self, var_list):
theta = T.vector()
start = 0
updates = []
for v in var_list:
shape = v.shape
size = T.prod(shape)
updates.append((v, theta[start:start+size].reshape(shape)))
start += size
self.op = theano.function([theta],[], updates=updates,**FNOPTS)
def __call__(self, theta):
self.op(theta.astype(floatX))
class GetFlat(object):
def __init__(self, var_list):
self.op = theano.function([], T.concatenate([v.flatten() for v in var_list]),**FNOPTS)
def __call__(self):
return self.op() #pylint: disable=E1101
class EzFlat(object):
def __init__(self, var_list):
self.gf = GetFlat(var_list)
self.sff = SetFromFlat(var_list)
def set_params_flat(self, theta):
self.sff(theta)
def get_params_flat(self):
return self.gf()
class LbfgsOptimizer(EzFlat):
def __init__(self, loss, params, symb_args, extra_losses=None, maxiter=25):
EzFlat.__init__(self, params)
self.all_losses = OrderedDict()
self.all_losses["loss"] = loss
if extra_losses is not None:
self.all_losses.update(extra_losses)
self.f_lossgrad = theano.function(list(symb_args), [loss, flatgrad(loss, params)],**FNOPTS)
self.f_losses = theano.function(symb_args, list(self.all_losses.values()),**FNOPTS)
self.maxiter=maxiter
def update(self, *args):
thprev = self.get_params_flat()
def lossandgrad(th):
self.set_params_flat(th)
l,g = self.f_lossgrad(*args)
g = g.astype('float64')
return (l,g)
losses_before = self.f_losses(*args)
theta, _, opt_info = scipy.optimize.fmin_l_bfgs_b(lossandgrad, thprev, maxiter=self.maxiter)
del opt_info['grad']
print(opt_info)
self.set_params_flat(theta)
losses_after = self.f_losses(*args)
info = OrderedDict()
for (name,lossbefore, lossafter) in zip(list(self.all_losses.keys()), losses_before, losses_after):
info[name+"_before"] = lossbefore
info[name+"_after"] = lossafter
return info
def numel(x):
return T.prod(x.shape)
def flatgrad(loss, var_list):
grads = T.grad(loss, var_list)
return T.concatenate([g.flatten() for g in grads])
# ================================================================
# Keras
# ================================================================
class ConcatFixedStd(Layer):
input_ndim = 2
def __init__(self, **kwargs):
Layer.__init__(self, **kwargs)
def build(self, input_shape):
input_dim = input_shape[1]
self.logstd = theano.shared(np.zeros(input_dim,floatX), name='{}_logstd'.format(self.name))
self.trainable_weights = [self.logstd]
super(ConcatFixedStd, self).build(input_shape)
    def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1] * 2)
def call(self, x, mask=None):
Mean = x
Std = T.repeat(T.exp(self.logstd)[None, :], Mean.shape[0], axis=0)
return T.concatenate([Mean, Std], axis=1)
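# Example (added sketch, assuming the Keras Sequential API; the layer sizes and the
# ob_dim/ac_dim names are placeholders): ConcatFixedStd is appended after the mean
# output so the network emits [means, stds] rows in the format DiagGauss expects.
#
#     from keras.models import Sequential
#     from keras.layers.core import Dense
#     net = Sequential()
#     net.add(Dense(64, activation='tanh', input_shape=(ob_dim,)))
#     net.add(Dense(ac_dim))
#     net.add(ConcatFixedStd())
#     policy = StochPolicyKeras(net, DiagGauss(ac_dim))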
# ================================================================
# Video monitoring
# ================================================================
def VIDEO_NEVER(_):
return False
def VIDEO_ALWAYS(_):
return True
| abhinavagarwalla/modular_rl | modular_rl/core.py | Python | mit | 24,088 |
#!/usr/bin/python
#################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
import os, argparse
from Workspace import Workspace
# class Pygling
class Pygling:
@staticmethod
def main():
name = os.path.basename( os.getcwd() )
# Parse arguments
parser = argparse.ArgumentParser( prog = 'Pygling', description = 'Pygling C++ workspace generator.', prefix_chars = '--', formatter_class = argparse.ArgumentDefaultsHelpFormatter )
parser.add_argument( "action", type = str, help = "Action", choices = ["configure", "build", "install"] )
parser.add_argument( "-p", "--platform", default = 'all', type = str, help = "Target platform" )
parser.add_argument( "-s", "--source", default = '.', type = str, help = "Project source path" )
parser.add_argument( "-o", "--output", default = 'projects', type = str, help = "Output path" )
parser.add_argument( "-n", "--name", default = name, type = str, help = "Workspace (solution) name" )
parser.add_argument( "-a", "--arch", default = 'default', type = str, help = "Target build architecture" )
parser.add_argument( "-x", "--std", default = 'cxx98', type = str, help = "C++ standard", choices = ['cxx99', 'cxx11'] )
parser.add_argument( "-c", "--configuration", default = 'Release', type = str, help = "Build configuration" )
parser.add_argument( "--package", type = str, help = "Application package identifier" )
parser.add_argument( "--platformSdk", type = str, help = "Platform SDK identifier" )
parser.add_argument( "--xcteam", type = str, help = "Xcode provisioning profile to be used" )
# Check action
args, unknown = parser.parse_known_args()
workspace = Workspace(args.name, args.source, args.output, args, unknown)
if args.action == 'configure': workspace.configure(args.platform)
elif args.action == 'build': workspace.build(args.platform)
elif args.action == 'install': workspace.install(args.platform)
# Entry point
if __name__ == "__main__":
Pygling.main() | dmsovetov/pygling | Pygling/__main__.py | Python | mit | 3,421 |
from captcha.fields import CaptchaField
from django import forms
from django.contrib.auth.models import User
from django.http import HttpResponse
try:
from django.template import engines
__is_18 = True
except ImportError:
from django.template import loader
__is_18 = False
TEST_TEMPLATE = r"""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8">
<title>captcha test</title>
</head>
<body>
{% if passed %}
<p style="color:green">Form validated</p>
{% endif %}
{% if form.errors %}
{{form.errors}}
{% endif %}
<form action="{% url 'captcha-test' %}" method="post">
{{form.as_p}}
<p><input type="submit" value="Continue →"></p>
</form>
</body>
</html>
"""
def _get_template(template_string):
if __is_18:
return engines["django"].from_string(template_string)
else:
return loader.get_template_from_string(template_string)
def _test(request, form_class):
passed = False
if request.POST:
form = form_class(request.POST)
if form.is_valid():
passed = True
else:
form = form_class()
t = _get_template(TEST_TEMPLATE)
return HttpResponse(t.render(context=dict(passed=passed, form=form), request=request))
def test(request):
class CaptchaTestForm(forms.Form):
subject = forms.CharField(max_length=100)
sender = forms.EmailField()
captcha = CaptchaField(help_text="asdasd")
return _test(request, CaptchaTestForm)
def test_model_form(request):
class CaptchaTestModelForm(forms.ModelForm):
subject = forms.CharField(max_length=100)
sender = forms.EmailField()
captcha = CaptchaField(help_text="asdasd")
class Meta:
model = User
fields = ("subject", "sender", "captcha")
return _test(request, CaptchaTestModelForm)
def test_custom_generator(request):
class CaptchaTestModelForm(forms.ModelForm):
subject = forms.CharField(max_length=100)
sender = forms.EmailField()
captcha = CaptchaField(generator=lambda: ("111111", "111111"))
class Meta:
model = User
fields = ("subject", "sender", "captcha")
return _test(request, CaptchaTestModelForm)
def test_custom_error_message(request):
class CaptchaTestErrorMessageForm(forms.Form):
captcha = CaptchaField(
help_text="asdasd", error_messages=dict(invalid="TEST CUSTOM ERROR MESSAGE")
)
return _test(request, CaptchaTestErrorMessageForm)
def test_per_form_format(request):
class CaptchaTestFormatForm(forms.Form):
captcha = CaptchaField(
help_text="asdasd",
error_messages=dict(invalid="TEST CUSTOM ERROR MESSAGE"),
output_format=(
"%(image)s testPerFieldCustomFormatString "
"%(hidden_field)s %(text_field)s"
),
)
return _test(request, CaptchaTestFormatForm)
def test_non_required(request):
class CaptchaTestForm(forms.Form):
sender = forms.EmailField()
subject = forms.CharField(max_length=100)
captcha = CaptchaField(help_text="asdasd", required=False)
return _test(request, CaptchaTestForm)
def test_id_prefix(request):
class CaptchaTestForm(forms.Form):
sender = forms.EmailField()
subject = forms.CharField(max_length=100)
captcha1 = CaptchaField(id_prefix="form1")
captcha2 = CaptchaField(id_prefix="form2")
return _test(request, CaptchaTestForm)
| mbi/django-simple-captcha | captcha/tests/views.py | Python | mit | 3,752 |
from __future__ import unicode_literals
import os
from mopidy import ext, config
__version__ = '1.0.1'
__url__ = 'https://github.com/dz0ny/mopidy-api-explorer'
class APIExplorerExtension(ext.Extension):
dist_name = 'Mopidy-API-Explorer'
ext_name = 'api_explorer'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(APIExplorerExtension, self).get_config_schema()
return schema
def setup(self, registry):
registry.add('http:static', {
'name': self.ext_name,
'path': os.path.join(os.path.dirname(__file__), 'public'),
})
| dz0ny/mopidy-api-explorer | mopidy_explorer/__init__.py | Python | mit | 762 |
import twe_lite
import json
import queue
import sys
import time
import traceback
import yaml
from threading import Thread
from datetime import datetime
from pytz import timezone
from iothub_client import IoTHubClient, IoTHubClientError, IoTHubTransportProvider, IoTHubClientResult, IoTHubClientStatus
from iothub_client import IoTHubMessage, IoTHubMessageDispositionResult, IoTHubError, DeviceMethodReturnValue
SECRETS_FILE_NAME = "secrets.yml"
DEFAULT_PORT_NAME = '/dev/ttyUSB0'
TIME_ZONE = timezone('Asia/Tokyo')
def print_usage():
print('Usage: python gateway.py DEVICE_ID [SERIAL_PORT_NAME]')
print("if SERIAL_PORT_NAME is omitted, port name is '/dev/ttyUSB0' by default")
secrets = None
with open(SECRETS_FILE_NAME, 'r') as f:
    secrets = yaml.safe_load(f)
device_id = None
port_name = DEFAULT_PORT_NAME
if len(sys.argv) == 2:
device_id = sys.argv[1]
elif len(sys.argv) == 3:
device_id = sys.argv[1]
port_name = sys.argv[2]
else:
print_usage()
sys.exit(1)
print("Device ID: " + device_id)
print("Port name: " + port_name)
continues = True
class MonoStickThread(Thread):
def __init__(self, mono_stick, parser, queue):
super().__init__()
self.__mono_stick = mono_stick
self.__parser = parser
self.__queue = queue
def run(self):
print("Start reading data from monostick.")
while continues:
try:
data = mono_stick.read_line()
if len(data) == 0:
continue
print('Data: {0}'.format(data))
received_message = self.__parser.parse(data)
self.__queue.put(received_message, timeout=0.1)
except queue.Full as _:
print('Message queue is full')
except twe_lite.InvalidMessageFormatError as e:
print(traceback.format_exc())
class SendMessageThread(Thread):
def __init__(self, iothub_client, queue):
super().__init__()
self.__iothub_client = iothub_client
self.__queue = queue
def run(self):
print("Start sending data to Azure IoT Hub.")
while continues:
try:
try:
status_notification_message = self.__queue.get(timeout=0.1)
except queue.Empty as _:
continue
self.__queue.task_done()
print(str(status_notification_message))
if not status_notification_message.di1.changed:
continue
if status_notification_message.di1.state == twe_lite.DigitalPinState.HIGH:
continue
self.__send_message()
except IoTHubError as iothub_error:
print("Unexpected error %s from IoTHub" % iothub_error)
time.sleep(1)
except Exception as e:
print(traceback.format_exc())
def __send_message(self):
detected_json = self.__make_detected_json()
sending_message = IoTHubMessage(bytearray(detected_json, 'utf8'))
self.__iothub_client.send_event_async(sending_message, self.__event_confirmation_callback, None)
while True:
status = self.__iothub_client.get_send_status()
if status == IoTHubClientStatus.IDLE:
break
def __event_confirmation_callback(self, message, result, _):
print("Confirmation received for message with result = %s" % (result))
def __make_detected_json(self):
now = datetime.now(TIME_ZONE)
return json.dumps({
'MessageType': 'DeviceEvent',
'DeviceId': device_id,
'EventType': 'HumanDetected',
'EventTime': now.isoformat()
})
def iothub_client_init():
connection_string = secrets['iothub']['connection_string']
client = IoTHubClient(connection_string, IoTHubTransportProvider.MQTT)
client.set_option("messageTimeout", 10000)
client.set_option("logtrace", 0)
client.set_option("product_info", "TweLiteGateway")
return client
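# Note (added): iothub_client_init() expects secrets.yml to hold the device
# connection string under the keys read above, i.e. roughly:
#
#   iothub:
#     connection_string: "HostName=<hub>.azure-devices.net;DeviceId=<device>;SharedAccessKey=<key>"
#
# The HostName/DeviceId/SharedAccessKey layout is the standard Azure IoT Hub
# device connection-string format; the surrounding YAML shape is inferred from
# the secrets['iothub']['connection_string'] lookup and is an assumption here.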
with twe_lite.MonoStick(port_name, 0.1, 0.1) as mono_stick:
client = iothub_client_init()
parser = twe_lite.Parser()
message_queue = queue.Queue()
threads = []
try:
        threads.append(MonoStickThread(mono_stick, parser, message_queue))
threads.append(SendMessageThread(client, message_queue))
for thread in threads:
thread.start()
while continues:
print("Quit if 'q is entered.")
c = input()
if c == 'q':
continues = False
break
finally:
for thread in threads:
thread.join()
sys.exit(0) | locatw/Autonek | TweLiteGateway/twe_lite_gateway/gateway.py | Python | mit | 4,912 |
# direct inputs
# source to this solution and code:
# http://stackoverflow.com/questions/14489013/simulate-python-keypresses-for-controlling-a-game
# http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
import ctypes
import time
HELD = set()
SendInput = ctypes.windll.user32.SendInput
mouse_button_down_mapping = {
'left': 0x0002,
'middle': 0x0020,
'right': 0x0008
}
mouse_button_up_mapping = {
'left': 0x0004,
'middle': 0x0040,
'right': 0x0010
}
CODES = {
'esc': 0x01,
'escape': 0x01,
'1': 0x02,
'2': 0x03,
'3': 0x04,
'4': 0x05,
'5': 0x06,
'6': 0x07,
'7': 0x08,
'8': 0x09,
    '9': 0x0A,  # was 0x10, which is the DirectInput scan code for 'q'
'q': 0x10,
'w': 0x11,
'e': 0x12,
'r': 0x13,
't': 0x14,
'y': 0x15,
'u': 0x16,
'i': 0x17,
'o': 0x18,
'p': 0x19,
'a': 0x1E,
's': 0x1F,
'd': 0x20,
'f': 0x21,
'g': 0x22,
'h': 0x23,
'j': 0x24,
'k': 0x25,
'l': 0x26,
'z': 0x2C,
'x': 0x2D,
'c': 0x2E,
'v': 0x2F,
'b': 0x30,
'n': 0x31,
'm': 0x32,
'ctrl': 0x1D,
'pageup': 0xC9 + 1024,
'pagedown': 0xD1 + 1024,
'up': 0xC8,
'left': 0xCB,
'right': 0xCD,
'down': 0xD0,
'alt': 0x38,
}
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actuals Functions
def release_all():
held = list(HELD)
for key in held:
release(key)
try:
HELD.remove(key)
except KeyError:
pass
def hold(key):
hexKeyCode = CODES[str(key)]
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
HELD.add(key)
def release(key):
hexKeyCode = CODES[str(key)]
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
    HELD.discard(key)  # tolerate releasing a key that was never held
def send(keys):
delay = .1
for key in keys:
hold(key)
time.sleep(delay)
release(key)
# for code in keycodes:
# time.sleep(delay)
def click_down(button='left'):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.mi = MouseInput(0, 0, 0, mouse_button_down_mapping[button], 0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(0), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def click_up(button='left'):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.mi = MouseInput(0, 0, 0, mouse_button_up_mapping[button], 0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(0), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def click(button='left', duration=0.05):
click_down(button=button)
time.sleep(duration)
click_up(button=button)
if __name__ == '__main__':
time.sleep(10)
click()
# send(['w'])
# for i in range(100):
# send('wasd')
# hold(CODES['w'])
# time.sleep(5)
# release(CODES['w'])
# time.sleep(5)
# hold(ONE)
# release(ONE)
# time.sleep(1)
# hold(TWO)
# time.sleep(1)
# release(TWO)
# time.sleep(1) | osspeak/osspeak | osspeak/recognition/actions/library/directinput.py | Python | mit | 4,326 |
# -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
"""
#from __future__ import division, absolute_import
#import random
from zope.interface import implementer
#from twisted.python import log, failure, components
#from twisted.internet import interfaces, error, defer
from . import internet_interfaces as interfaces
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
"""
This is a factory which produces protocols.
By default, buildProtocol will create a protocol of the class given in
self.protocol.
"""
# put a subclass of Protocol here:
protocol = None
numPorts = 0
noisy = True
@classmethod
def forProtocol(cls, protocol, *args, **kwargs):
"""
Create a factory for the given protocol.
It sets the C{protocol} attribute and returns the constructed factory
instance.
@param protocol: A L{Protocol} subclass
@param args: Positional arguments for the factory.
@param kwargs: Keyword arguments for the factory.
@return: A L{Factory} instance wired up to C{protocol}.
"""
factory = cls(*args, **kwargs)
factory.protocol = protocol
return factory
def logPrefix(self):
"""
Describe this factory for log messages.
"""
return self.__class__.__name__
def doStart(self):
"""Make sure startFactory is called.
Users should not call this function themselves!
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting factory %r" % self)
self.startFactory()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopFactory is called.
Users should not call this function themselves!
"""
if self.numPorts == 0:
# this shouldn't happen, but does sometimes and this is better
# than blowing up in assert as we did previously.
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
if self.noisy:
log.msg("Stopping factory %r" % self)
self.stopFactory()
def startFactory(self):
"""This will be called before I begin listening on a Port or Connector.
It will only be called once, even if the factory is connected
to multiple ports.
This can be used to perform 'unserialization' tasks that
are best put off until things are actually running, such
as connecting to a database, opening files, etcetera.
"""
def stopFactory(self):
"""This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
def buildProtocol(self, addr):
"""Create an instance of a subclass of Protocol.
The returned instance will handle input on an incoming server
connection, and an attribute \"factory\" pointing to the creating
factory.
Override this method to alter how Protocol instances get created.
@param addr: an object implementing L{twisted.internet.interfaces.IAddress}
"""
p = self.protocol()
p.factory = self
return p
class BaseProtocol:
"""
This is the abstract superclass of all protocols.
Some methods have helpful default implementations here so that they can
easily be shared, but otherwise the direct subclasses of this class are more
interesting, L{Protocol} and L{ProcessProtocol}.
"""
connected = 0
transport = None
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this Protocol, and calls the
connectionMade() callback.
"""
self.connected = 1
self.transport = transport
self.connectionMade()
def connectionMade(self):
"""Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
#connectionDone=failure.Failure(error.ConnectionDone())
#connectionDone.cleanFailure()
connectionDone = None
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
"""
This is the base class for streaming connection-oriented protocols.
If you are going to write a new connection-oriented protocol for Twisted,
start here. Any protocol implementation, either client or server, should
be a subclass of this class.
The API is quite simple. Implement L{dataReceived} to handle both
event-based and synchronous input; output can be sent through the
'transport' attribute, which is to be an instance that implements
L{twisted.internet.interfaces.ITransport}. Override C{connectionLost} to be
notified when the connection ends.
Some subclasses exist already to help you write common types of protocols:
see the L{twisted.protocols.basic} module for a few of them.
"""
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
def dataReceived(self, data):
"""Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(self, reason=connectionDone):
"""Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed.
@type reason: L{twisted.python.failure.Failure}
"""
| amol9/mayloop | mayloop/imported/twisted/internet_protocol.py | Python | mit | 7,110 |
# -*- coding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import django
import patchy
from django.db.models.deletion import get_candidate_relations_to_delete
from django.db.models.query import QuerySet
from django.db.models.query_utils import Q
from django.db.models.sql.query import Query
def patch_ORM_to_be_deterministic():
"""
Django's ORM is non-deterministic with regards to the queries it outputs
for e.g. OR clauses. We need it to be deterministic so that we can compare
queries between runs, so we make a couple patches to its internals to do
this. Mostly this is done by adding sorted() in some places so we're not
affected by the vagaries of random dict iteration order.
There is no undo for this, but it doesn't make the ORM much slower or
anything bad.
"""
if patch_ORM_to_be_deterministic.have_patched:
return
patch_ORM_to_be_deterministic.have_patched = True
patch_QuerySet()
patch_Query()
patch_Q()
version = django.get_version()
if version.startswith('1.8') or version.startswith('1.9'):
patch_delete()
patch_ORM_to_be_deterministic.have_patched = False
def patch_QuerySet():
patchy.patch(QuerySet.annotate, """\
@@ -17,7 +17,7 @@
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
- annotations.update(kwargs)
+ annotations.update(sorted(kwargs.items()))
clone = self._clone()
names = self._fields
""")
def patch_Query():
patchy.patch(Query.add_extra, """\
@@ -13,7 +13,7 @@
param_iter = iter(select_params)
else:
param_iter = iter([])
- for name, entry in select.items():
+ for name, entry in sorted(select.items()):
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
""")
def patch_Q():
# This one can't be done by patchy since __init__ is different in Python 3,
# maybe one day https://github.com/adamchainz/patchy/issues/31 will be
# fixed.
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + sorted(kwargs.items()))
Q.__init__ = __init__
def patch_delete():
patchy.patch(get_candidate_relations_to_delete, """\
@@ -4,9 +4,12 @@ def get_candidate_relations_to_delete(opts):
candidate_models = {opts}
candidate_models = candidate_models.union(opts.concrete_model._meta.proxied_children)
# For each model, get all candidate fields.
- candidate_model_fields = set(chain.from_iterable(
- opts.get_fields(include_hidden=True) for opts in candidate_models
- ))
+ from collections import OrderedDict
+ candidates_dict = OrderedDict()
+ for opts in candidate_models:
+ for field in opts.get_fields(include_hidden=True):
+ candidates_dict[field.name] = field
+ candidate_model_fields = candidates_dict.values()
# The candidate relations are the ones that come from N-1 and 1-1 relations.
# N-N (i.e., many-to-many) relations aren't candidates for deletion.
return (
""")
| moumoutte/django-perf-rec | django_perf_rec/orm.py | Python | mit | 3,345 |
import time
from aquests.athreads import socket_map
from aquests.athreads import trigger
from rs4.cbutil import tuple_cb
from aquests.client.asynconnect import AsynSSLConnect, AsynConnect
from aquests.dbapi.dbconnect import DBConnect
import threading
from aquests.protocols.http import request as http_request
from aquests.protocols.http import request_handler as http_request_handler
from aquests.protocols.http2 import request_handler as http2_request_handler
from aquests.protocols.grpc.request import GRPCRequest
from aquests.protocols.http import response as http_response
from aquests.protocols.ws import request_handler as ws_request_handler
from aquests.protocols.ws import request as ws_request
from . import rcache
from skitai import lifetime
from aquests import asyncore
import sys
import inspect
from skitai import exceptions
from skitai import REQFAIL, UNSENT, TIMEOUT, NETERR, NORMAL
from ...corequest import corequest, response
import sqlite3
try:
import psycopg2
except ImportError:
class PGIntegrityError (Exception):
pass
else:
PGIntegrityError = psycopg2.IntegrityError
DEFAULT_TIMEOUT = 10
WAIT_POLL = False
class OperationError (Exception):
pass
class Result (response, rcache.Result):
def __init__ (self, id, status, response, ident = None):
rcache.Result.__init__ (self, status, ident)
self.node = id
self.__response = response
def __getattr__ (self, attr):
return getattr (self.__response, attr)
def reraise (self):
if self.status_code >= 300:
try:
self.__response.expt
except AttributeError:
# redircting to HTTPError
raise exceptions.HTTPError ("%d %s" % (self.status_code, self.reason))
else:
self.__response.raise_for_status ()
return self
def close (self):
self.__response = None
def cache (self, timeout = 60, cache_if = (200,)):
if not timeout:
return
if self.status != NORMAL or self.status_code not in cache_if:
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,), one = False):
self.reraise ()
self.cache (cache, cache_if)
if one:
if len (self.data) == 0:
raise exceptions.HTTPError ("410 Partial Not Found")
if len (self.data) != 1:
raise exceptions.HTTPError ("409 Conflict")
if isinstance (self.data, dict):
return self.data.popitem () [1]
return self.data [0]
return self.data
def one (self, cache = None, cache_if = (200,)):
try:
return self.fetch (cache, cache_if, True)
except (PGIntegrityError, sqlite3.IntegrityError):
# primary or unique index error
raise exceptions.HTTPError ("409 Conflict")
def commit (self):
self.reraise ()
class Results (response, rcache.Result):
def __init__ (self, results, ident = None):
self.results = results
self.status_code = [rs.status_code for rs in results]
rcache.Result.__init__ (self, [rs.status for rs in self.results], ident)
def __iter__ (self):
return self.results.__iter__ ()
@property
def data (self):
return [r.data for r in self.results]
@property
def text (self):
return [r.text for r in self.results]
def reraise (self):
[r.reraise () for r in self.results]
def cache (self, timeout = 60, cache_if = (200,)):
if [_f for _f in [rs.status != NORMAL or rs.status_code not in cache_if for rs in self.results] if _f]:
return
rcache.Result.cache (self, timeout)
return self
def fetch (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.fetch () for r in self.results]
def one (self, cache = None, cache_if = (200,)):
self.cache (cache, cache_if)
return [r.one () for r in self.results]
class Dispatcher:
def __init__ (self, cv, id, ident = None, filterfunc = None, cachefs = None, callback = None):
self._cv = cv
self.id = id
self.ident = ident
self.filterfunc = filterfunc
self.cachefs = cachefs
self.callback = callback
self.creation_time = time.time ()
self.status = UNSENT
self.result = None
self.handler = None
def get_id (self):
return self.id
def get_status (self):
with self._cv:
return self.status
def request_failed (self):
self.status = REQFAIL
tuple_cb (self, self.callback)
def set_status (self, code, result = None):
with self._cv:
self.status = code
if result:
self.result = result
return code
def get_result (self):
if not self.result:
if self.get_status () == REQFAIL:
self.result = Result (self.id, REQFAIL, http_response.FailedResponse (731, "Request Failed"), self.ident)
else:
self.result = Result (self.id, TIMEOUT, http_response.FailedResponse (730, "Timeout"), self.ident)
return self.result
def do_filter (self):
if self.filterfunc:
self.filterfunc (self.result)
def handle_cache (self, response):
        self.set_status (NORMAL, Result (self.id, NORMAL, response, self.ident))
def handle_result (self, handler):
if self.get_status () == TIMEOUT:
# timeout, ignore
return
response = handler.response
# DON'T do_filter here, it blocks select loop
if response.code >= 700:
if response.code == 702:
status = TIMEOUT
else:
status = NETERR
else:
status = NORMAL
result = Result (self.id, status, response, self.ident)
cakey = response.request.get_cache_key ()
if self.cachefs and cakey and response.max_age:
self.cachefs.save (
cakey,
response.get_header ("content-type"), response.content,
response.max_age, 0
)
handler.callback = None
handler.response = None
self.set_status (status, result)
tuple_cb (self, self.callback)
class Task (corequest):
DEFAULT_CACHE_TIMEOUT = 42
proto_map = dict (
rpc = http_request.XMLRPCRequest,
xmlrpc = http_request.XMLRPCRequest,
jsonrpc = http_request.JSONRPCRequest,
grpc = GRPCRequest
)
def __init__ (self,
cluster,
uri,
params = None,
reqtype = "get",
headers = None,
auth = None,
meta = None,
use_cache = False,
mapreduce = True,
filter = None,
callback = None,
cache = None,
timeout = 10,
origin = None,
cachefs = None,
logger = None
):
self._uri = uri
self._params = params
self._headers = headers
self._reqtype = reqtype
self._auth = auth
self.set_defaults (cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs)
if not self._reqtype.lower ().endswith ("rpc"):
self._build_request ("", self._params)
@classmethod
def add_proto (cls, name, class_):
cls.proto_map [name] = class_
def set_defaults (self, cluster, meta, use_cache, mapreduce, filter, callback, cache, timeout, origin, logger, cachefs = None):
self._cluster = cluster
self._meta = meta or {}
self._use_cache = use_cache
self._mapreduce = mapreduce
self._filter = filter
self._callback = callback
self._cache_timeout = cache
self._timeout = timeout
self._origin = origin
self._cachefs = cachefs
self._logger = logger
self._requests = {}
self._results = []
self._canceled = False
self._init_time = time.time ()
self._cv = None
self._retry = 0
self._numnodes = 0
self._cached_result = None
self._cached_request_args = None
self._request = None
self._ccv = None
if self._cluster:
nodes = self._cluster.get_nodes ()
self._numnodes = len (nodes)
if self._mapreduce:
self._nodes = nodes
else: # anyone of nodes
self._nodes = [None]
def __del__ (self):
self._cv = None
self._results = []
def _get_ident (self):
cluster_name = self._cluster.get_name ()
if cluster_name == "__socketpool__":
_id = "%s/%s" % (self._uri, self._reqtype)
else:
_id = "%s/%s/%s" % (cluster_name, self._uri, self._reqtype)
_id += "/%s/%s" % self._cached_request_args
_id += "%s" % (
self._mapreduce and "/M" or ""
)
return _id
def _add_header (self, n, v):
if self._headers is None:
self._headers = {}
self._headers [n] = v
def _handle_request (self, request, rs, asyncon, handler):
if self._cachefs:
# IMP: mannual address setting
request.set_address (asyncon.address)
cakey = request.get_cache_key ()
if cakey:
cachable = self._cachefs.is_cachable (
request.get_header ("cache-control"),
request.get_header ("cookie") is not None,
request.get_header ("authorization") is not None,
request.get_header ("pragma")
)
if cachable:
hit, compressed, max_age, content_type, content = self._cachefs.get (cakey, undecompressible = 0)
if hit:
header = "HTTP/1.1 200 OK\r\nContent-Type: %s\r\nX-Skitaid-Cache-Lookup: %s" % (
content_type, hit == 1 and "MEM_HIT" or "HIT"
)
response = http_response.Response (request, header)
response.collect_incoming_data (content)
response.done ()
asyncon.set_active (False)
rs.handle_cache (response)
return 0
r = handler (asyncon, request, rs.handle_result)
if asyncon.get_proto () and asyncon.isconnected ():
asyncon.handler.handle_request (r)
else:
r.handle_request ()
return 1
def _build_request (self, method, params):
self._cached_request_args = (method, params) # backup for retry
if self._use_cache and rcache.the_rcache:
self._cached_result = rcache.the_rcache.get (self._get_ident (), self._use_cache)
if self._cached_result is not None:
self._cached_result.meta = self._meta
self._callback and tuple_cb (self._cached_result, self._callback)
return
else:
self._use_cache = False
requests = 0
while self._avails ():
if self._cluster.get_name () != "__socketpool__":
asyncon = self._get_connection (None)
else:
asyncon = self._get_connection (self._uri)
self._auth = self._auth or asyncon.get_auth ()
_reqtype = self._reqtype.lower ()
rs = Dispatcher (
self._cv, asyncon.address,
ident = not self._mapreduce and self._get_ident () or None,
filterfunc = self._filter, cachefs = self._cachefs,
callback = self._collect
)
self._requests [rs] = asyncon
args = (params, self._headers, self._auth, self._logger, self._meta)
try:
if _reqtype in ("ws", "wss"):
handler = ws_request_handler.RequestHandler
request = ws_request.Request (self._uri, *args)
else:
if not self._use_cache:
self._add_header ("Cache-Control", "no-cache")
handler = http_request_handler.RequestHandler
try:
class_ = self.proto_map [_reqtype]
except KeyError:
if _reqtype == "upload":
request = http_request.HTTPMultipartRequest (self._uri, _reqtype, *args)
else:
request = http_request.HTTPRequest (self._uri, _reqtype, *args)
else:
request = class_ (self._uri, method, *args)
requests += self._handle_request (request, rs, asyncon, handler)
except:
self._logger ("Request Creating Failed", "fail")
self._logger.trace ()
rs.request_failed ()
asyncon.set_active (False)
continue
if requests:
self._request = request # sample for unitest
trigger.wakeup ()
if _reqtype [-3:] == "rpc":
return self
def _avails (self):
return len (self._nodes)
def _get_connection (self, id = None):
if id is None: id = self._nodes.pop ()
else: self._nodes = []
asyncon = self._cluster.get (id)
self._setup (asyncon)
return asyncon
def _setup (self, asyncon):
asyncon.set_timeout (self._timeout)
if self._cv is None:
self._cv = asyncon._cv
def _cancel (self):
with self._cv:
self._canceled = True
def _count (self):
with self._cv:
return len (self._requests)
#---------------------------------------------------------
def _fail_log (self, status):
if self._origin:
self._logger ("backend status is {}, {} at {} LINE {}: {}".format (
status, self._origin [3], self._origin [1], self._origin [2], self._origin [4][0].strip ()
), "debug")
def _collect (self, rs, failed = False):
with self._cv:
if not failed and self._canceled:
return
try:
asyncon = self._requests.pop (rs)
except KeyError:
return
status = rs.get_status ()
if status == REQFAIL:
with self._cv:
self._results.append (rs)
self._cluster.report (asyncon, True) # not asyncons' Fault
elif status == TIMEOUT:
with self._cv:
self._results.append (rs)
self._cluster.report (asyncon, False) # not asyncons' Fault
elif not self._mapreduce and status == NETERR and self._retry < (self._numnodes - 1):
self._logger ("cluster response error, switch to another...", "fail")
self._cluster.report (asyncon, False) # exception occured
with self._cv:
self._retry += 1
self._canceled = False
self._nodes = [None]
return self.rerequest ()
elif status >= NETERR:
with self._cv:
self._results.append (rs)
if status == NETERR:
self._cluster.report (asyncon, False) # exception occured
else:
self._cluster.report (asyncon, True) # well-functioning
rs.do_filter ()
with self._cv:
requests = self._requests
callback, self._callback = self._callback, None
if not requests:
if callback:
self._do_callback (callback)
elif not failed:
cv = self._ccv is not None and self._ccv or self._cv
with cv:
cv.notify_all ()
def _do_callback (self, callback):
result = self.dispatch (wait = False)
tuple_cb (result, callback)
#-----------------------------------------------------------------
def rerequest (self):
self._build_request (*self._cached_request_args)
def reset_timeout (self, timeout, ccv = None):
with self._cv:
self._timeout = timeout
self._ccv = ccv
asyncons = list (self._requests.values ())
if timeout > 0:
for asyncon in asyncons:
asyncon.set_timeout (timeout)
def set_callback (self, callback, reqid = None, timeout = None):
if reqid is not None:
self._meta ["__reqid"] = reqid
if self._cv:
with self._cv:
requests = self._requests
self._callback = callback
else:
# already finished or will use cache
requests = self._requests
self._callback = callback
if not requests:
return self._do_callback (callback)
timeout and self.reset_timeout (timeout)
# synchronous methods ----------------------------------------------
def _wait (self, timeout = None):
timeout and self.reset_timeout (timeout)
remain = self._timeout - (time.time () - self._init_time)
if remain > 0:
with self._cv:
if self._requests and not self._canceled:
self._cv.wait (remain)
self._canceled = True
requests = list (self._requests.items ())
for rs, asyncon in requests:
rs.set_status (TIMEOUT)
asyncon.handle_abort () # abort imme
self._collect (rs, failed = True)
def dispatch (self, cache = None, cache_if = (200,), timeout = None, wait = True, reraise = False):
if self._cached_result is not None:
return self._cached_result
wait and self._wait (timeout)
rss = [rs.get_result () for rs in self._results]
for rs in rss:
if rs.status == NORMAL and rs.status_code < 300:
continue
self._fail_log (rs.status)
reraise and rs.reraise ()
if self._mapreduce:
self._cached_result = Results (rss, ident = self._get_ident ())
else:
self._cached_result = rss [0]
self.cache (cache, cache_if)
return self._cached_result
def dispatch_or_throw (self, cache = None, cache_if = (200,), timeout = None):
return self.dispatch (cache, cache_if, reraise = True, timeout = timeout)
def none_or_dispatch (self, cache = None, cache_if = (200,), timeout = None):
r = self.dispatch (cache, cache_if, reraise = True, timeout = timeout)
if r.data is not None:
return r
def wait (self, timeout = None, reraise = False):
return self.dispatch (reraise = reraise, timeout = timeout)
# direct access to data ----------------------------------------------
def commit (self, timeout = None):
return self.wait (timeout, True)
wait_or_throw = commit
def fetch (self, cache = None, cache_if = (200,), timeout = None):
res = self._cached_result or self.dispatch (timeout = timeout, reraise = True)
return res.fetch (cache or self._cache_timeout, cache_if)
def one (self, cache = None, cache_if = (200,), timeout = None):
try:
res = self._cached_result or self.dispatch (timeout = timeout, reraise = True)
except (PGIntegrityError, sqlite3.IntegrityError):
raise exceptions.HTTPError ("409 Conflict")
return res.one (cache or self._cache_timeout, cache_if)
def then (self, func):
from ..tasks import Future
return Future (self, self._timeout, **self._meta).then (func)
def cache (self, cache = 60, cache_if = (200,)):
cache = cache or self._cache_timeout
if not cache:
return self
if self._cached_result is None:
raise ValueError("call dispatch first")
self._cached_result.cache (cache, cache_if)
return self
getwait = getswait = dispatch # lower ver compat.
getwait_or_throw = getswait_or_throw = dispatch_or_throw # lower ver compat.
# cluster base call ---------------------------------------
class _Method:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
class Proxy:
def __init__ (self, __class, *args, **kargs):
self.__class = __class
self.__args = args
self.__kargs = kargs
def __enter__ (self):
return self
def __exit__ (self, type, value, tb):
pass
def __getattr__ (self, name):
return _Method (self.__request, name)
def __request (self, method, params):
cdc = self.__class (*self.__args, **self.__kargs)
cdc._build_request (method, params)
return cdc
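# Note (added): _Method and Proxy implement the usual dynamic RPC-proxy idiom, the
# same trick as xmlrpc.client.ServerProxy: attribute access accumulates a dotted
# method name and the final call hands it to Task._build_request (). Roughly,
#
#   stub = Proxy (Task, cluster, "http://host/rpc2", ...)
#   stub.math.add (1, 2)
#
# creates a fresh Task and calls _build_request ("math.add", (1, 2)) on it
# (the uri and extra arguments here are placeholders).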
class TaskCreator:
def __init__ (self, cluster, logger, cachesfs):
self.cluster = cluster
self.logger = logger
self.cachesfs = cachesfs
def __getattr__ (self, name):
return getattr (self.cluster, name)
def Server (self, uri, params = None, reqtype="rpc", headers = None, auth = None, meta = None, use_cache = True, mapreduce = False, filter = None, callback = None, cache = None, timeout = DEFAULT_TIMEOUT, caller = None):
if type (headers) is list:
h = {}
for n, v in headers:
h [n] = v
headers = h
if reqtype.endswith ("rpc"):
return Proxy (Task, self.cluster, uri, params, reqtype, headers, auth, meta, use_cache, mapreduce, filter, callback, cache, timeout, caller, self.cachesfs, self.logger)
else:
return Task (self.cluster, uri, params, reqtype, headers, auth, meta, use_cache, mapreduce, filter, callback, cache, timeout, caller, self.cachesfs, self.logger)
| hansroh/skitai | skitai/corequest/httpbase/task.py | Python | mit | 22,202 |
# -*- coding: utf-8 -*-
"""Alternate versions of the splitting functions for testing."""
from __future__ import unicode_literals
import unicodedata
from natsort.compat.py23 import PY_VERSION
if PY_VERSION >= 3.0:
long = int
def int_splitter(x, signed, safe, sep):
"""Alternate (slow) method to split a string into numbers."""
if not x:
return []
all_digits = set('0123456789')
full_list, strings, nums = [], [], []
input_len = len(x)
for i, char in enumerate(x):
# If this character is a sign and the next is a number,
# start a new number.
if (i+1 < input_len and signed and
(char in '-+') and (x[i+1] in all_digits)):
# Reset any current string or number.
if strings:
full_list.append(''.join(strings))
if nums:
full_list.append(int(''.join(nums)))
strings = []
nums = [char]
# If this is a number, add to the number list.
elif char in all_digits:
nums.append(char)
# Reset any string.
if strings:
full_list.append(''.join(strings))
strings = []
# If this is a unicode digit, append directly to the full list.
elif char.isdigit():
# Reset any string or number.
if strings:
full_list.append(''.join(strings))
if nums:
full_list.append(int(''.join(nums)))
strings = []
nums = []
full_list.append(unicodedata.digit(char))
# Otherwise add to the string.
else:
strings.append(char)
# Reset any number.
if nums:
full_list.append(int(''.join(nums)))
nums = []
if nums:
full_list.append(int(''.join(nums)))
elif strings:
full_list.append(''.join(strings))
if safe:
full_list = sep_inserter(full_list, (int, long), sep)
if type(full_list[0]) in (int, long):
return [sep] + full_list
else:
return full_list
def float_splitter(x, signed, exp, safe, sep):
"""Alternate (slow) method to split a string into numbers."""
if not x:
return []
all_digits = set('0123456789')
full_list, strings, nums = [], [], []
input_len = len(x)
for i, char in enumerate(x):
# If this character is a sign and the next is a number,
# start a new number.
if (i+1 < input_len and
(signed or (i > 1 and exp and x[i-1] in 'eE' and
x[i-2] in all_digits)) and
(char in '-+') and (x[i+1] in all_digits)):
# Reset any current string or number.
if strings:
full_list.append(''.join(strings))
if nums and i > 0 and x[i-1] not in 'eE':
full_list.append(float(''.join(nums)))
nums = [char]
else:
nums.append(char)
strings = []
# If this is a number, add to the number list.
elif char in all_digits:
nums.append(char)
# Reset any string.
if strings:
full_list.append(''.join(strings))
strings = []
# If this is a decimal, add to the number list.
elif (i + 1 < input_len and char == '.' and x[i+1] in all_digits):
if nums and '.' in nums:
full_list.append(float(''.join(nums)))
nums = []
nums.append(char)
if strings:
full_list.append(''.join(strings))
strings = []
# If this is an exponent, add to the number list.
elif (i > 0 and i + 1 < input_len and exp and char in 'eE' and
x[i-1] in all_digits and x[i+1] in all_digits | set('+-')):
if 'e' in nums or 'E' in nums:
strings = [char]
full_list.append(float(''.join(nums)))
nums = []
else:
nums.append(char)
# If this is a unicode digit, append directly to the full list.
elif unicodedata.numeric(char, None) is not None:
# Reset any string or number.
if strings:
full_list.append(''.join(strings))
if nums:
full_list.append(float(''.join(nums)))
strings = []
nums = []
full_list.append(unicodedata.numeric(char))
# Otherwise add to the string.
else:
strings.append(char)
# Reset any number.
if nums:
full_list.append(float(''.join(nums)))
nums = []
if nums:
full_list.append(float(''.join(nums)))
elif strings:
full_list.append(''.join(strings))
# Fix a float that looks like a string.
fstrings = ('inf', 'infinity', '-inf', '-infinity',
'+inf', '+infinity', 'nan')
full_list = [float(y) if type(y) != float and y.lower() in fstrings else y
for y in full_list]
if safe:
full_list = sep_inserter(full_list, (float,), sep)
if type(full_list[0]) == float:
return [sep] + full_list
else:
return full_list
def sep_inserter(x, t, sep):
# Simulates the py3_safe function.
ret = [x[0]]
for i, y in enumerate(x[1:]):
if type(y) in t and type(x[i]) in t:
ret.append(sep)
ret.append(y)
return ret
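# Examples (added for clarity; behaviour follows from the code above):
#   int_splitter('abc12def34', signed=False, safe=False, sep='')
#       -> ['abc', 12, 'def', 34]
#   int_splitter('12abc', signed=False, safe=True, sep='')
#       -> ['', 12, 'abc']          # leading sep because the result starts with a number
#   float_splitter('4.5abc', signed=False, exp=True, safe=False, sep='')
#       -> [4.5, 'abc']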
| agustinhenze/natsort.debian | test_natsort/slow_splitters.py | Python | mit | 5,511 |
# Copyright (c) 2013 Matthieu Huguet
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
from .types import BackupCollection, Backup
def run(config):
backups = get_backup_collection(config.backup_dir)
days = backups.days()
if not days:
return 0
days_to_keep = get_days_to_keep(days, config)
if days_to_keep or config.force:
backups_to_remove = backups.except_days(set(days_to_keep))
backups_to_remove.remove_all(config.noop)
return 0
else:
sys.stderr.write("""
WARNING : With the specified retention rules, all the files in the specified
directory will be deleted. If you only specified -m and / or -w, it means that
there is no file in the directory that match your retention rules. Please look
at --day-of-week or --day-of-month options.
If you really know what you are doing, you can use option --force to
remove all your backup files according to your retention rules.
""")
return 1
def get_backup_collection(backup_dir):
daily_backups = BackupCollection()
for file in os.listdir(backup_dir):
fpath = os.path.join(backup_dir, file)
if not os.path.islink(fpath) and os.path.isfile(fpath):
backup = Backup.from_path(fpath)
daily_backups.add(backup)
return daily_backups
def get_days_to_keep(days, config):
days_to_keep = daily_backup_days(days, config.days_retention)
days_to_keep += weekly_backup_days(
days, config.dow, config.weeks_retention)
days_to_keep += monthly_backup_days(
days, config.dom, config.months_retention)
return days_to_keep
def daily_backup_days(days, retention):
return days[:retention]
def weekly_backup_days(days, dow, retention):
weekly_days = [day for day in days if day.isoweekday() == dow]
return weekly_days[:retention]
def monthly_backup_days(days, dom, retention):
monthly_days = [day for day in days if day.day == dom]
return monthly_days[:retention]
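# Worked example (added; assumes BackupCollection.days() returns datetime.date
# objects sorted newest-first, which the slicing above relies on):
#   daily_backup_days(days, 7)       -> the 7 most recent backup days
#   weekly_backup_days(days, 7, 4)   -> the 4 most recent Sundays (isoweekday() == 7)
#   monthly_backup_days(days, 1, 6)  -> the 6 most recent 1st-of-month days
# get_days_to_keep() simply concatenates the three lists; duplicates are harmless
# because run() turns the result into a set before computing what to remove.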
| madmatah/lapurge | lapurge/purge.py | Python | mit | 3,000 |
import sys
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
from baselines.common.vec_env.vec_normalize import VecNormalize, VecNormalizeRewards
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
# TODO: solve this with regexes
env_type = env._entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
'BubbleBobble-Nes',
'SuperMarioBros-Nes',
'TwinBee3PokoPokoDaimaou-Nes',
'SpaceHarrier-Nes',
'SonicTheHedgehog-Genesis',
'Vectorman-Genesis',
'FinalFight-Snes',
'SpaceInvaders-Snes',
}
def train(args, extra_args):
env_type, env_id = get_env_type(args.env)
print('env_type: {}'.format(env_type))
total_timesteps = int(args.num_timesteps)
seed = args.seed
learn = get_learn_function(args.alg)
alg_kwargs = get_learn_function_defaults(args.alg, env_type)
alg_kwargs.update(extra_args)
env = build_env(args)
if args.save_video_interval != 0:
env = VecVideoRecorder(env, osp.join(logger.Logger.CURRENT.dir, "videos"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)
if args.network:
alg_kwargs['network'] = args.network
else:
if alg_kwargs.get('network') is None:
alg_kwargs['network'] = get_default_network(env_type)
print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
**alg_kwargs
)
return model, env
def build_env(args):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
nenv = args.num_env or ncpu
alg = args.alg
seed = args.seed
env_type, env_id = get_env_type(args.env)
print(env_id)
#extract the agc_env_name
noskip_idx = env_id.find("NoFrameskip")
env_name = env_id[:noskip_idx].lower()
print("Env Name for Masking:", env_name)
if env_type in {'atari', 'retro'}:
if alg == 'deepq':
env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})
elif alg == 'trpo_mpi':
env = make_env(env_id, env_type, seed=seed)
else:
frame_stack_size = 4
env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)
env = VecFrameStack(env, frame_stack_size)
else:
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth = True
get_session(config=config)
env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale)
if args.custom_reward != '':
from baselines.common.vec_env import VecEnv, VecEnvWrapper
import baselines.common.custom_reward_wrapper as W
assert isinstance(env,VecEnv) or isinstance(env,VecEnvWrapper)
custom_reward_kwargs = eval(args.custom_reward_kwargs)
if args.custom_reward == 'live_long':
env = W.VecLiveLongReward(env,**custom_reward_kwargs)
elif args.custom_reward == 'random_tf':
env = W.VecTFRandomReward(env,**custom_reward_kwargs)
elif args.custom_reward == 'preference':
env = W.VecTFPreferenceReward(env,**custom_reward_kwargs)
elif args.custom_reward == 'rl_irl':
if args.custom_reward_path == '':
assert False, 'no path for reward model'
else:
if args.custom_reward_lambda == '':
assert False, 'no combination parameter lambda'
else:
env = W.VecRLplusIRLAtariReward(env, args.custom_reward_path, args.custom_reward_lambda)
elif args.custom_reward == 'pytorch':
if args.custom_reward_path == '':
assert False, 'no path for reward model'
else:
env = W.VecPyTorchAtariReward(env, args.custom_reward_path, env_name)
else:
assert False, 'no such wrapper exist'
if env_type == 'mujoco':
env = VecNormalize(env)
# if env_type == 'atari':
# input("Normalizing for ATari game: okay? [Enter]")
# #normalize rewards but not observations for atari
# env = VecNormalizeRewards(env)
return env
def get_env_type(env_id):
if env_id in _game_envs.keys():
env_type = env_id
env_id = [g for g in _game_envs[env_type]][0]
else:
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())
return env_type, env_id
def get_default_network(env_type):
if env_type in {'atari', 'retro'}:
return 'cnn'
else:
return 'mlp'
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from baselines
alg_module = import_module('.'.join(['baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse_cmdline_kwargs(args):
'''
convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()}
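# Note (added): combined with parse_unknown_args, this turns trailing options such as
# "--lr=3e-4 --network=mlp --layers=[64,64]" into
# {'lr': 0.0003, 'network': 'mlp', 'layers': [64, 64]}; values are eval'd when possible
# and kept as plain strings otherwise.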
def main():
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_parser = common_arg_parser()
args, unknown_args = arg_parser.parse_known_args()
extra_args = parse_cmdline_kwargs(unknown_args)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
rank = 0
logger.configure()
else:
logger.configure(format_strs=[])
rank = MPI.COMM_WORLD.Get_rank()
model, env = train(args, extra_args)
env.close()
if args.save_path is not None and rank == 0:
save_path = osp.expanduser(args.save_path)
model.save(save_path)
if args.play:
logger.log("Running trained model")
env = build_env(args)
obs = env.reset()
def initialize_placeholders(nlstm=128,**kwargs):
return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))
state, dones = initialize_placeholders(**extra_args)
while True:
actions, _, state, _ = model.step(obs,S=state, M=dones)
obs, _, done, _ = env.step(actions)
env.render()
done = done.any() if isinstance(done, np.ndarray) else done
if done:
obs = env.reset()
env.close()
if __name__ == '__main__':
main()
| dsbrown1331/CoRL2019-DREX | drex-atari/baselines/baselines/run.py | Python | mit | 8,217 |